// SPDX-License-Identifier: GPL-2.0-only
/*
* truncate.c
*
* PURPOSE
* Truncate handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* (C) 1999-2004 Ben Fennema
* (C) 1999 Stelias Computing Inc
*
* HISTORY
*
* 02/24/99 blf Created.
*
*/
#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/mm.h>
#include "udf_i.h"
#include "udf_sb.h"
static void extent_trunc(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, int8_t etype, uint32_t elen,
uint32_t nelen)
{
struct kernel_lb_addr neloc = {};
int last_block = (elen + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
int first_block = (nelen + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
if (nelen) {
if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
udf_free_blocks(inode->i_sb, inode, eloc, 0,
last_block);
etype = (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30);
} else
neloc = *eloc;
nelen = (etype << 30) | nelen;
}
if (elen != nelen) {
udf_write_aext(inode, epos, &neloc, nelen, 0);
if (last_block > first_block) {
if (etype == (EXT_RECORDED_ALLOCATED >> 30))
mark_inode_dirty(inode);
if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
udf_free_blocks(inode->i_sb, inode, eloc,
first_block,
last_block - first_block);
}
}
}
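/*
* Note on the encoding used above (illustrative values): a UDF extent
* length word packs the 2-bit extent type into bits 30-31 and the byte
* length into bits 0-29, which is why types are compared as
* "EXT_xxx >> 30" and the truncated length is rebuilt as
* "(etype << 30) | nelen". For example, a not-recorded-but-allocated
* extent of 0x1000 bytes is stored as
*
*	EXT_NOT_RECORDED_ALLOCATED | 0x1000 == 0x40001000
*
* so its etype is 0x40001000 >> 30 == 1 and its length is the low
* 30 bits.
*/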
/*
* Truncate the last extent to match i_size. This function assumes
* that the preallocation extent is already truncated.
*/
void udf_truncate_tail_extent(struct inode *inode)
{
struct extent_position epos = {};
struct kernel_lb_addr eloc;
uint32_t elen, nelen;
uint64_t lbcount = 0;
int8_t etype = -1, netype;
int adsize;
struct udf_inode_info *iinfo = UDF_I(inode);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
inode->i_size == iinfo->i_lenExtents)
return;
/* Are we going to delete the file anyway? */
if (inode->i_nlink == 0)
return;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
adsize = sizeof(struct long_ad);
else
BUG();
/* Find the last extent in the file */
while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
etype = netype;
lbcount += elen;
if (lbcount > inode->i_size) {
if (lbcount - inode->i_size >= inode->i_sb->s_blocksize)
udf_warn(inode->i_sb,
"Too long extent after EOF in inode %u: i_size: %lld lbcount: %lld extent %u+%u\n",
(unsigned)inode->i_ino,
(long long)inode->i_size,
(long long)lbcount,
(unsigned)eloc.logicalBlockNum,
(unsigned)elen);
nelen = elen - (lbcount - inode->i_size);
epos.offset -= adsize;
extent_trunc(inode, &epos, &eloc, etype, elen, nelen);
epos.offset += adsize;
if (udf_next_aext(inode, &epos, &eloc, &elen, 1) != -1)
udf_err(inode->i_sb,
"Extent after EOF in inode %u\n",
(unsigned)inode->i_ino);
break;
}
}
/* This inode entry is in-memory only and thus we don't have to mark
* the inode dirty */
iinfo->i_lenExtents = inode->i_size;
brelse(epos.bh);
}
void udf_discard_prealloc(struct inode *inode)
{
struct extent_position epos = {};
struct extent_position prev_epos = {};
struct kernel_lb_addr eloc;
uint32_t elen;
uint64_t lbcount = 0;
int8_t etype = -1;
struct udf_inode_info *iinfo = UDF_I(inode);
int bsize = i_blocksize(inode);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
ALIGN(inode->i_size, bsize) == ALIGN(iinfo->i_lenExtents, bsize))
return;
epos.block = iinfo->i_location;
/* Find the last extent in the file */
while (udf_next_aext(inode, &epos, &eloc, &elen, 0) != -1) {
brelse(prev_epos.bh);
prev_epos = epos;
if (prev_epos.bh)
get_bh(prev_epos.bh);
etype = udf_next_aext(inode, &epos, &eloc, &elen, 1);
lbcount += elen;
}
if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
lbcount -= elen;
udf_delete_aext(inode, prev_epos);
udf_free_blocks(inode->i_sb, inode, &eloc, 0,
DIV_ROUND_UP(elen, bsize));
}
/* This inode entry is in-memory only and thus we don't have to mark
* the inode dirty */
iinfo->i_lenExtents = lbcount;
brelse(epos.bh);
brelse(prev_epos.bh);
}
static void udf_update_alloc_ext_desc(struct inode *inode,
struct extent_position *epos,
u32 lenalloc)
{
struct super_block *sb = inode->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
struct allocExtDesc *aed = (struct allocExtDesc *) (epos->bh->b_data);
int len = sizeof(struct allocExtDesc);
aed->lengthAllocDescs = cpu_to_le32(lenalloc);
if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || sbi->s_udfrev >= 0x0201)
len += lenalloc;
udf_update_tag(epos->bh->b_data, len);
mark_buffer_dirty_inode(epos->bh, inode);
}
/*
* Truncate extents of inode to inode->i_size. This function can be used only
* for making file shorter. For making file longer, udf_extend_file() has to
* be used.
*/
int udf_truncate_extents(struct inode *inode)
{
struct extent_position epos;
struct kernel_lb_addr eloc, neloc = {};
uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc;
int8_t etype;
struct super_block *sb = inode->i_sb;
sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset;
loff_t byte_offset;
int adsize;
struct udf_inode_info *iinfo = UDF_I(inode);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
adsize = sizeof(struct long_ad);
else
BUG();
etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
byte_offset = (offset << sb->s_blocksize_bits) +
(inode->i_size & (sb->s_blocksize - 1));
if (etype == -1) {
/* We should extend the file? */
WARN_ON(byte_offset);
return 0;
}
epos.offset -= adsize;
extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
epos.offset += adsize;
if (byte_offset)
lenalloc = epos.offset;
else
lenalloc = epos.offset - adsize;
if (!epos.bh)
lenalloc -= udf_file_entry_alloc_offset(inode);
else
lenalloc -= sizeof(struct allocExtDesc);
while ((etype = udf_current_aext(inode, &epos, &eloc,
&elen, 0)) != -1) {
if (etype == (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
udf_write_aext(inode, &epos, &neloc, nelen, 0);
if (indirect_ext_len) {
/* We managed to free all extents in the
* indirect extent - free it too */
BUG_ON(!epos.bh);
udf_free_blocks(sb, NULL, &epos.block,
0, indirect_ext_len);
} else if (!epos.bh) {
iinfo->i_lenAlloc = lenalloc;
mark_inode_dirty(inode);
} else
udf_update_alloc_ext_desc(inode,
&epos, lenalloc);
brelse(epos.bh);
epos.offset = sizeof(struct allocExtDesc);
epos.block = eloc;
epos.bh = sb_bread(sb,
udf_get_lb_pblock(sb, &eloc, 0));
/* Error reading indirect block? */
if (!epos.bh)
return -EIO;
if (elen)
indirect_ext_len =
(elen + sb->s_blocksize - 1) >>
sb->s_blocksize_bits;
else
indirect_ext_len = 1;
} else {
extent_trunc(inode, &epos, &eloc, etype, elen, 0);
epos.offset += adsize;
}
}
if (indirect_ext_len) {
BUG_ON(!epos.bh);
udf_free_blocks(sb, NULL, &epos.block, 0, indirect_ext_len);
} else if (!epos.bh) {
iinfo->i_lenAlloc = lenalloc;
mark_inode_dirty(inode);
} else
udf_update_alloc_ext_desc(inode, &epos, lenalloc);
iinfo->i_lenExtents = inode->i_size;
brelse(epos.bh);
return 0;
}
/* linux-master: fs/udf/truncate.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* namei.c
*
* PURPOSE
* Inode name handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* (C) 1998-2004 Ben Fennema
* (C) 1999-2000 Stelias Computing Inc
*
* HISTORY
*
* 12/12/98 blf Created. Split out the lookup code from dir.c
* 04/19/99 blf link, mknod, symlink support
*/
#include "udfdecl.h"
#include "udf_i.h"
#include "udf_sb.h"
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/crc-itu-t.h>
#include <linux/exportfs.h>
#include <linux/iversion.h>
static inline int udf_match(int len1, const unsigned char *name1, int len2,
const unsigned char *name2)
{
if (len1 != len2)
return 0;
return !memcmp(name1, name2, len1);
}
/**
* udf_fiiter_find_entry - find entry in given directory.
*
* @dir: directory inode to search in
* @child: qstr of the name
* @iter: iter to use for searching
*
* This function searches in the directory @dir for a file name @child. When
* found, @iter points to the position in the directory with given entry.
*
* Returns 0 on success, < 0 on error (including -ENOENT).
*/
static int udf_fiiter_find_entry(struct inode *dir, const struct qstr *child,
struct udf_fileident_iter *iter)
{
int flen;
unsigned char *fname = NULL;
struct super_block *sb = dir->i_sb;
int isdotdot = child->len == 2 &&
child->name[0] == '.' && child->name[1] == '.';
int ret;
fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
if (!fname)
return -ENOMEM;
for (ret = udf_fiiter_init(iter, dir, 0);
!ret && iter->pos < dir->i_size;
ret = udf_fiiter_advance(iter)) {
if (iter->fi.fileCharacteristics & FID_FILE_CHAR_DELETED) {
if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
continue;
}
if (iter->fi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) {
if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
continue;
}
if ((iter->fi.fileCharacteristics & FID_FILE_CHAR_PARENT) &&
isdotdot)
goto out_ok;
if (!iter->fi.lengthFileIdent)
continue;
flen = udf_get_filename(sb, iter->name,
iter->fi.lengthFileIdent, fname, UDF_NAME_LEN);
if (flen < 0) {
ret = flen;
goto out_err;
}
if (udf_match(flen, fname, child->len, child->name))
goto out_ok;
}
if (!ret)
ret = -ENOENT;
out_err:
udf_fiiter_release(iter);
out_ok:
kfree(fname);
return ret;
}
static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode = NULL;
struct udf_fileident_iter iter;
int err;
if (dentry->d_name.len > UDF_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
err = udf_fiiter_find_entry(dir, &dentry->d_name, &iter);
if (err < 0 && err != -ENOENT)
return ERR_PTR(err);
if (err == 0) {
struct kernel_lb_addr loc;
loc = lelb_to_cpu(iter.fi.icb.extLocation);
udf_fiiter_release(&iter);
inode = udf_iget(dir->i_sb, &loc);
if (IS_ERR(inode))
return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
}
static int udf_expand_dir_adinicb(struct inode *inode, udf_pblk_t *block)
{
udf_pblk_t newblock;
struct buffer_head *dbh = NULL;
struct kernel_lb_addr eloc;
struct extent_position epos;
uint8_t alloctype;
struct udf_inode_info *iinfo = UDF_I(inode);
struct udf_fileident_iter iter;
uint8_t *impuse;
int ret;
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
alloctype = ICBTAG_FLAG_AD_SHORT;
else
alloctype = ICBTAG_FLAG_AD_LONG;
if (!inode->i_size) {
iinfo->i_alloc_type = alloctype;
mark_inode_dirty(inode);
return 0;
}
/* alloc block, and copy data to it */
*block = udf_new_block(inode->i_sb, inode,
iinfo->i_location.partitionReferenceNum,
iinfo->i_location.logicalBlockNum, &ret);
if (!(*block))
return ret;
newblock = udf_get_pblock(inode->i_sb, *block,
iinfo->i_location.partitionReferenceNum,
0);
if (newblock == 0xffffffff)
return -EFSCORRUPTED;
dbh = sb_getblk(inode->i_sb, newblock);
if (!dbh)
return -ENOMEM;
lock_buffer(dbh);
memcpy(dbh->b_data, iinfo->i_data, inode->i_size);
memset(dbh->b_data + inode->i_size, 0,
inode->i_sb->s_blocksize - inode->i_size);
set_buffer_uptodate(dbh);
unlock_buffer(dbh);
/* Drop inline data, add block instead */
iinfo->i_alloc_type = alloctype;
memset(iinfo->i_data + iinfo->i_lenEAttr, 0, iinfo->i_lenAlloc);
iinfo->i_lenAlloc = 0;
eloc.logicalBlockNum = *block;
eloc.partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
iinfo->i_lenExtents = inode->i_size;
epos.bh = NULL;
epos.block = iinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(inode);
ret = udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
brelse(epos.bh);
if (ret < 0) {
brelse(dbh);
udf_free_blocks(inode->i_sb, inode, &eloc, 0, 1);
return ret;
}
mark_inode_dirty(inode);
/* Now fixup tags in moved directory entries */
for (ret = udf_fiiter_init(&iter, inode, 0);
!ret && iter.pos < inode->i_size;
ret = udf_fiiter_advance(&iter)) {
iter.fi.descTag.tagLocation = cpu_to_le32(*block);
if (iter.fi.lengthOfImpUse != cpu_to_le16(0))
impuse = dbh->b_data + iter.pos +
sizeof(struct fileIdentDesc);
else
impuse = NULL;
udf_fiiter_write_fi(&iter, impuse);
}
brelse(dbh);
/*
* We don't expect the iteration to fail as the directory has been
* already verified to be correct
*/
WARN_ON_ONCE(ret);
udf_fiiter_release(&iter);
return 0;
}
static int udf_fiiter_add_entry(struct inode *dir, struct dentry *dentry,
struct udf_fileident_iter *iter)
{
struct udf_inode_info *dinfo = UDF_I(dir);
int nfidlen, namelen = 0;
int ret;
int off, blksize = 1 << dir->i_blkbits;
udf_pblk_t block;
char name[UDF_NAME_LEN_CS0];
if (dentry) {
if (!dentry->d_name.len)
return -EINVAL;
namelen = udf_put_filename(dir->i_sb, dentry->d_name.name,
dentry->d_name.len,
name, UDF_NAME_LEN_CS0);
if (!namelen)
return -ENAMETOOLONG;
}
nfidlen = ALIGN(sizeof(struct fileIdentDesc) + namelen, UDF_NAME_PAD);
for (ret = udf_fiiter_init(iter, dir, 0);
!ret && iter->pos < dir->i_size;
ret = udf_fiiter_advance(iter)) {
if (iter->fi.fileCharacteristics & FID_FILE_CHAR_DELETED) {
if (udf_dir_entry_len(&iter->fi) == nfidlen) {
iter->fi.descTag.tagSerialNum = cpu_to_le16(1);
iter->fi.fileVersionNum = cpu_to_le16(1);
iter->fi.fileCharacteristics = 0;
iter->fi.lengthFileIdent = namelen;
iter->fi.lengthOfImpUse = cpu_to_le16(0);
memcpy(iter->namebuf, name, namelen);
iter->name = iter->namebuf;
return 0;
}
}
}
if (ret) {
udf_fiiter_release(iter);
return ret;
}
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
blksize - udf_ext0_offset(dir) - iter->pos < nfidlen) {
udf_fiiter_release(iter);
ret = udf_expand_dir_adinicb(dir, &block);
if (ret)
return ret;
ret = udf_fiiter_init(iter, dir, dir->i_size);
if (ret < 0)
return ret;
}
/* Get blocknumber to use for entry tag */
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
block = dinfo->i_location.logicalBlockNum;
} else {
block = iter->eloc.logicalBlockNum +
((iter->elen - 1) >> dir->i_blkbits);
}
off = iter->pos & (blksize - 1);
if (!off)
off = blksize;
/* Entry fits into current block? */
if (blksize - udf_ext0_offset(dir) - off >= nfidlen)
goto store_fi;
ret = udf_fiiter_append_blk(iter);
if (ret) {
udf_fiiter_release(iter);
return ret;
}
/* Entry will be completely in the new block? Update tag location... */
if (!(iter->pos & (blksize - 1)))
block = iter->eloc.logicalBlockNum +
((iter->elen - 1) >> dir->i_blkbits);
store_fi:
memset(&iter->fi, 0, sizeof(struct fileIdentDesc));
if (UDF_SB(dir->i_sb)->s_udfrev >= 0x0200)
udf_new_tag((char *)(&iter->fi), TAG_IDENT_FID, 3, 1, block,
sizeof(struct tag));
else
udf_new_tag((char *)(&iter->fi), TAG_IDENT_FID, 2, 1, block,
sizeof(struct tag));
iter->fi.fileVersionNum = cpu_to_le16(1);
iter->fi.lengthFileIdent = namelen;
iter->fi.lengthOfImpUse = cpu_to_le16(0);
memcpy(iter->namebuf, name, namelen);
iter->name = iter->namebuf;
dir->i_size += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
dinfo->i_lenAlloc += nfidlen;
} else {
/* Truncate last extent to proper size */
udf_fiiter_update_elen(iter, iter->elen -
(dinfo->i_lenExtents - dir->i_size));
}
mark_inode_dirty(dir);
return 0;
}
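/*
* Illustrative arithmetic for nfidlen above (name length assumed): a
* file identifier descriptor is a fixed header of
* sizeof(struct fileIdentDesc) bytes (38) followed by the CS0-encoded
* name, padded to a multiple of UDF_NAME_PAD (4) bytes. With a 5-byte
* encoded name:
*
*	nfidlen = ALIGN(38 + 5, 4) = 44
*
* which is also why a deleted entry is only reused when its recorded
* length matches nfidlen exactly.
*/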
static void udf_fiiter_delete_entry(struct udf_fileident_iter *iter)
{
iter->fi.fileCharacteristics |= FID_FILE_CHAR_DELETED;
if (UDF_QUERY_FLAG(iter->dir->i_sb, UDF_FLAG_STRICT))
memset(&iter->fi.icb, 0x00, sizeof(struct long_ad));
udf_fiiter_write_fi(iter, NULL);
}
static void udf_add_fid_counter(struct super_block *sb, bool dir, int val)
{
struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
if (!lvidiu)
return;
mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
if (dir)
le32_add_cpu(&lvidiu->numDirs, val);
else
le32_add_cpu(&lvidiu->numFiles, val);
udf_updated_lvid(sb);
mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
}
static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
{
struct udf_inode_info *iinfo = UDF_I(inode);
struct inode *dir = d_inode(dentry->d_parent);
struct udf_fileident_iter iter;
int err;
err = udf_fiiter_add_entry(dir, dentry, &iter);
if (err) {
inode_dec_link_count(inode);
discard_new_inode(inode);
return err;
}
iter.fi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
iter.fi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
udf_fiiter_write_fi(&iter, NULL);
dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
udf_fiiter_release(&iter);
udf_add_fid_counter(dir->i_sb, false, 1);
d_instantiate_new(dentry, inode);
return 0;
}
static int udf_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode = udf_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
mark_inode_dirty(inode);
return udf_add_nondir(dentry, inode);
}
static int udf_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
struct file *file, umode_t mode)
{
struct inode *inode = udf_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_data.a_ops = &udf_aops;
inode->i_op = &udf_file_inode_operations;
inode->i_fop = &udf_file_operations;
mark_inode_dirty(inode);
d_tmpfile(file, inode);
unlock_new_inode(inode);
return finish_open_simple(file, 0);
}
static int udf_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct inode *inode;
if (!old_valid_dev(rdev))
return -EINVAL;
inode = udf_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
init_special_inode(inode, mode, rdev);
return udf_add_nondir(dentry, inode);
}
static int udf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct udf_fileident_iter iter;
int err;
struct udf_inode_info *dinfo = UDF_I(dir);
struct udf_inode_info *iinfo;
inode = udf_new_inode(dir, S_IFDIR | mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
iinfo = UDF_I(inode);
inode->i_op = &udf_dir_inode_operations;
inode->i_fop = &udf_dir_operations;
err = udf_fiiter_add_entry(inode, NULL, &iter);
if (err) {
clear_nlink(inode);
discard_new_inode(inode);
return err;
}
set_nlink(inode, 2);
iter.fi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
iter.fi.icb.extLocation = cpu_to_lelb(dinfo->i_location);
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
cpu_to_le32(dinfo->i_unique & 0x00000000FFFFFFFFUL);
iter.fi.fileCharacteristics =
FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT;
udf_fiiter_write_fi(&iter, NULL);
udf_fiiter_release(&iter);
mark_inode_dirty(inode);
err = udf_fiiter_add_entry(dir, dentry, &iter);
if (err) {
clear_nlink(inode);
discard_new_inode(inode);
return err;
}
iter.fi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
iter.fi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
iter.fi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY;
udf_fiiter_write_fi(&iter, NULL);
udf_fiiter_release(&iter);
udf_add_fid_counter(dir->i_sb, true, 1);
inc_nlink(dir);
dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
d_instantiate_new(dentry, inode);
return 0;
}
static int empty_dir(struct inode *dir)
{
struct udf_fileident_iter iter;
int ret;
for (ret = udf_fiiter_init(&iter, dir, 0);
!ret && iter.pos < dir->i_size;
ret = udf_fiiter_advance(&iter)) {
if (iter.fi.lengthFileIdent &&
!(iter.fi.fileCharacteristics & FID_FILE_CHAR_DELETED)) {
udf_fiiter_release(&iter);
return 0;
}
}
udf_fiiter_release(&iter);
return 1;
}
static int udf_rmdir(struct inode *dir, struct dentry *dentry)
{
int ret;
struct inode *inode = d_inode(dentry);
struct udf_fileident_iter iter;
struct kernel_lb_addr tloc;
ret = udf_fiiter_find_entry(dir, &dentry->d_name, &iter);
if (ret)
goto out;
ret = -EFSCORRUPTED;
tloc = lelb_to_cpu(iter.fi.icb.extLocation);
if (udf_get_lb_pblock(dir->i_sb, &tloc, 0) != inode->i_ino)
goto end_rmdir;
ret = -ENOTEMPTY;
if (!empty_dir(inode))
goto end_rmdir;
udf_fiiter_delete_entry(&iter);
if (inode->i_nlink != 2)
udf_warn(inode->i_sb, "empty directory has nlink != 2 (%u)\n",
inode->i_nlink);
clear_nlink(inode);
inode->i_size = 0;
inode_dec_link_count(dir);
udf_add_fid_counter(dir->i_sb, true, -1);
dir->i_mtime = inode_set_ctime_to_ts(dir,
inode_set_ctime_current(inode));
mark_inode_dirty(dir);
ret = 0;
end_rmdir:
udf_fiiter_release(&iter);
out:
return ret;
}
static int udf_unlink(struct inode *dir, struct dentry *dentry)
{
int ret;
struct inode *inode = d_inode(dentry);
struct udf_fileident_iter iter;
struct kernel_lb_addr tloc;
ret = udf_fiiter_find_entry(dir, &dentry->d_name, &iter);
if (ret)
goto out;
ret = -EFSCORRUPTED;
tloc = lelb_to_cpu(iter.fi.icb.extLocation);
if (udf_get_lb_pblock(dir->i_sb, &tloc, 0) != inode->i_ino)
goto end_unlink;
if (!inode->i_nlink) {
udf_debug("Deleting nonexistent file (%lu), %u\n",
inode->i_ino, inode->i_nlink);
set_nlink(inode, 1);
}
udf_fiiter_delete_entry(&iter);
dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
inode_dec_link_count(inode);
udf_add_fid_counter(dir->i_sb, false, -1);
inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
ret = 0;
end_unlink:
udf_fiiter_release(&iter);
out:
return ret;
}
static int udf_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
struct inode *inode = udf_new_inode(dir, S_IFLNK | 0777);
struct pathComponent *pc;
const char *compstart;
struct extent_position epos = {};
int eoffset, elen = 0;
uint8_t *ea;
int err;
udf_pblk_t block;
unsigned char *name = NULL;
int namelen;
struct udf_inode_info *iinfo;
struct super_block *sb = dir->i_sb;
if (IS_ERR(inode))
return PTR_ERR(inode);
iinfo = UDF_I(inode);
down_write(&iinfo->i_data_sem);
name = kmalloc(UDF_NAME_LEN_CS0, GFP_NOFS);
if (!name) {
err = -ENOMEM;
goto out_no_entry;
}
inode->i_data.a_ops = &udf_symlink_aops;
inode->i_op = &udf_symlink_inode_operations;
inode_nohighmem(inode);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
struct kernel_lb_addr eloc;
uint32_t bsize;
block = udf_new_block(sb, inode,
iinfo->i_location.partitionReferenceNum,
iinfo->i_location.logicalBlockNum, &err);
if (!block)
goto out_no_entry;
epos.block = iinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(inode);
epos.bh = NULL;
eloc.logicalBlockNum = block;
eloc.partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
bsize = sb->s_blocksize;
iinfo->i_lenExtents = bsize;
err = udf_add_aext(inode, &epos, &eloc, bsize, 0);
brelse(epos.bh);
if (err < 0) {
udf_free_blocks(sb, inode, &eloc, 0, 1);
goto out_no_entry;
}
block = udf_get_pblock(sb, block,
iinfo->i_location.partitionReferenceNum,
0);
epos.bh = sb_getblk(sb, block);
if (unlikely(!epos.bh)) {
err = -ENOMEM;
udf_free_blocks(sb, inode, &eloc, 0, 1);
goto out_no_entry;
}
lock_buffer(epos.bh);
memset(epos.bh->b_data, 0x00, bsize);
set_buffer_uptodate(epos.bh);
unlock_buffer(epos.bh);
mark_buffer_dirty_inode(epos.bh, inode);
ea = epos.bh->b_data + udf_ext0_offset(inode);
} else
ea = iinfo->i_data + iinfo->i_lenEAttr;
eoffset = sb->s_blocksize - udf_ext0_offset(inode);
pc = (struct pathComponent *)ea;
if (*symname == '/') {
do {
symname++;
} while (*symname == '/');
pc->componentType = 1;
pc->lengthComponentIdent = 0;
pc->componentFileVersionNum = 0;
elen += sizeof(struct pathComponent);
}
err = -ENAMETOOLONG;
while (*symname) {
if (elen + sizeof(struct pathComponent) > eoffset)
goto out_no_entry;
pc = (struct pathComponent *)(ea + elen);
compstart = symname;
do {
symname++;
} while (*symname && *symname != '/');
pc->componentType = 5;
pc->lengthComponentIdent = 0;
pc->componentFileVersionNum = 0;
if (compstart[0] == '.') {
if ((symname - compstart) == 1)
pc->componentType = 4;
else if ((symname - compstart) == 2 &&
compstart[1] == '.')
pc->componentType = 3;
}
if (pc->componentType == 5) {
namelen = udf_put_filename(sb, compstart,
symname - compstart,
name, UDF_NAME_LEN_CS0);
if (!namelen)
goto out_no_entry;
if (elen + sizeof(struct pathComponent) + namelen >
eoffset)
goto out_no_entry;
else
pc->lengthComponentIdent = namelen;
memcpy(pc->componentIdent, name, namelen);
}
elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
if (*symname) {
do {
symname++;
} while (*symname == '/');
}
}
brelse(epos.bh);
inode->i_size = elen;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
iinfo->i_lenAlloc = inode->i_size;
else
udf_truncate_tail_extent(inode);
mark_inode_dirty(inode);
up_write(&iinfo->i_data_sem);
err = udf_add_nondir(dentry, inode);
out:
kfree(name);
return err;
out_no_entry:
up_write(&iinfo->i_data_sem);
inode_dec_link_count(inode);
discard_new_inode(inode);
goto out;
}
static int udf_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct inode *inode = d_inode(old_dentry);
struct udf_fileident_iter iter;
int err;
err = udf_fiiter_add_entry(dir, dentry, &iter);
if (err)
return err;
iter.fi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
iter.fi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location);
if (UDF_SB(inode->i_sb)->s_lvid_bh) {
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
cpu_to_le32(lvid_get_unique_id(inode->i_sb));
}
udf_fiiter_write_fi(&iter, NULL);
udf_fiiter_release(&iter);
inc_nlink(inode);
udf_add_fid_counter(dir->i_sb, false, 1);
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
ihold(inode);
d_instantiate(dentry, inode);
return 0;
}
/* Anybody can rename anything with this: the permission checks are left to the
* higher-level routines.
*/
static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
struct inode *old_inode = d_inode(old_dentry);
struct inode *new_inode = d_inode(new_dentry);
struct udf_fileident_iter oiter, niter, diriter;
bool has_diriter = false;
int retval;
struct kernel_lb_addr tloc;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
retval = udf_fiiter_find_entry(old_dir, &old_dentry->d_name, &oiter);
if (retval)
return retval;
tloc = lelb_to_cpu(oiter.fi.icb.extLocation);
if (udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) != old_inode->i_ino) {
retval = -ENOENT;
goto out_oiter;
}
if (S_ISDIR(old_inode->i_mode)) {
if (new_inode) {
retval = -ENOTEMPTY;
if (!empty_dir(new_inode))
goto out_oiter;
}
retval = udf_fiiter_find_entry(old_inode, &dotdot_name,
&diriter);
if (retval == -ENOENT) {
udf_err(old_inode->i_sb,
"directory (ino %lu) has no '..' entry\n",
old_inode->i_ino);
retval = -EFSCORRUPTED;
}
if (retval)
goto out_oiter;
has_diriter = true;
tloc = lelb_to_cpu(diriter.fi.icb.extLocation);
if (udf_get_lb_pblock(old_inode->i_sb, &tloc, 0) !=
old_dir->i_ino) {
retval = -EFSCORRUPTED;
udf_err(old_inode->i_sb,
"directory (ino %lu) has parent entry pointing to another inode (%lu != %u)\n",
old_inode->i_ino, old_dir->i_ino,
udf_get_lb_pblock(old_inode->i_sb, &tloc, 0));
goto out_oiter;
}
}
retval = udf_fiiter_find_entry(new_dir, &new_dentry->d_name, &niter);
if (retval && retval != -ENOENT)
goto out_oiter;
/* Entry found but not passed by VFS? */
if (!retval && !new_inode) {
retval = -EFSCORRUPTED;
udf_fiiter_release(&niter);
goto out_oiter;
}
/* Entry not found? Need to add one... */
if (retval) {
udf_fiiter_release(&niter);
retval = udf_fiiter_add_entry(new_dir, new_dentry, &niter);
if (retval)
goto out_oiter;
}
/*
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
inode_set_ctime_current(old_inode);
mark_inode_dirty(old_inode);
/*
* ok, that's it
*/
niter.fi.fileVersionNum = oiter.fi.fileVersionNum;
niter.fi.fileCharacteristics = oiter.fi.fileCharacteristics;
memcpy(&(niter.fi.icb), &(oiter.fi.icb), sizeof(oiter.fi.icb));
udf_fiiter_write_fi(&niter, NULL);
udf_fiiter_release(&niter);
/*
* The old entry may have moved due to new entry allocation. Find it
* again.
*/
udf_fiiter_release(&oiter);
retval = udf_fiiter_find_entry(old_dir, &old_dentry->d_name, &oiter);
if (retval) {
udf_err(old_dir->i_sb,
"failed to find renamed entry again in directory (ino %lu)\n",
old_dir->i_ino);
} else {
udf_fiiter_delete_entry(&oiter);
udf_fiiter_release(&oiter);
}
if (new_inode) {
inode_set_ctime_current(new_inode);
inode_dec_link_count(new_inode);
udf_add_fid_counter(old_dir->i_sb, S_ISDIR(new_inode->i_mode),
-1);
}
old_dir->i_mtime = inode_set_ctime_current(old_dir);
new_dir->i_mtime = inode_set_ctime_current(new_dir);
mark_inode_dirty(old_dir);
mark_inode_dirty(new_dir);
if (has_diriter) {
diriter.fi.icb.extLocation =
cpu_to_lelb(UDF_I(new_dir)->i_location);
udf_update_tag((char *)&diriter.fi,
udf_dir_entry_len(&diriter.fi));
udf_fiiter_write_fi(&diriter, NULL);
udf_fiiter_release(&diriter);
inode_dec_link_count(old_dir);
if (new_inode)
inode_dec_link_count(new_inode);
else {
inc_nlink(new_dir);
mark_inode_dirty(new_dir);
}
}
return 0;
out_oiter:
if (has_diriter)
udf_fiiter_release(&diriter);
udf_fiiter_release(&oiter);
return retval;
}
static struct dentry *udf_get_parent(struct dentry *child)
{
struct kernel_lb_addr tloc;
struct inode *inode = NULL;
struct udf_fileident_iter iter;
int err;
err = udf_fiiter_find_entry(d_inode(child), &dotdot_name, &iter);
if (err)
return ERR_PTR(err);
tloc = lelb_to_cpu(iter.fi.icb.extLocation);
udf_fiiter_release(&iter);
inode = udf_iget(child->d_sb, &tloc);
if (IS_ERR(inode))
return ERR_CAST(inode);
return d_obtain_alias(inode);
}
static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block,
u16 partref, __u32 generation)
{
struct inode *inode;
struct kernel_lb_addr loc;
if (block == 0)
return ERR_PTR(-ESTALE);
loc.logicalBlockNum = block;
loc.partitionReferenceNum = partref;
inode = udf_iget(sb, &loc);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
return d_obtain_alias(inode);
}
static struct dentry *udf_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
if (fh_len < 3 ||
(fh_type != FILEID_UDF_WITH_PARENT &&
fh_type != FILEID_UDF_WITHOUT_PARENT))
return NULL;
return udf_nfs_get_inode(sb, fid->udf.block, fid->udf.partref,
fid->udf.generation);
}
static struct dentry *udf_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
if (fh_len < 5 || fh_type != FILEID_UDF_WITH_PARENT)
return NULL;
return udf_nfs_get_inode(sb, fid->udf.parent_block,
fid->udf.parent_partref,
fid->udf.parent_generation);
}
static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
struct inode *parent)
{
int len = *lenp;
struct kernel_lb_addr location = UDF_I(inode)->i_location;
struct fid *fid = (struct fid *)fh;
int type = FILEID_UDF_WITHOUT_PARENT;
if (parent && (len < 5)) {
*lenp = 5;
return FILEID_INVALID;
} else if (len < 3) {
*lenp = 3;
return FILEID_INVALID;
}
*lenp = 3;
fid->udf.block = location.logicalBlockNum;
fid->udf.partref = location.partitionReferenceNum;
fid->udf.parent_partref = 0;
fid->udf.generation = inode->i_generation;
if (parent) {
location = UDF_I(parent)->i_location;
fid->udf.parent_block = location.logicalBlockNum;
fid->udf.parent_partref = location.partitionReferenceNum;
fid->udf.parent_generation = inode->i_generation;
*lenp = 5;
type = FILEID_UDF_WITH_PARENT;
}
return type;
}
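/*
* Sketch of the resulting handle layout (assuming the fid::udf view in
* <linux/exportfs.h>): without a parent the handle is three 32-bit
* words - block, partref packed with parent_partref, and generation -
* hence *lenp = 3. With a parent, parent_block and parent_generation
* are appended for a total of five words. parent_partref is zeroed
* even in the 3-word case because it shares the second word with
* partref.
*/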
const struct export_operations udf_export_ops = {
.encode_fh = udf_encode_fh,
.fh_to_dentry = udf_fh_to_dentry,
.fh_to_parent = udf_fh_to_parent,
.get_parent = udf_get_parent,
};
const struct inode_operations udf_dir_inode_operations = {
.lookup = udf_lookup,
.create = udf_create,
.link = udf_link,
.unlink = udf_unlink,
.symlink = udf_symlink,
.mkdir = udf_mkdir,
.rmdir = udf_rmdir,
.mknod = udf_mknod,
.rename = udf_rename,
.tmpfile = udf_tmpfile,
};
/* linux-master: fs/udf/namei.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* symlink.c
*
* PURPOSE
* Symlink handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* (C) 1998-2001 Ben Fennema
* (C) 1999 Stelias Computing Inc
*
* HISTORY
*
* 04/16/99 blf Created.
*
*/
#include "udfdecl.h"
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/pagemap.h>
#include "udf_i.h"
static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
int fromlen, unsigned char *to, int tolen)
{
struct pathComponent *pc;
int elen = 0;
int comp_len;
unsigned char *p = to;
/* Reserve one byte for terminating \0 */
tolen--;
while (elen < fromlen) {
pc = (struct pathComponent *)(from + elen);
elen += sizeof(struct pathComponent);
switch (pc->componentType) {
case 1:
/*
* Symlink points to some place which should be agreed
* upon between originator and receiver of the media. Ignore.
*/
if (pc->lengthComponentIdent > 0) {
elen += pc->lengthComponentIdent;
break;
}
fallthrough;
case 2:
if (tolen == 0)
return -ENAMETOOLONG;
p = to;
*p++ = '/';
tolen--;
break;
case 3:
if (tolen < 3)
return -ENAMETOOLONG;
memcpy(p, "../", 3);
p += 3;
tolen -= 3;
break;
case 4:
if (tolen < 2)
return -ENAMETOOLONG;
memcpy(p, "./", 2);
p += 2;
tolen -= 2;
/* that would be . - just ignore */
break;
case 5:
elen += pc->lengthComponentIdent;
if (elen > fromlen)
return -EIO;
comp_len = udf_get_filename(sb, pc->componentIdent,
pc->lengthComponentIdent,
p, tolen);
if (comp_len < 0)
return comp_len;
p += comp_len;
tolen -= comp_len;
if (tolen == 0)
return -ENAMETOOLONG;
*p++ = '/';
tolen--;
break;
}
}
if (p > to + 1)
p[-1] = '\0';
else
p[0] = '\0';
return 0;
}
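/*
* Worked example of the decoding above (component sequence assumed): a
* symlink recorded as
*
*	[type 2] [type 5 "usr"] [type 5 "lib"]
*
* decodes to "/usr/lib". Type 2 (or a type 1 component with an empty
* identifier, via the fallthrough) restarts the buffer with '/', each
* type 5 component appends its translated name plus '/', and the final
* trailing '/' is overwritten with the terminating NUL. Types 3 and 4
* map to "../" and "./" respectively.
*/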
static int udf_symlink_filler(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
unsigned char *symlink;
int err = 0;
unsigned char *p = page_address(page);
struct udf_inode_info *iinfo = UDF_I(inode);
/* We don't support symlinks longer than one block */
if (inode->i_size > inode->i_sb->s_blocksize) {
err = -ENAMETOOLONG;
goto out_unlock;
}
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
symlink = iinfo->i_data + iinfo->i_lenEAttr;
} else {
bh = udf_bread(inode, 0, 0, &err);
if (!bh) {
if (!err)
err = -EFSCORRUPTED;
goto out_err;
}
symlink = bh->b_data;
}
err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
brelse(bh);
if (err)
goto out_err;
SetPageUptodate(page);
unlock_page(page);
return 0;
out_err:
SetPageError(page);
out_unlock:
unlock_page(page);
return err;
}
static int udf_symlink_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
struct inode *inode = d_backing_inode(dentry);
struct page *page;
generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
page = read_mapping_page(inode->i_mapping, 0, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
/*
* UDF uses non-trivial encoding of symlinks so i_size does not match
* number of characters reported by readlink(2) which apparently some
* applications expect. Also POSIX says that "The value returned in the
* st_size field shall be the length of the contents of the symbolic
* link, and shall not count a trailing null if one is present." So
* let's report the length of string returned by readlink(2) for
* st_size.
*/
stat->size = strlen(page_address(page));
put_page(page);
return 0;
}
/*
* symlinks can't do much...
*/
const struct address_space_operations udf_symlink_aops = {
.read_folio = udf_symlink_filler,
};
const struct inode_operations udf_symlink_inode_operations = {
.get_link = page_get_link,
.getattr = udf_symlink_getattr,
};
/* linux-master: fs/udf/symlink.c */
// SPDX-License-Identifier: LGPL-2.0+
/* Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Eggert ([email protected]). */
/*
* dgb 10/02/98: ripped this from glibc source to help convert timestamps
* to unix time
* 10/04/98: added new table-based lookup after seeing how ugly
* the gnu code is
* blf 09/27/99: ripped out all the old code and inserted new table from
* John Brockmeyer (without leap second corrections)
* rewrote udf_stamp_to_time and fixed timezone accounting in
* udf_time_to_stamp.
*/
/*
* We don't take into account leap seconds. This may be correct or incorrect.
* For more NIST information (especially dealing with leap seconds), see:
* http://www.boulder.nist.gov/timefreq/pubs/bulletin/leapsecond.htm
*/
#include "udfdecl.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>
void
udf_disk_stamp_to_time(struct timespec64 *dest, struct timestamp src)
{
u16 typeAndTimezone = le16_to_cpu(src.typeAndTimezone);
u16 year = le16_to_cpu(src.year);
uint8_t type = typeAndTimezone >> 12;
int16_t offset;
if (type == 1) {
offset = typeAndTimezone << 4;
/* sign-extend the 12-bit offset */
offset = (offset >> 4);
if (offset == -2047) /* unspecified offset */
offset = 0;
} else
offset = 0;
dest->tv_sec = mktime64(year, src.month, src.day, src.hour, src.minute,
src.second);
dest->tv_sec -= offset * 60;
dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
src.hundredsOfMicroseconds * 100 + src.microseconds);
/*
* Sanitize nanosecond field since reportedly some filesystems are
* recorded with bogus sub-second values.
*/
dest->tv_nsec %= NSEC_PER_SEC;
}
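/*
* Worked example of the sign extension above (offset assumed): a
* recorded zone of UTC-1 stores -60 in the low 12 bits, so for type 1
* typeAndTimezone == 0x1FC4. Then
*
*	offset = 0x1FC4 << 4;	truncated to int16_t: 0xFC40 == -960
*	offset = offset >> 4;	arithmetic shift: -60 minutes
*
* which recovers the signed offset; the reserved value -2047 means the
* offset was not specified and is treated as UTC.
*/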
void
udf_time_to_disk_stamp(struct timestamp *dest, struct timespec64 ts)
{
time64_t seconds;
int16_t offset;
struct tm tm;
offset = -sys_tz.tz_minuteswest;
dest->typeAndTimezone = cpu_to_le16(0x1000 | (offset & 0x0FFF));
seconds = ts.tv_sec + offset * 60;
time64_to_tm(seconds, 0, &tm);
dest->year = cpu_to_le16(tm.tm_year + 1900);
dest->month = tm.tm_mon + 1;
dest->day = tm.tm_mday;
dest->hour = tm.tm_hour;
dest->minute = tm.tm_min;
dest->second = tm.tm_sec;
dest->centiseconds = ts.tv_nsec / 10000000;
dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 -
dest->centiseconds * 10000) / 100;
dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 -
dest->hundredsOfMicroseconds * 100);
}
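/*
* Worked example of the sub-second split above (tv_nsec assumed):
* ts.tv_nsec = 123456789 gives
*
*	centiseconds           = 123456789 / 10000000        = 12
*	hundredsOfMicroseconds = (123456 - 12 * 10000) / 100 = 34
*	microseconds           = 123456 - 120000 - 3400      = 56
*
* i.e. 120000 + 3400 + 56 = 123456 microseconds, matching
* ts.tv_nsec / 1000.
*/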
/* EOF */
/* linux-master: fs/udf/udftime.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* file.c
*
* PURPOSE
* File handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* (C) 1998-1999 Dave Boynton
* (C) 1998-2004 Ben Fennema
* (C) 1999-2000 Stelias Computing Inc
*
* HISTORY
*
* 10/02/98 dgb Attempt to integrate into udf.o
* 10/07/98 Switched to using generic_readpage, etc., like isofs
* And it works!
* 12/06/98 blf Added udf_file_read. uses generic_file_read for all cases but
* ICBTAG_FLAG_AD_IN_ICB.
* 04/06/99 64 bit file handling on 32 bit systems taken from ext2 file.c
* 05/12/99 Preliminary file write support
*/
#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include "udf_i.h"
#include "udf_sb.h"
static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
struct address_space *mapping = inode->i_mapping;
struct page *page = vmf->page;
loff_t size;
unsigned int end;
vm_fault_t ret = VM_FAULT_LOCKED;
int err;
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
filemap_invalidate_lock_shared(mapping);
lock_page(page);
size = i_size_read(inode);
if (page->mapping != inode->i_mapping || page_offset(page) >= size) {
unlock_page(page);
ret = VM_FAULT_NOPAGE;
goto out_unlock;
}
/* Space is already allocated for in-ICB file */
if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
goto out_dirty;
if (page->index == size >> PAGE_SHIFT)
end = size & ~PAGE_MASK;
else
end = PAGE_SIZE;
err = __block_write_begin(page, 0, end, udf_get_block);
if (err) {
unlock_page(page);
ret = vmf_fs_error(err);
goto out_unlock;
}
block_commit_write(page, 0, end);
out_dirty:
set_page_dirty(page);
wait_for_stable_page(page);
out_unlock:
filemap_invalidate_unlock_shared(mapping);
sb_end_pagefault(inode->i_sb);
return ret;
}
static const struct vm_operations_struct udf_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = udf_page_mkwrite,
};
static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
ssize_t retval;
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct udf_inode_info *iinfo = UDF_I(inode);
inode_lock(inode);
retval = generic_write_checks(iocb, from);
if (retval <= 0)
goto out;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
iocb->ki_pos + iov_iter_count(from))) {
filemap_invalidate_lock(inode->i_mapping);
retval = udf_expand_file_adinicb(inode);
filemap_invalidate_unlock(inode->i_mapping);
if (retval)
goto out;
}
retval = __generic_file_write_iter(iocb, from);
out:
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0) {
down_write(&iinfo->i_data_sem);
iinfo->i_lenAlloc = inode->i_size;
up_write(&iinfo->i_data_sem);
}
inode_unlock(inode);
if (retval > 0) {
mark_inode_dirty(inode);
retval = generic_write_sync(iocb, retval);
}
return retval;
}
long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
long old_block, new_block;
int result;
if (file_permission(filp, MAY_READ) != 0) {
udf_debug("no permission to access inode %lu\n", inode->i_ino);
return -EPERM;
}
if (!arg && ((cmd == UDF_GETVOLIDENT) || (cmd == UDF_GETEASIZE) ||
(cmd == UDF_RELOCATE_BLOCKS) || (cmd == UDF_GETEABLOCK))) {
udf_debug("invalid argument to udf_ioctl\n");
return -EINVAL;
}
switch (cmd) {
case UDF_GETVOLIDENT:
if (copy_to_user((char __user *)arg,
UDF_SB(inode->i_sb)->s_volume_ident, 32))
return -EFAULT;
return 0;
case UDF_RELOCATE_BLOCKS:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(old_block, (long __user *)arg))
return -EFAULT;
result = udf_relocate_blocks(inode->i_sb,
old_block, &new_block);
if (result == 0)
result = put_user(new_block, (long __user *)arg);
return result;
case UDF_GETEASIZE:
return put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg);
case UDF_GETEABLOCK:
return copy_to_user((char __user *)arg,
UDF_I(inode)->i_data,
UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0;
default:
return -ENOIOCTLCMD;
}
return 0;
}
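/*
* Illustrative userspace sketch for the ioctls handled above (not part
* of this file; assumes the ioctl numbers exported through
* include/uapi/linux/udf_fs_i.h):
*
*	char ident[32];
*	int fd = open("/mnt/udf/file", O_RDONLY);
*
*	if (fd >= 0 && ioctl(fd, UDF_GETVOLIDENT, ident) == 0)
*		printf("volume ident: %.32s\n", ident);
*/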
static int udf_release_file(struct inode *inode, struct file *filp)
{
if (filp->f_mode & FMODE_WRITE &&
atomic_read(&inode->i_writecount) == 1) {
/*
* Grab i_mutex to avoid races with writes changing i_size
* while we are running.
*/
inode_lock(inode);
down_write(&UDF_I(inode)->i_data_sem);
udf_discard_prealloc(inode);
udf_truncate_tail_extent(inode);
up_write(&UDF_I(inode)->i_data_sem);
inode_unlock(inode);
}
return 0;
}
static int udf_file_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
vma->vm_ops = &udf_file_vm_ops;
return 0;
}
const struct file_operations udf_file_operations = {
.read_iter = generic_file_read_iter,
.unlocked_ioctl = udf_ioctl,
.open = generic_file_open,
.mmap = udf_file_mmap,
.write_iter = udf_file_write_iter,
.release = udf_release_file,
.fsync = generic_file_fsync,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = generic_file_llseek,
};
static int udf_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
int error;
error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (error)
return error;
if ((attr->ia_valid & ATTR_UID) &&
UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET) &&
!uid_eq(attr->ia_uid, UDF_SB(sb)->s_uid))
return -EPERM;
if ((attr->ia_valid & ATTR_GID) &&
UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET) &&
!gid_eq(attr->ia_gid, UDF_SB(sb)->s_gid))
return -EPERM;
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
error = udf_setsize(inode, attr->ia_size);
if (error)
return error;
}
if (attr->ia_valid & ATTR_MODE)
udf_update_extra_perms(inode, attr->ia_mode);
setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
const struct inode_operations udf_file_inode_operations = {
.setattr = udf_setattr,
};
/* linux-master: fs/udf/file.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* ialloc.c
*
* PURPOSE
* Inode allocation handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* (C) 1998-2001 Ben Fennema
*
* HISTORY
*
* 02/24/99 blf Created.
*
*/
#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "udf_i.h"
#include "udf_sb.h"
void udf_free_inode(struct inode *inode)
{
udf_free_blocks(inode->i_sb, NULL, &UDF_I(inode)->i_location, 0, 1);
}
struct inode *udf_new_inode(struct inode *dir, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
struct inode *inode;
udf_pblk_t block;
uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
struct udf_inode_info *iinfo;
struct udf_inode_info *dinfo = UDF_I(dir);
int err;
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
iinfo = UDF_I(inode);
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
iinfo->i_efe = 1;
if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
iinfo->i_data = kzalloc(inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry),
GFP_KERNEL);
} else {
iinfo->i_efe = 0;
iinfo->i_data = kzalloc(inode->i_sb->s_blocksize -
sizeof(struct fileEntry),
GFP_KERNEL);
}
if (!iinfo->i_data) {
make_bad_inode(inode);
iput(inode);
return ERR_PTR(-ENOMEM);
}
err = -ENOSPC;
block = udf_new_block(dir->i_sb, NULL,
dinfo->i_location.partitionReferenceNum,
start, &err);
if (err) {
make_bad_inode(inode);
iput(inode);
return ERR_PTR(err);
}
iinfo->i_unique = lvid_get_unique_id(sb);
inode->i_generation = iinfo->i_unique;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
inode->i_uid = sbi->s_uid;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
inode->i_gid = sbi->s_gid;
iinfo->i_location.logicalBlockNum = block;
iinfo->i_location.partitionReferenceNum =
dinfo->i_location.partitionReferenceNum;
inode->i_ino = udf_get_lb_pblock(sb, &iinfo->i_location, 0);
inode->i_blocks = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenAlloc = 0;
iinfo->i_use = 0;
iinfo->i_checkpoint = 1;
iinfo->i_extraPerms = FE_PERM_U_CHATTR;
udf_update_extra_perms(inode, mode);
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
iinfo->i_crtime = inode->i_mtime;
if (unlikely(insert_inode_locked(inode) < 0)) {
make_bad_inode(inode);
iput(inode);
return ERR_PTR(-EIO);
}
mark_inode_dirty(inode);
return inode;
}
/* linux-master: fs/udf/ialloc.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* misc.c
*
* PURPOSE
* Miscellaneous routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* (C) 1998 Dave Boynton
* (C) 1998-2004 Ben Fennema
* (C) 1999-2000 Stelias Computing Inc
*
* HISTORY
*
* 04/19/99 blf partial support for reading/writing specific EA's
*/
#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/crc-itu-t.h>
#include "udf_i.h"
#include "udf_sb.h"
struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
uint32_t type, uint8_t loc)
{
uint8_t *ea = NULL, *ad = NULL;
int offset;
uint16_t crclen;
struct udf_inode_info *iinfo = UDF_I(inode);
ea = iinfo->i_data;
if (iinfo->i_lenEAttr) {
ad = iinfo->i_data + iinfo->i_lenEAttr;
} else {
ad = ea;
size += sizeof(struct extendedAttrHeaderDesc);
}
offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) -
iinfo->i_lenAlloc;
/* TODO - Check for FreeEASpace */
if (loc & 0x01 && offset >= size) {
struct extendedAttrHeaderDesc *eahd;
eahd = (struct extendedAttrHeaderDesc *)ea;
if (iinfo->i_lenAlloc)
memmove(&ad[size], ad, iinfo->i_lenAlloc);
if (iinfo->i_lenEAttr) {
/* check checksum/crc */
if (eahd->descTag.tagIdent !=
cpu_to_le16(TAG_IDENT_EAHD) ||
le32_to_cpu(eahd->descTag.tagLocation) !=
iinfo->i_location.logicalBlockNum)
return NULL;
} else {
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
size -= sizeof(struct extendedAttrHeaderDesc);
iinfo->i_lenEAttr +=
sizeof(struct extendedAttrHeaderDesc);
eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD);
if (sbi->s_udfrev >= 0x0200)
eahd->descTag.descVersion = cpu_to_le16(3);
else
eahd->descTag.descVersion = cpu_to_le16(2);
eahd->descTag.tagSerialNum =
cpu_to_le16(sbi->s_serial_number);
eahd->descTag.tagLocation = cpu_to_le32(
iinfo->i_location.logicalBlockNum);
eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF);
eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF);
}
offset = iinfo->i_lenEAttr;
if (type < 2048) {
if (le32_to_cpu(eahd->appAttrLocation) <
iinfo->i_lenEAttr) {
uint32_t aal =
le32_to_cpu(eahd->appAttrLocation);
memmove(&ea[offset - aal + size],
&ea[aal], offset - aal);
offset -= aal;
eahd->appAttrLocation =
cpu_to_le32(aal + size);
}
if (le32_to_cpu(eahd->impAttrLocation) <
iinfo->i_lenEAttr) {
uint32_t ial =
le32_to_cpu(eahd->impAttrLocation);
memmove(&ea[offset - ial + size],
&ea[ial], offset - ial);
offset -= ial;
eahd->impAttrLocation =
cpu_to_le32(ial + size);
}
} else if (type < 65536) {
if (le32_to_cpu(eahd->appAttrLocation) <
iinfo->i_lenEAttr) {
uint32_t aal =
le32_to_cpu(eahd->appAttrLocation);
memmove(&ea[offset - aal + size],
&ea[aal], offset - aal);
offset -= aal;
eahd->appAttrLocation =
cpu_to_le32(aal + size);
}
}
/* rewrite CRC + checksum of eahd */
crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(struct tag);
eahd->descTag.descCRCLength = cpu_to_le16(crclen);
eahd->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)eahd +
sizeof(struct tag), crclen));
eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag);
iinfo->i_lenEAttr += size;
return (struct genericFormat *)&ea[offset];
}
return NULL;
}
struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
uint8_t subtype)
{
struct genericFormat *gaf;
uint8_t *ea = NULL;
uint32_t offset;
struct udf_inode_info *iinfo = UDF_I(inode);
ea = iinfo->i_data;
if (iinfo->i_lenEAttr) {
struct extendedAttrHeaderDesc *eahd;
eahd = (struct extendedAttrHeaderDesc *)ea;
/* check checksum/crc */
if (eahd->descTag.tagIdent !=
cpu_to_le16(TAG_IDENT_EAHD) ||
le32_to_cpu(eahd->descTag.tagLocation) !=
iinfo->i_location.logicalBlockNum)
return NULL;
if (type < 2048)
offset = sizeof(struct extendedAttrHeaderDesc);
else if (type < 65536)
offset = le32_to_cpu(eahd->impAttrLocation);
else
offset = le32_to_cpu(eahd->appAttrLocation);
while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) {
uint32_t attrLength;
gaf = (struct genericFormat *)&ea[offset];
attrLength = le32_to_cpu(gaf->attrLength);
/* Detect undersized elements and buffer overflows */
if ((attrLength < sizeof(*gaf)) ||
(attrLength > (iinfo->i_lenEAttr - offset)))
break;
if (le32_to_cpu(gaf->attrType) == type &&
gaf->attrSubtype == subtype)
return gaf;
else
offset += attrLength;
}
}
return NULL;
}
/*
* udf_read_tagged
*
* PURPOSE
* Read the first block of a tagged descriptor.
*
* HISTORY
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
uint32_t location, uint16_t *ident)
{
struct tag *tag_p;
struct buffer_head *bh = NULL;
u8 checksum;
/* Read the block */
if (block == 0xFFFFFFFF)
return NULL;
bh = sb_bread(sb, block);
if (!bh) {
udf_err(sb, "read failed, block=%u, location=%u\n",
block, location);
return NULL;
}
tag_p = (struct tag *)(bh->b_data);
*ident = le16_to_cpu(tag_p->tagIdent);
if (location != le32_to_cpu(tag_p->tagLocation)) {
udf_debug("location mismatch block %u, tag %u != %u\n",
block, le32_to_cpu(tag_p->tagLocation), location);
goto error_out;
}
/* Verify the tag checksum */
checksum = udf_tag_checksum(tag_p);
if (checksum != tag_p->tagChecksum) {
udf_err(sb, "tag checksum failed, block %u: 0x%02x != 0x%02x\n",
block, checksum, tag_p->tagChecksum);
goto error_out;
}
/* Verify the tag version */
if (tag_p->descVersion != cpu_to_le16(0x0002U) &&
tag_p->descVersion != cpu_to_le16(0x0003U)) {
udf_err(sb, "tag version 0x%04x != 0x0002 || 0x0003, block %u\n",
le16_to_cpu(tag_p->descVersion), block);
goto error_out;
}
/* Verify the descriptor CRC */
if (le16_to_cpu(tag_p->descCRCLength) + sizeof(struct tag) > sb->s_blocksize ||
le16_to_cpu(tag_p->descCRC) == crc_itu_t(0,
bh->b_data + sizeof(struct tag),
le16_to_cpu(tag_p->descCRCLength)))
return bh;
udf_debug("Crc failure block %u: crc = %u, crclen = %u\n", block,
le16_to_cpu(tag_p->descCRC),
le16_to_cpu(tag_p->descCRCLength));
error_out:
brelse(bh);
return NULL;
}
struct buffer_head *udf_read_ptagged(struct super_block *sb,
struct kernel_lb_addr *loc,
uint32_t offset, uint16_t *ident)
{
return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset),
loc->logicalBlockNum + offset, ident);
}
void udf_update_tag(char *data, int length)
{
struct tag *tptr = (struct tag *)data;
length -= sizeof(struct tag);
tptr->descCRCLength = cpu_to_le16(length);
tptr->descCRC = cpu_to_le16(crc_itu_t(0, data + sizeof(struct tag), length));
tptr->tagChecksum = udf_tag_checksum(tptr);
}
void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
uint32_t loc, int length)
{
struct tag *tptr = (struct tag *)data;
tptr->tagIdent = cpu_to_le16(ident);
tptr->descVersion = cpu_to_le16(version);
tptr->tagSerialNum = cpu_to_le16(snum);
tptr->tagLocation = cpu_to_le32(loc);
udf_update_tag(data, length);
}
u8 udf_tag_checksum(const struct tag *t)
{
u8 *data = (u8 *)t;
u8 checksum = 0;
int i;
for (i = 0; i < sizeof(struct tag); ++i)
if (i != 4) /* position of checksum */
checksum += data[i];
return checksum;
}
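/*
* Example of the checksum rule above (byte values assumed): for a
* 16-byte tag whose bytes other than index 4 sum to 0x1A3, the stored
* checksum is that sum truncated to 8 bits, i.e. 0xA3. Byte 4 is
* skipped because it holds the tagChecksum field itself, letting
* udf_read_tagged() above recompute and compare the value on read.
*/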
/* linux-master: fs/udf/misc.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Neil Brown 2002
* Copyright (C) Christoph Hellwig 2007
*
* This file contains the code mapping from inodes to NFS file handles,
* and for mapping back from file handles to dentries.
*
* For details on why we do all the strange and hairy things in here
* take a look at Documentation/filesystems/nfs/exporting.rst.
*/
#include <linux/exportfs.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/cred.h>
#define dprintk(fmt, args...) pr_debug(fmt, ##args)
static int get_name(const struct path *path, char *name, struct dentry *child);
static int exportfs_get_name(struct vfsmount *mnt, struct dentry *dir,
char *name, struct dentry *child)
{
const struct export_operations *nop = dir->d_sb->s_export_op;
struct path path = {.mnt = mnt, .dentry = dir};
if (nop->get_name)
return nop->get_name(dir, name, child);
else
return get_name(&path, name, child);
}
/*
* Check if the dentry or any of its aliases is acceptable.
*/
static struct dentry *
find_acceptable_alias(struct dentry *result,
int (*acceptable)(void *context, struct dentry *dentry),
void *context)
{
struct dentry *dentry, *toput = NULL;
struct inode *inode;
if (acceptable(context, result))
return result;
inode = result->d_inode;
spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
dget(dentry);
spin_unlock(&inode->i_lock);
if (toput)
dput(toput);
if (dentry != result && acceptable(context, dentry)) {
dput(result);
return dentry;
}
spin_lock(&inode->i_lock);
toput = dentry;
}
spin_unlock(&inode->i_lock);
if (toput)
dput(toput);
return NULL;
}
static bool dentry_connected(struct dentry *dentry)
{
dget(dentry);
while (dentry->d_flags & DCACHE_DISCONNECTED) {
struct dentry *parent = dget_parent(dentry);
dput(dentry);
if (dentry == parent) {
dput(parent);
return false;
}
dentry = parent;
}
dput(dentry);
return true;
}
static void clear_disconnected(struct dentry *dentry)
{
dget(dentry);
while (dentry->d_flags & DCACHE_DISCONNECTED) {
struct dentry *parent = dget_parent(dentry);
WARN_ON_ONCE(IS_ROOT(dentry));
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_DISCONNECTED;
spin_unlock(&dentry->d_lock);
dput(dentry);
dentry = parent;
}
dput(dentry);
}
/*
* Reconnect a directory dentry with its parent.
*
* This can return a dentry, or NULL, or an error.
*
* In the first case the returned dentry is the parent of the given
* dentry, and may itself need to be reconnected to its parent.
*
* In the NULL case, a concurrent VFS operation has either renamed or
* removed this directory. The concurrent operation has reconnected our
* dentry, so we no longer need to.
*/
static struct dentry *reconnect_one(struct vfsmount *mnt,
struct dentry *dentry, char *nbuf)
{
struct dentry *parent;
struct dentry *tmp;
int err;
parent = ERR_PTR(-EACCES);
inode_lock(dentry->d_inode);
if (mnt->mnt_sb->s_export_op->get_parent)
parent = mnt->mnt_sb->s_export_op->get_parent(dentry);
inode_unlock(dentry->d_inode);
if (IS_ERR(parent)) {
dprintk("get_parent of %lu failed, err %ld\n",
dentry->d_inode->i_ino, PTR_ERR(parent));
return parent;
}
dprintk("%s: find name of %lu in %lu\n", __func__,
dentry->d_inode->i_ino, parent->d_inode->i_ino);
err = exportfs_get_name(mnt, parent, nbuf, dentry);
if (err == -ENOENT)
goto out_reconnected;
if (err)
goto out_err;
dprintk("%s: found name: %s\n", __func__, nbuf);
tmp = lookup_one_unlocked(mnt_idmap(mnt), nbuf, parent, strlen(nbuf));
if (IS_ERR(tmp)) {
dprintk("lookup failed: %ld\n", PTR_ERR(tmp));
err = PTR_ERR(tmp);
goto out_err;
}
if (tmp != dentry) {
/*
* Somebody has renamed it since exportfs_get_name();
* great, since it could've only been renamed if it
* got looked up and thus connected, and it would
* remain connected afterwards. We are done.
*/
dput(tmp);
goto out_reconnected;
}
dput(tmp);
if (IS_ROOT(dentry)) {
err = -ESTALE;
goto out_err;
}
return parent;
out_err:
dput(parent);
return ERR_PTR(err);
out_reconnected:
dput(parent);
/*
* Someone must have renamed our entry into another parent, in
* which case it has been reconnected by the rename.
*
* Or someone removed it entirely, in which case filehandle
* lookup will succeed but the directory is now IS_DEAD and
* subsequent operations on it will fail.
*
* Alternatively, maybe there was no race at all, and the
* filesystem is just corrupt and gave us a parent that doesn't
* actually contain any entry pointing to this inode. So,
* double check that this worked and return -ESTALE if not:
*/
if (!dentry_connected(dentry))
return ERR_PTR(-ESTALE);
return NULL;
}
/*
* Make sure target_dir is fully connected to the dentry tree.
*
* On successful return, DCACHE_DISCONNECTED will be cleared on
* target_dir, and target_dir->d_parent->...->d_parent will reach the
* root of the filesystem.
*
* Whenever DCACHE_DISCONNECTED is unset, target_dir is fully connected.
* But the converse is not true: target_dir may have DCACHE_DISCONNECTED
* set but already be connected. In that case we'll verify the
* connection to root and then clear the flag.
*
* Note that target_dir could be removed by a concurrent operation. In
* that case reconnect_path may still succeed with target_dir fully
* connected, but further operations using the filehandle will fail when
* necessary (due to S_DEAD being set on the directory).
*/
static int
reconnect_path(struct vfsmount *mnt, struct dentry *target_dir, char *nbuf)
{
struct dentry *dentry, *parent;
dentry = dget(target_dir);
while (dentry->d_flags & DCACHE_DISCONNECTED) {
BUG_ON(dentry == mnt->mnt_sb->s_root);
if (IS_ROOT(dentry))
parent = reconnect_one(mnt, dentry, nbuf);
else
parent = dget_parent(dentry);
if (!parent)
break;
dput(dentry);
if (IS_ERR(parent))
return PTR_ERR(parent);
dentry = parent;
}
dput(dentry);
clear_disconnected(target_dir);
return 0;
}
struct getdents_callback {
struct dir_context ctx;
	char *name;		/* name that was found. It already points to
				   a buffer of size NAME_MAX+1 */
u64 ino; /* the inum we are looking for */
int found; /* inode matched? */
int sequence; /* sequence counter */
};
/*
* A rather strange filldir function to capture
* the name matching the specified inode number.
*/
static bool filldir_one(struct dir_context *ctx, const char *name, int len,
loff_t pos, u64 ino, unsigned int d_type)
{
struct getdents_callback *buf =
container_of(ctx, struct getdents_callback, ctx);
buf->sequence++;
if (buf->ino == ino && len <= NAME_MAX) {
memcpy(buf->name, name, len);
buf->name[len] = '\0';
buf->found = 1;
return false; // no more
}
return true;
}
/**
* get_name - default export_operations->get_name function
* @path: the directory in which to find a name
* @name: a pointer to a %NAME_MAX+1 char buffer to store the name
* @child: the dentry for the child directory.
*
 * Calls readdir on the parent until it finds an entry with the same
 * inode number as the child, and stores that entry's name in @name.
*/
static int get_name(const struct path *path, char *name, struct dentry *child)
{
const struct cred *cred = current_cred();
struct inode *dir = path->dentry->d_inode;
int error;
struct file *file;
struct kstat stat;
struct path child_path = {
.mnt = path->mnt,
.dentry = child,
};
struct getdents_callback buffer = {
.ctx.actor = filldir_one,
.name = name,
};
error = -ENOTDIR;
if (!dir || !S_ISDIR(dir->i_mode))
goto out;
error = -EINVAL;
if (!dir->i_fop)
goto out;
/*
* inode->i_ino is unsigned long, kstat->ino is u64, so the
* former would be insufficient on 32-bit hosts when the
* filesystem supports 64-bit inode numbers. So we need to
* actually call ->getattr, not just read i_ino:
*/
error = vfs_getattr_nosec(&child_path, &stat,
STATX_INO, AT_STATX_SYNC_AS_STAT);
if (error)
return error;
buffer.ino = stat.ino;
/*
* Open the directory ...
*/
file = dentry_open(path, O_RDONLY, cred);
error = PTR_ERR(file);
if (IS_ERR(file))
goto out;
error = -EINVAL;
if (!file->f_op->iterate_shared)
goto out_close;
buffer.sequence = 0;
while (1) {
int old_seq = buffer.sequence;
error = iterate_dir(file, &buffer.ctx);
if (buffer.found) {
error = 0;
break;
}
if (error < 0)
break;
error = -ENOENT;
if (old_seq == buffer.sequence)
break;
}
out_close:
fput(file);
out:
return error;
}
/**
* export_encode_fh - default export_operations->encode_fh function
* @inode: the object to encode
* @fid: where to store the file handle fragment
* @max_len: maximum length to store there
* @parent: parent directory inode, if wanted
*
 * This default encode_fh function assumes that the 32-bit inode number
* is suitable for locating an inode, and that the generation number
* can be used to check that it is still valid. It places them in the
* filehandle fragment where export_decode_fh expects to find them.
*/
static int export_encode_fh(struct inode *inode, struct fid *fid,
int *max_len, struct inode *parent)
{
int len = *max_len;
int type = FILEID_INO32_GEN;
if (parent && (len < 4)) {
*max_len = 4;
return FILEID_INVALID;
} else if (len < 2) {
*max_len = 2;
return FILEID_INVALID;
}
len = 2;
fid->i32.ino = inode->i_ino;
fid->i32.gen = inode->i_generation;
if (parent) {
fid->i32.parent_ino = parent->i_ino;
fid->i32.parent_gen = parent->i_generation;
len = 4;
type = FILEID_INO32_GEN_PARENT;
}
*max_len = len;
return type;
}
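/*
 * Resulting handle layout sketch (not part of the original file); *max_len
 * counts 32-bit words:
 *
 *	fid->i32.ino		word 0	inode number
 *	fid->i32.gen		word 1	i_generation
 *	fid->i32.parent_ino	word 2	only with FILEID_INO32_GEN_PARENT
 *	fid->i32.parent_gen	word 3	only with FILEID_INO32_GEN_PARENT
 *
 * so len == 2 for FILEID_INO32_GEN and len == 4 for FILEID_INO32_GEN_PARENT,
 * matching the length checks at the top of the function.
 */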
/**
* exportfs_encode_inode_fh - encode a file handle from inode
* @inode: the object to encode
* @fid: where to store the file handle fragment
* @max_len: maximum length to store there
* @parent: parent directory inode, if wanted
* @flags: properties of the requested file handle
*
* Returns an enum fid_type or a negative errno.
*/
int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
int *max_len, struct inode *parent, int flags)
{
const struct export_operations *nop = inode->i_sb->s_export_op;
/*
* If a decodeable file handle was requested, we need to make sure that
	 * the filesystem can decode file handles.
*/
if (nop && !(flags & EXPORT_FH_FID) && !nop->fh_to_dentry)
return -EOPNOTSUPP;
if (nop && nop->encode_fh)
return nop->encode_fh(inode, fid->raw, max_len, parent);
return export_encode_fh(inode, fid, max_len, parent);
}
EXPORT_SYMBOL_GPL(exportfs_encode_inode_fh);
/**
* exportfs_encode_fh - encode a file handle from dentry
* @dentry: the object to encode
* @fid: where to store the file handle fragment
* @max_len: maximum length to store there
* @flags: properties of the requested file handle
*
* Returns an enum fid_type or a negative errno.
*/
int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len,
int flags)
{
int error;
struct dentry *p = NULL;
struct inode *inode = dentry->d_inode, *parent = NULL;
if ((flags & EXPORT_FH_CONNECTABLE) && !S_ISDIR(inode->i_mode)) {
p = dget_parent(dentry);
/*
		 * note that while p might've ceased to be our parent already,
		 * it's still pinned by our reference and still positive.
*/
parent = p->d_inode;
}
error = exportfs_encode_inode_fh(inode, fid, max_len, parent, flags);
dput(p);
return error;
}
EXPORT_SYMBOL_GPL(exportfs_encode_fh);
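/*
 * Minimal usage sketch (not part of the original file); the buffer sizing
 * via MAX_HANDLE_SZ is an assumption of this example:
 *
 *	union {
 *		struct fid fid;
 *		u32 raw[MAX_HANDLE_SZ / 4];
 *	} buf;
 *	int len = ARRAY_SIZE(buf.raw);
 *	int type = exportfs_encode_fh(dentry, &buf.fid, &len,
 *				      EXPORT_FH_CONNECTABLE);
 *
 *	if (type < 0 || type == FILEID_INVALID)
 *		// could not encode; otherwise 'type' plus the first 'len'
 *		// words of buf.raw identify the object for later decoding
 */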
struct dentry *
exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
int fileid_type,
int (*acceptable)(void *, struct dentry *),
void *context)
{
const struct export_operations *nop = mnt->mnt_sb->s_export_op;
struct dentry *result, *alias;
char nbuf[NAME_MAX+1];
int err;
/*
* Try to get any dentry for the given file handle from the filesystem.
*/
if (!nop || !nop->fh_to_dentry)
return ERR_PTR(-ESTALE);
result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
if (IS_ERR_OR_NULL(result))
return result;
/*
	 * If no acceptance criterion was specified by the caller, a
	 * disconnected dentry is also acceptable. Callers may use this mode
	 * to query whether the
* file handle is stale or to get a reference to an inode without
* risking the high overhead caused by directory reconnect.
*/
if (!acceptable)
return result;
if (d_is_dir(result)) {
/*
* This request is for a directory.
*
* On the positive side there is only one dentry for each
* directory inode. On the negative side this implies that we
		 * have to ensure our dentry is connected all the way up to the
* filesystem root.
*/
if (result->d_flags & DCACHE_DISCONNECTED) {
err = reconnect_path(mnt, result, nbuf);
if (err)
goto err_result;
}
if (!acceptable(context, result)) {
err = -EACCES;
goto err_result;
}
return result;
} else {
/*
* It's not a directory. Life is a little more complicated.
*/
struct dentry *target_dir, *nresult;
/*
* See if either the dentry we just got from the filesystem
* or any alias for it is acceptable. This is always true
* if this filesystem is exported without the subtreecheck
* option. If the filesystem is exported with the subtree
* check option there's a fair chance we need to look at
* the parent directory in the file handle and make sure
* it's connected to the filesystem root.
*/
alias = find_acceptable_alias(result, acceptable, context);
if (alias)
return alias;
/*
* Try to extract a dentry for the parent directory from the
* file handle. If this fails we'll have to give up.
*/
err = -ESTALE;
if (!nop->fh_to_parent)
goto err_result;
target_dir = nop->fh_to_parent(mnt->mnt_sb, fid,
fh_len, fileid_type);
if (!target_dir)
goto err_result;
err = PTR_ERR(target_dir);
if (IS_ERR(target_dir))
goto err_result;
/*
* And as usual we need to make sure the parent directory is
* connected to the filesystem root. The VFS really doesn't
* like disconnected directories..
*/
err = reconnect_path(mnt, target_dir, nbuf);
if (err) {
dput(target_dir);
goto err_result;
}
/*
* Now that we've got both a well-connected parent and a
* dentry for the inode we're after, make sure that our
* inode is actually connected to the parent.
*/
err = exportfs_get_name(mnt, target_dir, nbuf, result);
if (err) {
dput(target_dir);
goto err_result;
}
inode_lock(target_dir->d_inode);
nresult = lookup_one(mnt_idmap(mnt), nbuf,
target_dir, strlen(nbuf));
if (!IS_ERR(nresult)) {
if (unlikely(nresult->d_inode != result->d_inode)) {
dput(nresult);
nresult = ERR_PTR(-ESTALE);
}
}
inode_unlock(target_dir->d_inode);
/*
* At this point we are done with the parent, but it's pinned
* by the child dentry anyway.
*/
dput(target_dir);
if (IS_ERR(nresult)) {
err = PTR_ERR(nresult);
goto err_result;
}
dput(result);
result = nresult;
/*
* And finally make sure the dentry is actually acceptable
* to NFSD.
*/
alias = find_acceptable_alias(result, acceptable, context);
if (!alias) {
err = -EACCES;
goto err_result;
}
return alias;
}
err_result:
dput(result);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(exportfs_decode_fh_raw);
struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
int fh_len, int fileid_type,
int (*acceptable)(void *, struct dentry *),
void *context)
{
struct dentry *ret;
ret = exportfs_decode_fh_raw(mnt, fid, fh_len, fileid_type,
acceptable, context);
if (IS_ERR_OR_NULL(ret)) {
if (ret == ERR_PTR(-ENOMEM))
return ret;
return ERR_PTR(-ESTALE);
}
return ret;
}
EXPORT_SYMBOL_GPL(exportfs_decode_fh);
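/*
 * Matching decode sketch (not part of the original file); the
 * accept-anything callback is a hypothetical example policy, real callers
 * such as nfsd apply export-specific checks here:
 *
 *	static int any_dentry(void *context, struct dentry *dentry)
 *	{
 *		return 1;
 *	}
 *
 *	struct dentry *d = exportfs_decode_fh(mnt, &buf.fid, len, type,
 *					      any_dentry, NULL);
 *	if (IS_ERR(d))
 *		// handle is stale or cannot be made reachable
 */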
MODULE_LICENSE("GPL");
| linux-master | fs/exportfs/expfs.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_v1.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/byteorder.h>
#include "quotaio_v1.h"
MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Old quota format support");
MODULE_LICENSE("GPL");
#define QUOTABLOCK_BITS 10
#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
static inline qsize_t v1_stoqb(qsize_t space)
{
return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
}
static inline qsize_t v1_qbtos(qsize_t blocks)
{
return blocks << QUOTABLOCK_BITS;
}
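/*
 * Worked example (not part of the original file): with QUOTABLOCK_SIZE of
 * 1024 bytes, v1_stoqb() rounds byte counts up to whole quota blocks and
 * v1_qbtos() maps back, so v1_stoqb(1) == 1, v1_stoqb(1024) == 1,
 * v1_stoqb(1025) == 2 and v1_qbtos(2) == 2048.
 */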
static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d)
{
m->dqb_ihardlimit = d->dqb_ihardlimit;
m->dqb_isoftlimit = d->dqb_isoftlimit;
m->dqb_curinodes = d->dqb_curinodes;
m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit);
m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit);
m->dqb_curspace = v1_qbtos(d->dqb_curblocks);
m->dqb_itime = d->dqb_itime;
m->dqb_btime = d->dqb_btime;
}
static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m)
{
d->dqb_ihardlimit = m->dqb_ihardlimit;
d->dqb_isoftlimit = m->dqb_isoftlimit;
d->dqb_curinodes = m->dqb_curinodes;
d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit);
d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit);
d->dqb_curblocks = v1_stoqb(m->dqb_curspace);
d->dqb_itime = m->dqb_itime;
d->dqb_btime = m->dqb_btime;
}
static int v1_read_dqblk(struct dquot *dquot)
{
int type = dquot->dq_id.type;
struct v1_disk_dqblk dqblk;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
if (!dqopt->files[type])
return -EINVAL;
/* Set structure to 0s in case read fails/is after end of file */
memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk),
v1_dqoff(from_kqid(&init_user_ns, dquot->dq_id)));
v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
dquot->dq_dqb.dqb_bsoftlimit == 0 &&
dquot->dq_dqb.dqb_ihardlimit == 0 &&
dquot->dq_dqb.dqb_isoftlimit == 0)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
dqstats_inc(DQST_READS);
return 0;
}
static int v1_commit_dqblk(struct dquot *dquot)
{
short type = dquot->dq_id.type;
ssize_t ret;
struct v1_disk_dqblk dqblk;
v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
if (((type == USRQUOTA) && uid_eq(dquot->dq_id.uid, GLOBAL_ROOT_UID)) ||
((type == GRPQUOTA) && gid_eq(dquot->dq_id.gid, GLOBAL_ROOT_GID))) {
dqblk.dqb_btime =
sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
dqblk.dqb_itime =
sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
}
ret = 0;
if (sb_dqopt(dquot->dq_sb)->files[type])
ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
(char *)&dqblk, sizeof(struct v1_disk_dqblk),
v1_dqoff(from_kqid(&init_user_ns, dquot->dq_id)));
if (ret != sizeof(struct v1_disk_dqblk)) {
quota_error(dquot->dq_sb, "dquota write failed");
if (ret >= 0)
ret = -EIO;
goto out;
}
ret = 0;
out:
dqstats_inc(DQST_WRITES);
return ret;
}
/* Magics of new quota format */
#define V2_INITQMAGICS {\
0xd9c01f11, /* USRQUOTA */\
0xd9c01927 /* GRPQUOTA */\
}
/* Header of new quota format */
struct v2_disk_dqheader {
__le32 dqh_magic; /* Magic number identifying file */
__le32 dqh_version; /* File version */
};
static int v1_check_quota_file(struct super_block *sb, int type)
{
struct inode *inode = sb_dqopt(sb)->files[type];
ulong blocks;
size_t off;
struct v2_disk_dqheader dqhead;
ssize_t size;
loff_t isize;
static const uint quota_magics[] = V2_INITQMAGICS;
isize = i_size_read(inode);
if (!isize)
return 0;
blocks = isize >> BLOCK_SIZE_BITS;
off = isize & (BLOCK_SIZE - 1);
if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) %
sizeof(struct v1_disk_dqblk))
return 0;
	/* Double-check that we didn't get a file in the new format - with the
	 * old quotactl() this could happen */
size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
sizeof(struct v2_disk_dqheader), 0);
if (size != sizeof(struct v2_disk_dqheader))
return 1; /* Probably not new format */
if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
return 1; /* Definitely not new format */
printk(KERN_INFO
"VFS: %s: Refusing to turn on old quota format on given file."
" It probably contains newer quota format.\n", sb->s_id);
return 0; /* Seems like a new format file -> refuse it */
}
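/*
 * Layout sketch (not part of the original file): a v1 quota file is a flat
 * array of struct v1_disk_dqblk indexed by id, with v1_dqoff(id) expected
 * to be id * sizeof(struct v1_disk_dqblk). The entry for id 0 doubles as
 * storage for the grace times, which is why v1_read_file_info() below
 * reads the dqblk at v1_dqoff(0).
 */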
static int v1_read_file_info(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
struct v1_disk_dqblk dqblk;
int ret;
down_read(&dqopt->dqio_sem);
ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk), v1_dqoff(0));
if (ret != sizeof(struct v1_disk_dqblk)) {
if (ret >= 0)
ret = -EIO;
goto out;
}
ret = 0;
/* limits are stored as unsigned 32-bit data */
dqopt->info[type].dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
dqopt->info[type].dqi_max_ino_limit = 0xffffffff;
dqopt->info[type].dqi_igrace =
dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
dqopt->info[type].dqi_bgrace =
dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
out:
up_read(&dqopt->dqio_sem);
return ret;
}
static int v1_write_file_info(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
struct v1_disk_dqblk dqblk;
int ret;
down_write(&dqopt->dqio_sem);
ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk), v1_dqoff(0));
if (ret != sizeof(struct v1_disk_dqblk)) {
if (ret >= 0)
ret = -EIO;
goto out;
}
spin_lock(&dq_data_lock);
dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
dqblk.dqb_itime = dqopt->info[type].dqi_igrace;
dqblk.dqb_btime = dqopt->info[type].dqi_bgrace;
spin_unlock(&dq_data_lock);
ret = sb->s_op->quota_write(sb, type, (char *)&dqblk,
sizeof(struct v1_disk_dqblk), v1_dqoff(0));
if (ret == sizeof(struct v1_disk_dqblk))
ret = 0;
else if (ret >= 0)
ret = -EIO;
out:
up_write(&dqopt->dqio_sem);
return ret;
}
static const struct quota_format_ops v1_format_ops = {
.check_quota_file = v1_check_quota_file,
.read_file_info = v1_read_file_info,
.write_file_info = v1_write_file_info,
.read_dqblk = v1_read_dqblk,
.commit_dqblk = v1_commit_dqblk,
};
static struct quota_format_type v1_quota_format = {
.qf_fmt_id = QFMT_VFS_OLD,
.qf_ops = &v1_format_ops,
.qf_owner = THIS_MODULE
};
static int __init init_v1_quota_format(void)
{
return register_quota_format(&v1_quota_format);
}
static void __exit exit_v1_quota_format(void)
{
unregister_quota_format(&v1_quota_format);
}
module_init(init_v1_quota_format);
module_exit(exit_v1_quota_format);
| linux-master | fs/quota/quota_v1.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vfsv0 quota IO operations on file
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <asm/byteorder.h>
#include "quota_tree.h"
#include "quotaio_v2.h"
MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota format v2 support");
MODULE_LICENSE("GPL");
static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot);
static void v2r0_disk2memdqb(struct dquot *dquot, void *dp);
static int v2r0_is_id(void *dp, struct dquot *dquot);
static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot);
static void v2r1_disk2memdqb(struct dquot *dquot, void *dp);
static int v2r1_is_id(void *dp, struct dquot *dquot);
static const struct qtree_fmt_operations v2r0_qtree_ops = {
.mem2disk_dqblk = v2r0_mem2diskdqb,
.disk2mem_dqblk = v2r0_disk2memdqb,
.is_id = v2r0_is_id,
};
static const struct qtree_fmt_operations v2r1_qtree_ops = {
.mem2disk_dqblk = v2r1_mem2diskdqb,
.disk2mem_dqblk = v2r1_disk2memdqb,
.is_id = v2r1_is_id,
};
#define QUOTABLOCK_BITS 10
#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
static inline qsize_t v2_stoqb(qsize_t space)
{
return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
}
static inline qsize_t v2_qbtos(qsize_t blocks)
{
return blocks << QUOTABLOCK_BITS;
}
static int v2_read_header(struct super_block *sb, int type,
struct v2_disk_dqheader *dqhead)
{
ssize_t size;
size = sb->s_op->quota_read(sb, type, (char *)dqhead,
sizeof(struct v2_disk_dqheader), 0);
if (size != sizeof(struct v2_disk_dqheader)) {
quota_error(sb, "Failed header read: expected=%zd got=%zd",
sizeof(struct v2_disk_dqheader), size);
if (size < 0)
return size;
return -EIO;
}
return 0;
}
/* Check whether given file is really vfsv0 quotafile */
static int v2_check_quota_file(struct super_block *sb, int type)
{
struct v2_disk_dqheader dqhead;
static const uint quota_magics[] = V2_INITQMAGICS;
static const uint quota_versions[] = V2_INITQVERSIONS;
if (v2_read_header(sb, type, &dqhead))
return 0;
if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] ||
le32_to_cpu(dqhead.dqh_version) > quota_versions[type])
return 0;
return 1;
}
/* Read information header from quota file */
static int v2_read_file_info(struct super_block *sb, int type)
{
struct v2_disk_dqinfo dinfo;
struct v2_disk_dqheader dqhead;
struct quota_info *dqopt = sb_dqopt(sb);
struct mem_dqinfo *info = &dqopt->info[type];
struct qtree_mem_dqinfo *qinfo;
ssize_t size;
unsigned int version;
int ret;
down_read(&dqopt->dqio_sem);
ret = v2_read_header(sb, type, &dqhead);
if (ret < 0)
goto out;
version = le32_to_cpu(dqhead.dqh_version);
if ((info->dqi_fmt_id == QFMT_VFS_V0 && version != 0) ||
(info->dqi_fmt_id == QFMT_VFS_V1 && version != 1)) {
ret = -EINVAL;
goto out;
}
size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
if (size != sizeof(struct v2_disk_dqinfo)) {
quota_error(sb, "Can't read info structure");
if (size < 0)
ret = size;
else
ret = -EIO;
goto out;
}
info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS);
if (!info->dqi_priv) {
ret = -ENOMEM;
goto out;
}
qinfo = info->dqi_priv;
if (version == 0) {
/* limits are stored as unsigned 32-bit data */
info->dqi_max_spc_limit = 0xffffffffLL << QUOTABLOCK_BITS;
info->dqi_max_ino_limit = 0xffffffff;
} else {
/*
* Used space is stored as unsigned 64-bit value in bytes but
* quota core supports only signed 64-bit values so use that
* as a limit
*/
info->dqi_max_spc_limit = 0x7fffffffffffffffLL; /* 2^63-1 */
info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
}
info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
/* No flags currently supported */
info->dqi_flags = 0;
qinfo->dqi_sb = sb;
qinfo->dqi_type = type;
qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS;
qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS;
qinfo->dqi_qtree_depth = qtree_depth(qinfo);
if (version == 0) {
qinfo->dqi_entry_size = sizeof(struct v2r0_disk_dqblk);
qinfo->dqi_ops = &v2r0_qtree_ops;
} else {
qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk);
qinfo->dqi_ops = &v2r1_qtree_ops;
}
ret = -EUCLEAN;
/* Some sanity checks of the read headers... */
if ((loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits >
i_size_read(sb_dqopt(sb)->files[type])) {
quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).",
(loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits,
i_size_read(sb_dqopt(sb)->files[type]));
goto out_free;
}
if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
quota_error(sb, "Free block number too big (%u >= %u).",
qinfo->dqi_free_blk, qinfo->dqi_blocks);
goto out_free;
}
if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
quota_error(sb, "Block with free entry too big (%u >= %u).",
qinfo->dqi_free_entry, qinfo->dqi_blocks);
goto out_free;
}
ret = 0;
out_free:
if (ret) {
kfree(info->dqi_priv);
info->dqi_priv = NULL;
}
out:
up_read(&dqopt->dqio_sem);
return ret;
}
/* Write information header to quota file */
static int v2_write_file_info(struct super_block *sb, int type)
{
struct v2_disk_dqinfo dinfo;
struct quota_info *dqopt = sb_dqopt(sb);
struct mem_dqinfo *info = &dqopt->info[type];
struct qtree_mem_dqinfo *qinfo = info->dqi_priv;
ssize_t size;
down_write(&dqopt->dqio_sem);
spin_lock(&dq_data_lock);
info->dqi_flags &= ~DQF_INFO_DIRTY;
dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
/* No flags currently supported */
dinfo.dqi_flags = cpu_to_le32(0);
spin_unlock(&dq_data_lock);
dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry);
size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
up_write(&dqopt->dqio_sem);
if (size != sizeof(struct v2_disk_dqinfo)) {
quota_error(sb, "Can't write info structure");
return size < 0 ? size : -EIO;
}
return 0;
}
static void v2r0_disk2memdqb(struct dquot *dquot, void *dp)
{
struct v2r0_disk_dqblk *d = dp, empty;
struct mem_dqblk *m = &dquot->dq_dqb;
m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit);
m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit);
m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes);
m->dqb_itime = le64_to_cpu(d->dqb_itime);
m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit));
m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit));
m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
m->dqb_btime = le64_to_cpu(d->dqb_btime);
	/* An all-zero structure marks an unused entry on disk; a real entry
	 * that would be all zeros is stored with dqb_itime == 1, so undo
	 * that escape here */
memset(&empty, 0, sizeof(struct v2r0_disk_dqblk));
empty.dqb_itime = cpu_to_le64(1);
if (!memcmp(&empty, dp, sizeof(struct v2r0_disk_dqblk)))
m->dqb_itime = 0;
}
static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot)
{
struct v2r0_disk_dqblk *d = dp;
struct mem_dqblk *m = &dquot->dq_dqb;
struct qtree_mem_dqinfo *info =
sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit);
d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit);
d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes);
d->dqb_itime = cpu_to_le64(m->dqb_itime);
d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit));
d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit));
d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
d->dqb_btime = cpu_to_le64(m->dqb_btime);
d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
if (qtree_entry_unused(info, dp))
d->dqb_itime = cpu_to_le64(1);
}
static int v2r0_is_id(void *dp, struct dquot *dquot)
{
struct v2r0_disk_dqblk *d = dp;
struct qtree_mem_dqinfo *info =
sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
if (qtree_entry_unused(info, dp))
return 0;
return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
le32_to_cpu(d->dqb_id)),
dquot->dq_id);
}
static void v2r1_disk2memdqb(struct dquot *dquot, void *dp)
{
struct v2r1_disk_dqblk *d = dp, empty;
struct mem_dqblk *m = &dquot->dq_dqb;
m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
m->dqb_itime = le64_to_cpu(d->dqb_itime);
m->dqb_bhardlimit = v2_qbtos(le64_to_cpu(d->dqb_bhardlimit));
m->dqb_bsoftlimit = v2_qbtos(le64_to_cpu(d->dqb_bsoftlimit));
m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
m->dqb_btime = le64_to_cpu(d->dqb_btime);
	/* An all-zero structure marks an unused entry on disk; a real entry
	 * that would be all zeros is stored with dqb_itime == 1, so undo
	 * that escape here */
memset(&empty, 0, sizeof(struct v2r1_disk_dqblk));
empty.dqb_itime = cpu_to_le64(1);
if (!memcmp(&empty, dp, sizeof(struct v2r1_disk_dqblk)))
m->dqb_itime = 0;
}
static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
{
struct v2r1_disk_dqblk *d = dp;
struct mem_dqblk *m = &dquot->dq_dqb;
struct qtree_mem_dqinfo *info =
sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
d->dqb_itime = cpu_to_le64(m->dqb_itime);
d->dqb_bhardlimit = cpu_to_le64(v2_stoqb(m->dqb_bhardlimit));
d->dqb_bsoftlimit = cpu_to_le64(v2_stoqb(m->dqb_bsoftlimit));
d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
d->dqb_btime = cpu_to_le64(m->dqb_btime);
d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
d->dqb_pad = 0;
if (qtree_entry_unused(info, dp))
d->dqb_itime = cpu_to_le64(1);
}
static int v2r1_is_id(void *dp, struct dquot *dquot)
{
struct v2r1_disk_dqblk *d = dp;
struct qtree_mem_dqinfo *info =
sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
if (qtree_entry_unused(info, dp))
return 0;
return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
le32_to_cpu(d->dqb_id)),
dquot->dq_id);
}
static int v2_read_dquot(struct dquot *dquot)
{
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
int ret;
down_read(&dqopt->dqio_sem);
ret = qtree_read_dquot(
sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv,
dquot);
up_read(&dqopt->dqio_sem);
return ret;
}
static int v2_write_dquot(struct dquot *dquot)
{
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
int ret;
bool alloc = false;
/*
	 * If space for the dquot is already allocated, we don't need any
	 * protection as we'll only overwrite the dquot's place in the file.
	 * We are still protected against concurrent writes of the same dquot
	 * by dquot->dq_lock.
*/
if (!dquot->dq_off) {
alloc = true;
down_write(&dqopt->dqio_sem);
} else {
down_read(&dqopt->dqio_sem);
}
ret = qtree_write_dquot(
sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv,
dquot);
if (alloc)
up_write(&dqopt->dqio_sem);
else
up_read(&dqopt->dqio_sem);
return ret;
}
static int v2_release_dquot(struct dquot *dquot)
{
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
int ret;
down_write(&dqopt->dqio_sem);
ret = qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot);
up_write(&dqopt->dqio_sem);
return ret;
}
static int v2_free_file_info(struct super_block *sb, int type)
{
kfree(sb_dqinfo(sb, type)->dqi_priv);
return 0;
}
static int v2_get_next_id(struct super_block *sb, struct kqid *qid)
{
struct quota_info *dqopt = sb_dqopt(sb);
int ret;
down_read(&dqopt->dqio_sem);
ret = qtree_get_next_id(sb_dqinfo(sb, qid->type)->dqi_priv, qid);
up_read(&dqopt->dqio_sem);
return ret;
}
static const struct quota_format_ops v2_format_ops = {
.check_quota_file = v2_check_quota_file,
.read_file_info = v2_read_file_info,
.write_file_info = v2_write_file_info,
.free_file_info = v2_free_file_info,
.read_dqblk = v2_read_dquot,
.commit_dqblk = v2_write_dquot,
.release_dqblk = v2_release_dquot,
.get_next_id = v2_get_next_id,
};
static struct quota_format_type v2r0_quota_format = {
.qf_fmt_id = QFMT_VFS_V0,
.qf_ops = &v2_format_ops,
.qf_owner = THIS_MODULE
};
static struct quota_format_type v2r1_quota_format = {
.qf_fmt_id = QFMT_VFS_V1,
.qf_ops = &v2_format_ops,
.qf_owner = THIS_MODULE
};
static int __init init_v2_quota_format(void)
{
int ret;
ret = register_quota_format(&v2r0_quota_format);
if (ret)
return ret;
return register_quota_format(&v2r1_quota_format);
}
static void __exit exit_v2_quota_format(void)
{
unregister_quota_format(&v2r0_quota_format);
unregister_quota_format(&v2r1_quota_format);
}
module_init(init_v2_quota_format);
module_exit(exit_v2_quota_format);
| linux-master | fs/quota/quota_v2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vfsv0 quota IO operations on file
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <asm/byteorder.h>
#include "quota_tree.h"
MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");
#define __QUOTA_QT_PARANOIA
static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
unsigned int epb = info->dqi_usable_bs >> 2;
depth = info->dqi_qtree_depth - depth - 1;
while (depth--)
id /= epb;
return id % epb;
}
static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
{
qid_t id = from_kqid(&init_user_ns, qid);
return __get_index(info, id, depth);
}
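/*
 * Worked example (not part of the original file): with 1024-byte blocks
 * there are epb == 256 references per tree block, and with the usual tree
 * depth of 4 a 32-bit id is simply split into its four bytes, most
 * significant first:
 *
 *	id 0x12345678: depth 0 -> 0x12, depth 1 -> 0x34,
 *		       depth 2 -> 0x56, depth 3 -> 0x78
 */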
/* Number of entries in one block */
static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
/ info->dqi_entry_size;
}
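/*
 * Worked example (not part of the original file), assuming the v2r1
 * on-disk sizes: with a 1024-byte usable block, a 16-byte
 * qt_disk_dqdbheader and 72-byte entries, (1024 - 16) / 72 == 14 dquot
 * entries fit in one data block.
 */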
static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
struct super_block *sb = info->dqi_sb;
memset(buf, 0, info->dqi_usable_bs);
return sb->s_op->quota_read(sb, info->dqi_type, buf,
info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
}
static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
struct super_block *sb = info->dqi_sb;
ssize_t ret;
ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
if (ret != info->dqi_usable_bs) {
quota_error(sb, "dquota write failed");
if (ret >= 0)
ret = -EIO;
}
return ret;
}
static inline int do_check_range(struct super_block *sb, const char *val_name,
uint val, uint min_val, uint max_val)
{
if (val < min_val || val > max_val) {
quota_error(sb, "Getting %s %u out of range %u-%u",
val_name, val, min_val, max_val);
return -EUCLEAN;
}
return 0;
}
static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
struct qt_disk_dqdbheader *dh)
{
int err = 0;
err = do_check_range(info->dqi_sb, "dqdh_next_free",
le32_to_cpu(dh->dqdh_next_free), 0,
info->dqi_blocks - 1);
if (err)
return err;
err = do_check_range(info->dqi_sb, "dqdh_prev_free",
le32_to_cpu(dh->dqdh_prev_free), 0,
info->dqi_blocks - 1);
if (err)
return err;
err = do_check_range(info->dqi_sb, "dqdh_entries",
le16_to_cpu(dh->dqdh_entries), 0,
qtree_dqstr_in_blk(info));
return err;
}
/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int ret, blk;
if (!buf)
return -ENOMEM;
if (info->dqi_free_blk) {
blk = info->dqi_free_blk;
ret = read_blk(info, blk, buf);
if (ret < 0)
goto out_buf;
ret = check_dquot_block_header(info, dh);
if (ret)
goto out_buf;
info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	} else {
memset(buf, 0, info->dqi_usable_bs);
		/* Ensure the block is allocated... */
ret = write_blk(info, info->dqi_blocks, buf);
if (ret < 0)
goto out_buf;
blk = info->dqi_blocks++;
}
mark_info_dirty(info->dqi_sb, info->dqi_type);
ret = blk;
out_buf:
kfree(buf);
return ret;
}
/* Insert empty block to the list */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int err;
dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
dh->dqdh_prev_free = cpu_to_le32(0);
dh->dqdh_entries = cpu_to_le16(0);
err = write_blk(info, blk, buf);
if (err < 0)
return err;
info->dqi_free_blk = blk;
mark_info_dirty(info->dqi_sb, info->dqi_type);
return 0;
}
/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
uint blk)
{
char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
uint nextblk = le32_to_cpu(dh->dqdh_next_free);
uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
int err;
if (!tmpbuf)
return -ENOMEM;
if (nextblk) {
err = read_blk(info, nextblk, tmpbuf);
if (err < 0)
goto out_buf;
((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
dh->dqdh_prev_free;
err = write_blk(info, nextblk, tmpbuf);
if (err < 0)
goto out_buf;
}
if (prevblk) {
err = read_blk(info, prevblk, tmpbuf);
if (err < 0)
goto out_buf;
((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
dh->dqdh_next_free;
err = write_blk(info, prevblk, tmpbuf);
if (err < 0)
goto out_buf;
} else {
info->dqi_free_entry = nextblk;
mark_info_dirty(info->dqi_sb, info->dqi_type);
}
kfree(tmpbuf);
dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether the write succeeds, the block is off the list */
if (write_blk(info, blk, buf) < 0)
quota_error(info->dqi_sb, "Can't write block (%u) "
"with free entries", blk);
return 0;
out_buf:
kfree(tmpbuf);
return err;
}
/* Insert given block to the beginning of list with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
uint blk)
{
char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
int err;
if (!tmpbuf)
return -ENOMEM;
dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
dh->dqdh_prev_free = cpu_to_le32(0);
err = write_blk(info, blk, buf);
if (err < 0)
goto out_buf;
if (info->dqi_free_entry) {
err = read_blk(info, info->dqi_free_entry, tmpbuf);
if (err < 0)
goto out_buf;
((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
cpu_to_le32(blk);
err = write_blk(info, info->dqi_free_entry, tmpbuf);
if (err < 0)
goto out_buf;
}
kfree(tmpbuf);
info->dqi_free_entry = blk;
mark_info_dirty(info->dqi_sb, info->dqi_type);
return 0;
out_buf:
kfree(tmpbuf);
return err;
}
/* Is the entry in the block free? */
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
int i;
for (i = 0; i < info->dqi_entry_size; i++)
if (disk[i])
return 0;
return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);
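/*
 * Note (not part of the original file): because an all-zero entry means
 * "unused" here, a format must never store a genuinely all-zero dquot.
 * The v2 code therefore escapes such an entry by writing dqb_itime == 1
 * to disk and mapping it back to 0 on read (see the mem2disk/disk2mem
 * helpers in quota_v2.c).
 */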
/* Find space for dquot */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, int *err)
{
uint blk, i;
struct qt_disk_dqdbheader *dh;
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
char *ddquot;
*err = 0;
if (!buf) {
*err = -ENOMEM;
return 0;
}
dh = (struct qt_disk_dqdbheader *)buf;
if (info->dqi_free_entry) {
blk = info->dqi_free_entry;
*err = read_blk(info, blk, buf);
if (*err < 0)
goto out_buf;
*err = check_dquot_block_header(info, dh);
if (*err)
goto out_buf;
} else {
blk = get_free_dqblk(info);
if ((int)blk < 0) {
*err = blk;
kfree(buf);
return 0;
}
memset(buf, 0, info->dqi_usable_bs);
/* This is enough as the block is already zeroed and the entry
* list is empty... */
info->dqi_free_entry = blk;
mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
}
	/* Will the block be full after this insertion? */
if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
*err = remove_free_dqentry(info, buf, blk);
if (*err < 0) {
quota_error(dquot->dq_sb, "Can't remove block (%u) "
"from entry free list", blk);
goto out_buf;
}
}
le16_add_cpu(&dh->dqdh_entries, 1);
/* Find free structure in block */
ddquot = buf + sizeof(struct qt_disk_dqdbheader);
for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
if (qtree_entry_unused(info, ddquot))
break;
ddquot += info->dqi_entry_size;
}
#ifdef __QUOTA_QT_PARANOIA
if (i == qtree_dqstr_in_blk(info)) {
quota_error(dquot->dq_sb, "Data block full but it shouldn't");
*err = -EIO;
goto out_buf;
}
#endif
*err = write_blk(info, blk, buf);
if (*err < 0) {
quota_error(dquot->dq_sb, "Can't write quota data block %u",
blk);
goto out_buf;
}
dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
sizeof(struct qt_disk_dqdbheader) +
i * info->dqi_entry_size;
kfree(buf);
return blk;
out_buf:
kfree(buf);
return 0;
}
/* Insert reference to structure into the trie */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint *treeblk, int depth)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
int ret = 0, newson = 0, newact = 0;
__le32 *ref;
uint newblk;
if (!buf)
return -ENOMEM;
if (!*treeblk) {
ret = get_free_dqblk(info);
if (ret < 0)
goto out_buf;
*treeblk = ret;
memset(buf, 0, info->dqi_usable_bs);
newact = 1;
} else {
ret = read_blk(info, *treeblk, buf);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't read tree quota "
"block %u", *treeblk);
goto out_buf;
}
}
ref = (__le32 *)buf;
newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
ret = do_check_range(dquot->dq_sb, "block", newblk, 0,
info->dqi_blocks - 1);
if (ret)
goto out_buf;
if (!newblk)
newson = 1;
if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
if (newblk) {
quota_error(dquot->dq_sb, "Inserting already present "
"quota entry (block %u)",
le32_to_cpu(ref[get_index(info,
dquot->dq_id, depth)]));
ret = -EIO;
goto out_buf;
}
#endif
newblk = find_free_dqentry(info, dquot, &ret);
} else {
ret = do_insert_tree(info, dquot, &newblk, depth+1);
}
if (newson && ret >= 0) {
ref[get_index(info, dquot->dq_id, depth)] =
cpu_to_le32(newblk);
ret = write_blk(info, *treeblk, buf);
} else if (newact && ret < 0) {
put_free_dqblk(info, buf, *treeblk);
}
out_buf:
kfree(buf);
return ret;
}
/* Wrapper for inserting quota structure into tree */
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
struct dquot *dquot)
{
int tmp = QT_TREEOFF;
#ifdef __QUOTA_QT_PARANOIA
if (info->dqi_blocks <= QT_TREEOFF) {
quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
return -EIO;
}
#endif
return do_insert_tree(info, dquot, &tmp, 0);
}
/*
* We don't have to be afraid of deadlocks as we never have quotas on quota
* files...
*/
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
int type = dquot->dq_id.type;
struct super_block *sb = dquot->dq_sb;
ssize_t ret;
char *ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);
if (!ddquot)
return -ENOMEM;
/* dq_off is guarded by dqio_sem */
if (!dquot->dq_off) {
ret = dq_insert_tree(info, dquot);
if (ret < 0) {
quota_error(sb, "Error %zd occurred while creating "
"quota", ret);
kfree(ddquot);
return ret;
}
}
spin_lock(&dquot->dq_dqb_lock);
info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
spin_unlock(&dquot->dq_dqb_lock);
ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off);
if (ret != info->dqi_entry_size) {
quota_error(sb, "dquota write failed");
if (ret >= 0)
ret = -ENOSPC;
} else {
ret = 0;
}
dqstats_inc(DQST_WRITES);
kfree(ddquot);
return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);
/* Free dquot entry in data block */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint blk)
{
struct qt_disk_dqdbheader *dh;
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
int ret = 0;
if (!buf)
return -ENOMEM;
if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
quota_error(dquot->dq_sb, "Quota structure has offset to "
"other block (%u) than it should (%u)", blk,
(uint)(dquot->dq_off >> info->dqi_blocksize_bits));
ret = -EIO;
goto out_buf;
}
ret = read_blk(info, blk, buf);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't read quota data block %u",
blk);
goto out_buf;
}
dh = (struct qt_disk_dqdbheader *)buf;
ret = check_dquot_block_header(info, dh);
if (ret)
goto out_buf;
le16_add_cpu(&dh->dqdh_entries, -1);
if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */
ret = remove_free_dqentry(info, buf, blk);
if (ret >= 0)
ret = put_free_dqblk(info, buf, blk);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't move quota data block "
"(%u) to free list", blk);
goto out_buf;
}
} else {
memset(buf +
(dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
0, info->dqi_entry_size);
if (le16_to_cpu(dh->dqdh_entries) ==
qtree_dqstr_in_blk(info) - 1) {
/* Insert will write block itself */
ret = insert_free_dqentry(info, buf, blk);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't insert quota "
"data block (%u) to free entry list", blk);
goto out_buf;
}
} else {
ret = write_blk(info, blk, buf);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't write quota "
"data block %u", blk);
goto out_buf;
}
}
}
dquot->dq_off = 0; /* Quota is now unattached */
out_buf:
kfree(buf);
return ret;
}
/* Remove reference to dquot from tree */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
uint *blk, int depth)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
int ret = 0;
uint newblk;
__le32 *ref = (__le32 *)buf;
if (!buf)
return -ENOMEM;
ret = read_blk(info, *blk, buf);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't read quota data block %u",
*blk);
goto out_buf;
}
newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
ret = do_check_range(dquot->dq_sb, "block", newblk, QT_TREEOFF,
info->dqi_blocks - 1);
if (ret)
goto out_buf;
if (depth == info->dqi_qtree_depth - 1) {
ret = free_dqentry(info, dquot, newblk);
newblk = 0;
} else {
ret = remove_tree(info, dquot, &newblk, depth+1);
}
if (ret >= 0 && !newblk) {
int i;
ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
/* Block got empty? */
for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
;
/* Don't put the root block into the free block list */
if (i == (info->dqi_usable_bs >> 2)
&& *blk != QT_TREEOFF) {
put_free_dqblk(info, buf, *blk);
*blk = 0;
} else {
ret = write_blk(info, *blk, buf);
if (ret < 0)
quota_error(dquot->dq_sb,
"Can't write quota tree block %u",
*blk);
}
}
out_buf:
kfree(buf);
return ret;
}
/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
uint tmp = QT_TREEOFF;
if (!dquot->dq_off) /* Even not allocated? */
return 0;
return remove_tree(info, dquot, &tmp, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);
/* Find entry in block */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, uint blk)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
loff_t ret = 0;
int i;
char *ddquot;
if (!buf)
return -ENOMEM;
ret = read_blk(info, blk, buf);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't read quota tree "
"block %u", blk);
goto out_buf;
}
ddquot = buf + sizeof(struct qt_disk_dqdbheader);
for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
if (info->dqi_ops->is_id(ddquot, dquot))
break;
ddquot += info->dqi_entry_size;
}
if (i == qtree_dqstr_in_blk(info)) {
quota_error(dquot->dq_sb,
"Quota for id %u referenced but not present",
from_kqid(&init_user_ns, dquot->dq_id));
ret = -EIO;
goto out_buf;
} else {
ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
qt_disk_dqdbheader) + i * info->dqi_entry_size;
}
out_buf:
kfree(buf);
return ret;
}
/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot, uint blk, int depth)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
loff_t ret = 0;
__le32 *ref = (__le32 *)buf;
if (!buf)
return -ENOMEM;
ret = read_blk(info, blk, buf);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't read quota tree block %u",
blk);
goto out_buf;
}
ret = 0;
blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
if (!blk) /* No reference? */
goto out_buf;
ret = do_check_range(dquot->dq_sb, "block", blk, QT_TREEOFF,
info->dqi_blocks - 1);
if (ret)
goto out_buf;
if (depth < info->dqi_qtree_depth - 1)
ret = find_tree_dqentry(info, dquot, blk, depth+1);
else
ret = find_block_dqentry(info, dquot, blk);
out_buf:
kfree(buf);
return ret;
}
/* Find entry for given id in the tree - wrapper function */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
struct dquot *dquot)
{
return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
int type = dquot->dq_id.type;
struct super_block *sb = dquot->dq_sb;
loff_t offset;
char *ddquot;
int ret = 0;
#ifdef __QUOTA_QT_PARANOIA
/* Invalidated quota? */
if (!sb_dqopt(dquot->dq_sb)->files[type]) {
quota_error(sb, "Quota invalidated while reading!");
return -EIO;
}
#endif
/* Do we know offset of the dquot entry in the quota file? */
if (!dquot->dq_off) {
offset = find_dqentry(info, dquot);
if (offset <= 0) { /* Entry not present? */
if (offset < 0)
quota_error(sb,"Can't read quota structure "
"for id %u",
from_kqid(&init_user_ns,
dquot->dq_id));
dquot->dq_off = 0;
set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
ret = offset;
goto out;
}
dquot->dq_off = offset;
}
ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);
if (!ddquot)
return -ENOMEM;
ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off);
if (ret != info->dqi_entry_size) {
if (ret >= 0)
ret = -EIO;
quota_error(sb, "Error while reading quota structure for id %u",
from_kqid(&init_user_ns, dquot->dq_id));
set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
kfree(ddquot);
goto out;
}
spin_lock(&dquot->dq_dqb_lock);
info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
if (!dquot->dq_dqb.dqb_bhardlimit &&
!dquot->dq_dqb.dqb_bsoftlimit &&
!dquot->dq_dqb.dqb_ihardlimit &&
!dquot->dq_dqb.dqb_isoftlimit)
set_bit(DQ_FAKE_B, &dquot->dq_flags);
spin_unlock(&dquot->dq_dqb_lock);
kfree(ddquot);
out:
dqstats_inc(DQST_READS);
return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);
/* Check whether the dquot should be deleted (it is fake and unused) and
 * delete it if so. We know we are the only one operating on the dquot
 * (thanks to dq_lock). */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
!(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
return qtree_delete_dquot(info, dquot);
return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);
static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
unsigned int blk, int depth)
{
char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
__le32 *ref = (__le32 *)buf;
ssize_t ret;
unsigned int epb = info->dqi_usable_bs >> 2;
unsigned int level_inc = 1;
int i;
if (!buf)
return -ENOMEM;
for (i = depth; i < info->dqi_qtree_depth - 1; i++)
level_inc *= epb;
ret = read_blk(info, blk, buf);
if (ret < 0) {
quota_error(info->dqi_sb,
"Can't read quota tree block %u", blk);
goto out_buf;
}
for (i = __get_index(info, *id, depth); i < epb; i++) {
uint blk_no = le32_to_cpu(ref[i]);
if (blk_no == 0) {
*id += level_inc;
continue;
}
ret = do_check_range(info->dqi_sb, "block", blk_no, 0,
info->dqi_blocks - 1);
if (ret)
goto out_buf;
if (depth == info->dqi_qtree_depth - 1) {
ret = 0;
goto out_buf;
}
ret = find_next_id(info, id, blk_no, depth + 1);
if (ret != -ENOENT)
break;
}
if (i == epb) {
ret = -ENOENT;
goto out_buf;
}
out_buf:
kfree(buf);
return ret;
}
int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
{
qid_t id = from_kqid(&init_user_ns, *qid);
int ret;
ret = find_next_id(info, &id, QT_TREEOFF, 0);
if (ret < 0)
return ret;
*qid = make_kqid(&init_user_ns, qid->type, id);
return 0;
}
EXPORT_SYMBOL(qtree_get_next_id);
| linux-master | fs/quota/quota_tree.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Implementation of the diskquota system for the LINUX operating system. QUOTA
* is implemented using the BSD system call interface as the means of
* communication with the user level. This file contains the generic routines
* called by the different filesystems on allocation of an inode or block.
* These routines take care of the administration needed to have a consistent
* diskquota tracking system. The ideas of both user and group quotas are based
* on the Melbourne quota system as used on BSD derived systems. The internal
* implementation is based on one of the several variants of the LINUX
* inode-subsystem with added complexity of the diskquota system.
*
* Author: Marco van Wieringen <[email protected]>
*
* Fixes: Dmitry Gorodchanin <[email protected]>, 11 Feb 96
*
* Revised list management to avoid races
* -- Bill Hawes, <[email protected]>, 9/98
*
* Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
* As the consequence the locking was moved from dquot_decr_...(),
* dquot_incr_...() to calling functions.
* invalidate_dquots() now writes modified dquots.
* Serialized quota_off() and quota_on() for mount point.
* Fixed a few bugs in grow_dquots().
* Fixed deadlock in write_dquot() - we no longer account quotas on
* quota files
* remove_dquot_ref() moved to inode.c - it now traverses through inodes
* add_dquot_ref() restarts after blocking
* Added check for bogus uid and fixed check for group in quotactl.
* Jan Kara, <[email protected]>, sponsored by SuSE CR, 10-11/99
*
* Used struct list_head instead of own list struct
* Invalidation of referenced dquots is no longer possible
* Improved free_dquots list management
* Quota and i_blocks are now updated in one place to avoid races
* Warnings are now delayed so we won't block in critical section
* Write updated not to require dquot lock
* Jan Kara, <[email protected]>, 9/2000
*
* Added dynamic quota structure allocation
* Jan Kara <[email protected]> 12/2000
*
* Rewritten quota interface. Implemented new quota format and
* formats registering.
* Jan Kara, <[email protected]>, 2001,2002
*
* New SMP locking.
* Jan Kara, <[email protected]>, 10/2002
*
* Added journalled quota support, fix lock inversion problems
* Jan Kara, <[email protected]>, 2003,2004
*
* (C) Copyright 1994 - 1997 Marco van Wieringen
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include "../internal.h" /* ugh */
#include <linux/uaccess.h>
/*
* There are five quota SMP locks:
* * dq_list_lock protects all lists with quotas and quota formats.
* * dquot->dq_dqb_lock protects data from dq_dqb
* * inode->i_lock protects inode->i_blocks, i_bytes and also guards
* consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
* dquot_transfer() can stabilize amount it transfers
* * dq_data_lock protects mem_dqinfo structures and modifications of dquot
* pointers in the inode
* * dq_state_lock protects modifications of quota state (on quotaon and
* quotaoff) and readers who care about latest values take it as well.
*
* The spinlock ordering is hence:
* dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
* dq_list_lock > dq_state_lock
*
 * Note that some things (e.g. sb pointer, type, id) don't change during
 * the life of the dquot structure and so needn't be protected by a lock
*
* Operation accessing dquots via inode pointers are protected by dquot_srcu.
* Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and
* synchronize_srcu(&dquot_srcu) is called after clearing pointers from
* inode and before dropping dquot references to avoid use of dquots after
* they are freed. dq_data_lock is used to serialize the pointer setting and
* clearing operations.
* Special care needs to be taken about S_NOQUOTA inode flag (marking that
* inode is a quota file). Functions adding pointers from inode to dquots have
* to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
* have to do all pointer modifications before dropping dq_data_lock. This makes
* sure they cannot race with quotaon which first sets S_NOQUOTA flag and
* then drops all pointers to dquots from an inode.
*
* Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
* memory (or space for it is being allocated) on the first dqget(), when it is
* being written out, and when it is being released on the last dqput(). The
* allocation and release operations are serialized by the dq_lock and by
* checking the use count in dquot_release().
*
* Lock ordering (including related VFS locks) is the following:
* s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
*/
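/*
 * Reader-side sketch for the dquot_srcu scheme described above (not part
 * of the original file); i_dquot() stands for the per-inode dquot pointer
 * array accessor:
 *
 *	int idx = srcu_read_lock(&dquot_srcu);
 *	struct dquot *dquot = i_dquot(inode)[type];
 *	if (dquot)
 *		// safe to use: the clearing path calls synchronize_srcu()
 *		// before dropping its references, so the dquot cannot be
 *		// freed while we are inside the read-side section
 *	srcu_read_unlock(&dquot_srcu, idx);
 */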
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
DEFINE_STATIC_SRCU(dquot_srcu);
static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
void __quota_error(struct super_block *sb, const char *func,
const char *fmt, ...)
{
if (printk_ratelimit()) {
va_list args;
struct va_format vaf;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
sb->s_id, func, &vaf);
va_end(args);
}
}
EXPORT_SYMBOL(__quota_error);
#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
static struct quota_format_type *quota_formats; /* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;
int register_quota_format(struct quota_format_type *fmt)
{
spin_lock(&dq_list_lock);
fmt->qf_next = quota_formats;
quota_formats = fmt;
spin_unlock(&dq_list_lock);
return 0;
}
EXPORT_SYMBOL(register_quota_format);
void unregister_quota_format(struct quota_format_type *fmt)
{
struct quota_format_type **actqf;
spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
actqf = &(*actqf)->qf_next)
;
if (*actqf)
*actqf = (*actqf)->qf_next;
spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);
static struct quota_format_type *find_quota_format(int id)
{
struct quota_format_type *actqf;
spin_lock(&dq_list_lock);
for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
actqf = actqf->qf_next)
;
if (!actqf || !try_module_get(actqf->qf_owner)) {
int qm;
spin_unlock(&dq_list_lock);
for (qm = 0; module_names[qm].qm_fmt_id &&
module_names[qm].qm_fmt_id != id; qm++)
;
if (!module_names[qm].qm_fmt_id ||
request_module(module_names[qm].qm_mod_name))
return NULL;
spin_lock(&dq_list_lock);
for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
actqf = actqf->qf_next)
;
if (actqf && !try_module_get(actqf->qf_owner))
actqf = NULL;
}
spin_unlock(&dq_list_lock);
return actqf;
}
static void put_quota_format(struct quota_format_type *fmt)
{
module_put(fmt->qf_owner);
}
/*
* Dquot List Management:
* The quota code uses five lists for dquot management: the inuse_list,
* releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
* A single dquot structure may be on some of those lists, depending on
* its current state.
*
 * All dquots are placed at the end of inuse_list when first created, and this
* list is used for invalidate operation, which must look at every dquot.
*
 * When the last reference to a dquot is dropped, the dquot is added to
 * releasing_dquots. We then queue a work item which calls
 * synchronize_srcu() and after that performs the final cleanup of all the
* dquots on the list. Both releasing_dquots and free_dquots use the
* dq_free list_head in the dquot struct. When a dquot is removed from
 * releasing_dquots, a reference count is always subtracted, and if
 * dq_count == 0 at that point, the dquot is added to free_dquots.
*
* Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
* and this list is searched whenever we need an available dquot. Dquots are
* removed from the list as soon as they are used again, and
 * dqstats.free_dquots gives the number of dquots on the list. When a
 * dquot is invalidated, it is completely released from memory.
*
 * Dirty dquots are added to the dqi_dirty_list of their quota_info when
 * marked dirty, and this list is searched when writing dirty dquots back to
* quota file. Note that some filesystems do dirty dquot tracking on their
* own (e.g. in a journal) and thus don't use dqi_dirty_list.
*
* Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
* mechanism to locate a specific dquot.
*/
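/*
 * Compact summary of the lifecycle described above (illustrative):
 *
 *	dqget() -> inuse_list + dquot_hash[]	(dq_count >= 1)
 *	  last dqput() -> releasing_dquots	(cleanup deferred to work item)
 *	    quota_release_work: write back / release, then
 *	      dq_count == 0 -> free_dquots	(reclaimable)
 *	        reused by dqget(), or destroyed by the shrinker /
 *	        invalidate_dquots()
 */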
static LIST_HEAD(inuse_list);
static LIST_HEAD(free_dquots);
static LIST_HEAD(releasing_dquots);
static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;
struct dqstats dqstats;
EXPORT_SYMBOL(dqstats);
static qsize_t inode_get_rsv_space(struct inode *inode);
static qsize_t __inode_get_rsv_space(struct inode *inode);
static int __dquot_initialize(struct inode *inode, int type);
static void quota_release_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
unsigned int id = from_kqid(&init_user_ns, qid);
int type = qid.type;
unsigned long tmp;
tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}
/*
* Following list functions expect dq_list_lock to be held
*/
static inline void insert_dquot_hash(struct dquot *dquot)
{
struct hlist_head *head;
head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
hlist_add_head(&dquot->dq_hash, head);
}
static inline void remove_dquot_hash(struct dquot *dquot)
{
hlist_del_init(&dquot->dq_hash);
}
static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
struct kqid qid)
{
struct dquot *dquot;
hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
return dquot;
return NULL;
}
/* Add a dquot to the tail of the free list */
static inline void put_dquot_last(struct dquot *dquot)
{
list_add_tail(&dquot->dq_free, &free_dquots);
dqstats_inc(DQST_FREE_DQUOTS);
}
static inline void put_releasing_dquots(struct dquot *dquot)
{
list_add_tail(&dquot->dq_free, &releasing_dquots);
}
static inline void remove_free_dquot(struct dquot *dquot)
{
if (list_empty(&dquot->dq_free))
return;
list_del_init(&dquot->dq_free);
if (!atomic_read(&dquot->dq_count))
dqstats_dec(DQST_FREE_DQUOTS);
}
static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of the inuse list so we don't have to restart
	 * traversing this list when we block */
list_add_tail(&dquot->dq_inuse, &inuse_list);
dqstats_inc(DQST_ALLOC_DQUOTS);
}
static inline void remove_inuse(struct dquot *dquot)
{
dqstats_dec(DQST_ALLOC_DQUOTS);
list_del(&dquot->dq_inuse);
}
/*
* End of list functions needing dq_list_lock
*/
static void wait_on_dquot(struct dquot *dquot)
{
mutex_lock(&dquot->dq_lock);
mutex_unlock(&dquot->dq_lock);
}
static inline int dquot_active(struct dquot *dquot)
{
return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}
static inline int dquot_dirty(struct dquot *dquot)
{
return test_bit(DQ_MOD_B, &dquot->dq_flags);
}
static inline int mark_dquot_dirty(struct dquot *dquot)
{
return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
/* Mark dquot dirty in an atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
int ret = 1;
if (!dquot_active(dquot))
return 0;
if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
/* If quota is dirty already, we don't have to acquire dq_list_lock */
if (dquot_dirty(dquot))
return 1;
spin_lock(&dq_list_lock);
if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
info[dquot->dq_id.type].dqi_dirty_list);
ret = 0;
}
spin_unlock(&dq_list_lock);
return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
{
int ret, err, cnt;
ret = err = 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (dquot[cnt])
/* Even in case of error we have to continue */
ret = mark_dquot_dirty(dquot[cnt]);
if (!err)
err = ret;
}
return err;
}
static inline void dqput_all(struct dquot **dquot)
{
unsigned int cnt;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
dqput(dquot[cnt]);
}
static inline int clear_dquot_dirty(struct dquot *dquot)
{
if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);
spin_lock(&dq_list_lock);
if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
spin_unlock(&dq_list_lock);
return 0;
}
list_del_init(&dquot->dq_dirty);
spin_unlock(&dq_list_lock);
return 1;
}
void mark_info_dirty(struct super_block *sb, int type)
{
spin_lock(&dq_data_lock);
sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);
/*
* Read dquot from disk and alloc space for it
*/
int dquot_acquire(struct dquot *dquot)
{
int ret = 0, ret2 = 0;
unsigned int memalloc;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
mutex_lock(&dquot->dq_lock);
memalloc = memalloc_nofs_save();
if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
if (ret < 0)
goto out_iolock;
}
/* Make sure flags update is visible after dquot has been filled */
smp_mb__before_atomic();
set_bit(DQ_READ_B, &dquot->dq_flags);
/* Instantiate dquot if needed */
if (!dquot_active(dquot) && !dquot->dq_off) {
ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
/* Write the info if needed */
if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
dquot->dq_sb, dquot->dq_id.type);
}
if (ret < 0)
goto out_iolock;
if (ret2 < 0) {
ret = ret2;
goto out_iolock;
}
}
/*
* Make sure flags update is visible after on-disk struct has been
* allocated. Paired with smp_rmb() in dqget().
*/
smp_mb__before_atomic();
set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
memalloc_nofs_restore(memalloc);
mutex_unlock(&dquot->dq_lock);
return ret;
}
EXPORT_SYMBOL(dquot_acquire);
/*
* Write dquot to disk
*/
int dquot_commit(struct dquot *dquot)
{
int ret = 0;
unsigned int memalloc;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
mutex_lock(&dquot->dq_lock);
memalloc = memalloc_nofs_save();
if (!clear_dquot_dirty(dquot))
goto out_lock;
	/* A dquot can be inactive only if there was an error during read/init
	 * => we had better not write it */
if (dquot_active(dquot))
ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
else
ret = -EIO;
out_lock:
memalloc_nofs_restore(memalloc);
mutex_unlock(&dquot->dq_lock);
return ret;
}
EXPORT_SYMBOL(dquot_commit);
/*
* Release dquot
*/
int dquot_release(struct dquot *dquot)
{
int ret = 0, ret2 = 0;
unsigned int memalloc;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
mutex_lock(&dquot->dq_lock);
memalloc = memalloc_nofs_save();
/* Check whether we are not racing with some other dqget() */
if (dquot_is_busy(dquot))
goto out_dqlock;
if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
/* Write the info */
if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
dquot->dq_sb, dquot->dq_id.type);
}
if (ret >= 0)
ret = ret2;
}
clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
memalloc_nofs_restore(memalloc);
mutex_unlock(&dquot->dq_lock);
return ret;
}
EXPORT_SYMBOL(dquot_release);
void dquot_destroy(struct dquot *dquot)
{
kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);
static inline void do_destroy_dquot(struct dquot *dquot)
{
dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
/* Invalidate all dquots on the list. Note that this function is called after
* quota is disabled and pointers from inodes removed so there cannot be new
* quota users. There can still be some users of quotas due to inodes being
* just deleted or pruned by prune_icache() (those are not attached to any
* list) or parallel quotactl call. We have to wait for such users.
*/
static void invalidate_dquots(struct super_block *sb, int type)
{
struct dquot *dquot, *tmp;
restart:
	flush_delayed_work(&quota_release_work);
spin_lock(&dq_list_lock);
list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
if (dquot->dq_sb != sb)
continue;
if (dquot->dq_id.type != type)
continue;
/* Wait for dquot users */
if (atomic_read(&dquot->dq_count)) {
/* dquot in releasing_dquots, flush and retry */
if (!list_empty(&dquot->dq_free)) {
spin_unlock(&dq_list_lock);
goto restart;
}
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
/*
* Once dqput() wakes us up, we know it's time to free
* the dquot.
* IMPORTANT: we rely on the fact that there is always
* at most one process waiting for dquot to free.
* Otherwise dq_count would be > 1 and we would never
* wake up.
*/
wait_event(dquot_ref_wq,
atomic_read(&dquot->dq_count) == 1);
dqput(dquot);
			/* At this moment the dquot need not exist (it could
			 * have been reclaimed by prune_dqcache()). Hence we
			 * must restart. */
goto restart;
}
/*
* Quota now has no users and it has been written on last
* dqput()
*/
remove_dquot_hash(dquot);
remove_free_dquot(dquot);
remove_inuse(dquot);
do_destroy_dquot(dquot);
}
spin_unlock(&dq_list_lock);
}
/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
int (*fn)(struct dquot *dquot, unsigned long priv),
unsigned long priv)
{
struct dquot *dquot, *old_dquot = NULL;
int ret = 0;
WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
spin_lock(&dq_list_lock);
list_for_each_entry(dquot, &inuse_list, dq_inuse) {
if (!dquot_active(dquot))
continue;
if (dquot->dq_sb != sb)
continue;
/* Now we have active dquot so we can just increase use count */
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
dqput(old_dquot);
old_dquot = dquot;
/*
* ->release_dquot() can be racing with us. Our reference
* protects us from new calls to it so just wait for any
		 * outstanding call and recheck DQ_ACTIVE_B after that.
*/
wait_on_dquot(dquot);
if (dquot_active(dquot)) {
ret = fn(dquot, priv);
if (ret < 0)
goto out;
}
spin_lock(&dq_list_lock);
/* We are safe to continue now because our dquot could not
* be moved out of the inuse list while we hold the reference */
}
spin_unlock(&dq_list_lock);
out:
dqput(old_dquot);
return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
static inline int dquot_write_dquot(struct dquot *dquot)
{
int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
if (ret < 0) {
quota_error(dquot->dq_sb, "Can't write quota structure "
"(error %d). Quota may get out of sync!", ret);
/* Clear dirty bit anyway to avoid infinite loop. */
clear_dquot_dirty(dquot);
}
return ret;
}
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
struct list_head dirty;
struct dquot *dquot;
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
int err, ret = 0;
WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
spin_lock(&dq_list_lock);
/* Move list away to avoid livelock. */
list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
while (!list_empty(&dirty)) {
dquot = list_first_entry(&dirty, struct dquot,
dq_dirty);
WARN_ON(!dquot_active(dquot));
/* Now we have active dquot from which someone is
* holding reference so we can safely just increase
* use count */
dqgrab(dquot);
spin_unlock(&dq_list_lock);
err = dquot_write_dquot(dquot);
if (err && !ret)
ret = err;
dqput(dquot);
spin_lock(&dq_list_lock);
}
spin_unlock(&dq_list_lock);
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
&& info_dirty(&dqopt->info[cnt]))
sb->dq_op->write_info(sb, cnt);
dqstats_inc(DQST_SYNCS);
return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);
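/*
 * Usage sketch (assumption, not from this file): filesystems supporting
 * quotas typically call dquot_writeback_dquots() from their ->sync_fs()
 * method so that dirty dquots reach the quota file together with the rest
 * of the metadata. The function and filesystem names are hypothetical:
 *
 *	static int myfs_sync_fs(struct super_block *sb, int wait)
 *	{
 *		int ret = dquot_writeback_dquots(sb, -1);
 *
 *		if (ret < 0)
 *			return ret;
 *		// ... write out filesystem-specific metadata ...
 *		return 0;
 *	}
 */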
/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
int ret;
ret = dquot_writeback_dquots(sb, type);
if (ret)
return ret;
if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
return 0;
	/* This is not very clever (or fast) but currently I don't know of
	 * any other simple way of getting quota data to disk and we must
	 * get it there for it to be visible to userspace... */
if (sb->s_op->sync_fs) {
ret = sb->s_op->sync_fs(sb, 1);
if (ret)
return ret;
}
ret = sync_blockdev(sb->s_bdev);
if (ret)
return ret;
/*
* Now when everything is written we can discard the pagecache so
* that userspace sees the changes.
*/
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
inode_lock(dqopt->files[cnt]);
truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
inode_unlock(dqopt->files[cnt]);
}
return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);
static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct dquot *dquot;
unsigned long freed = 0;
spin_lock(&dq_list_lock);
while (!list_empty(&free_dquots) && sc->nr_to_scan) {
dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
remove_dquot_hash(dquot);
remove_free_dquot(dquot);
remove_inuse(dquot);
do_destroy_dquot(dquot);
sc->nr_to_scan--;
freed++;
}
spin_unlock(&dq_list_lock);
return freed;
}
static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
return vfs_pressure_ratio(
percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
static struct shrinker dqcache_shrinker = {
.count_objects = dqcache_shrink_count,
.scan_objects = dqcache_shrink_scan,
.seeks = DEFAULT_SEEKS,
};
/*
* Safely release dquot and put reference to dquot.
*/
static void quota_release_workfn(struct work_struct *work)
{
struct dquot *dquot;
struct list_head rls_head;
spin_lock(&dq_list_lock);
/* Exchange the list head to avoid livelock. */
list_replace_init(&releasing_dquots, &rls_head);
spin_unlock(&dq_list_lock);
restart:
synchronize_srcu(&dquot_srcu);
spin_lock(&dq_list_lock);
while (!list_empty(&rls_head)) {
dquot = list_first_entry(&rls_head, struct dquot, dq_free);
/* Dquot got used again? */
if (atomic_read(&dquot->dq_count) > 1) {
remove_free_dquot(dquot);
atomic_dec(&dquot->dq_count);
continue;
}
if (dquot_dirty(dquot)) {
spin_unlock(&dq_list_lock);
/* Commit dquot before releasing */
dquot_write_dquot(dquot);
goto restart;
}
if (dquot_active(dquot)) {
spin_unlock(&dq_list_lock);
dquot->dq_sb->dq_op->release_dquot(dquot);
goto restart;
}
/* Dquot is inactive and clean, now move it to free list */
remove_free_dquot(dquot);
atomic_dec(&dquot->dq_count);
put_dquot_last(dquot);
}
spin_unlock(&dq_list_lock);
}
/*
* Put reference to dquot
*/
void dqput(struct dquot *dquot)
{
if (!dquot)
return;
#ifdef CONFIG_QUOTA_DEBUG
if (!atomic_read(&dquot->dq_count)) {
quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
quotatypes[dquot->dq_id.type],
from_kqid(&init_user_ns, dquot->dq_id));
BUG();
}
#endif
dqstats_inc(DQST_DROPS);
spin_lock(&dq_list_lock);
if (atomic_read(&dquot->dq_count) > 1) {
/* We have more than one user... nothing to do */
atomic_dec(&dquot->dq_count);
/* Releasing dquot during quotaoff phase? */
if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
atomic_read(&dquot->dq_count) == 1)
wake_up(&dquot_ref_wq);
spin_unlock(&dq_list_lock);
return;
}
/* Need to release dquot? */
#ifdef CONFIG_QUOTA_DEBUG
/* sanity check */
BUG_ON(!list_empty(&dquot->dq_free));
#endif
put_releasing_dquots(dquot);
spin_unlock(&dq_list_lock);
	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);
static struct dquot *get_empty_dquot(struct super_block *sb, int type)
{
struct dquot *dquot;
dquot = sb->dq_op->alloc_dquot(sb, type);
	if (!dquot)
return NULL;
mutex_init(&dquot->dq_lock);
INIT_LIST_HEAD(&dquot->dq_free);
INIT_LIST_HEAD(&dquot->dq_inuse);
INIT_HLIST_NODE(&dquot->dq_hash);
INIT_LIST_HEAD(&dquot->dq_dirty);
dquot->dq_sb = sb;
dquot->dq_id = make_kqid_invalid(type);
atomic_set(&dquot->dq_count, 1);
spin_lock_init(&dquot->dq_dqb_lock);
return dquot;
}
/*
* Get reference to dquot
*
* Locking is slightly tricky here. We are guarded from parallel quotaoff()
* destroying our dquot by:
* a) checking for quota flags under dq_list_lock and
* b) getting a reference to dquot before we release dq_list_lock
*/
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
unsigned int hashent = hashfn(sb, qid);
struct dquot *dquot, *empty = NULL;
if (!qid_has_mapping(sb->s_user_ns, qid))
return ERR_PTR(-EINVAL);
if (!sb_has_quota_active(sb, qid.type))
return ERR_PTR(-ESRCH);
we_slept:
spin_lock(&dq_list_lock);
spin_lock(&dq_state_lock);
if (!sb_has_quota_active(sb, qid.type)) {
spin_unlock(&dq_state_lock);
spin_unlock(&dq_list_lock);
dquot = ERR_PTR(-ESRCH);
goto out;
}
spin_unlock(&dq_state_lock);
dquot = find_dquot(hashent, sb, qid);
if (!dquot) {
if (!empty) {
spin_unlock(&dq_list_lock);
empty = get_empty_dquot(sb, qid.type);
if (!empty)
schedule(); /* Try to wait for a moment... */
goto we_slept;
}
dquot = empty;
empty = NULL;
dquot->dq_id = qid;
/* all dquots go on the inuse_list */
put_inuse(dquot);
/* hash it first so it can be found */
insert_dquot_hash(dquot);
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
} else {
if (!atomic_read(&dquot->dq_count))
remove_free_dquot(dquot);
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_CACHE_HITS);
dqstats_inc(DQST_LOOKUPS);
}
/* Wait for dq_lock - after this we know that either dquot_release() is
* already finished or it will be canceled due to dq_count > 1 test */
wait_on_dquot(dquot);
/* Read the dquot / allocate space in quota file */
if (!dquot_active(dquot)) {
int err;
err = sb->dq_op->acquire_dquot(dquot);
if (err < 0) {
dqput(dquot);
dquot = ERR_PTR(err);
goto out;
}
}
/*
* Make sure following reads see filled structure - paired with
* smp_mb__before_atomic() in dquot_acquire().
*/
smp_rmb();
#ifdef CONFIG_QUOTA_DEBUG
BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
#endif
out:
if (empty)
do_destroy_dquot(empty);
return dquot;
}
EXPORT_SYMBOL(dqget);
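/*
 * Usage sketch (illustrative, mirroring dquot_transfer() below): the usual
 * dqget()/dqput() pattern treats -ESRCH (quotas turned off) as "no dquot"
 * rather than as an error, and relies on dqput(NULL) being a no-op. Here
 * 'uid' stands for some kuid_t of interest:
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(uid));
 *
 *	if (IS_ERR(dquot)) {
 *		if (PTR_ERR(dquot) != -ESRCH)
 *			return PTR_ERR(dquot);	// real error
 *		dquot = NULL;			// quotas are off
 *	}
 *	// ... use the dquot (reference held) ...
 *	dqput(dquot);
 */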
static inline struct dquot **i_dquot(struct inode *inode)
{
return inode->i_sb->s_op->get_dquots(inode);
}
static int dqinit_needed(struct inode *inode, int type)
{
struct dquot * const *dquots;
int cnt;
if (IS_NOQUOTA(inode))
return 0;
dquots = i_dquot(inode);
if (type != -1)
return !dquots[type];
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (!dquots[cnt])
return 1;
return 0;
}
/* This routine is guarded by s_umount semaphore */
static int add_dquot_ref(struct super_block *sb, int type)
{
struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
int reserved = 0;
#endif
int err = 0;
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
!atomic_read(&inode->i_writecount) ||
!dqinit_needed(inode, type)) {
spin_unlock(&inode->i_lock);
continue;
}
__iget(inode);
spin_unlock(&inode->i_lock);
spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1;
#endif
iput(old_inode);
err = __dquot_initialize(inode, type);
if (err) {
iput(inode);
goto out;
}
/*
* We hold a reference to 'inode' so it couldn't have been
* removed from s_inodes list while we dropped the
* s_inode_list_lock. We cannot iput the inode now as we can be
* holding the last reference and we cannot iput it under
* s_inode_list_lock. So we keep the reference and iput it
* later.
*/
old_inode = inode;
cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);
iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
if (reserved) {
quota_error(sb, "Writes happened before quota was turned on "
"thus quota information is probably inconsistent. "
"Please run quotacheck(8)");
}
#endif
return err;
}
static void remove_dquot_ref(struct super_block *sb, int type)
{
struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
int reserved = 0;
#endif
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
/*
* We have to scan also I_NEW inodes because they can already
* have quota pointer initialized. Luckily, we need to touch
* only quota pointers and these have separate locking
* (dq_data_lock).
*/
spin_lock(&dq_data_lock);
if (!IS_NOQUOTA(inode)) {
struct dquot **dquots = i_dquot(inode);
struct dquot *dquot = dquots[type];
#ifdef CONFIG_QUOTA_DEBUG
if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1;
#endif
dquots[type] = NULL;
if (dquot)
dqput(dquot);
}
spin_unlock(&dq_data_lock);
}
spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
if (reserved) {
printk(KERN_WARNING "VFS (%s): Writes happened after quota"
" was disabled thus quota information is probably "
"inconsistent. Please run quotacheck(8).\n", sb->s_id);
}
#endif
}
/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
if (sb->dq_op)
remove_dquot_ref(sb, type);
}
static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
if (dquot->dq_dqb.dqb_rsvspace >= number)
dquot->dq_dqb.dqb_rsvspace -= number;
else {
WARN_ON_ONCE(1);
dquot->dq_dqb.dqb_rsvspace = 0;
}
if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
dquot->dq_dqb.dqb_bsoftlimit)
dquot->dq_dqb.dqb_btime = (time64_t) 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curinodes >= number)
dquot->dq_dqb.dqb_curinodes -= number;
else
dquot->dq_dqb.dqb_curinodes = 0;
if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
dquot->dq_dqb.dqb_itime = (time64_t) 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
}
static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
dquot->dq_dqb.dqb_curspace >= number)
dquot->dq_dqb.dqb_curspace -= number;
else
dquot->dq_dqb.dqb_curspace = 0;
if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
dquot->dq_dqb.dqb_bsoftlimit)
dquot->dq_dqb.dqb_btime = (time64_t) 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
struct dquot_warn {
struct super_block *w_sb;
struct kqid w_dq_id;
short w_type;
};
static int warning_issued(struct dquot *dquot, const int warntype)
{
int flag = (warntype == QUOTA_NL_BHARDWARN ||
warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
((warntype == QUOTA_NL_IHARDWARN ||
warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
if (!flag)
return 0;
return test_and_set_bit(flag, &dquot->dq_flags);
}
#ifdef CONFIG_PRINT_QUOTA_WARNING
static int flag_print_warnings = 1;
static int need_print_warning(struct dquot_warn *warn)
{
if (!flag_print_warnings)
return 0;
switch (warn->w_dq_id.type) {
case USRQUOTA:
return uid_eq(current_fsuid(), warn->w_dq_id.uid);
case GRPQUOTA:
return in_group_p(warn->w_dq_id.gid);
case PRJQUOTA:
return 1;
}
return 0;
}
/* Print a warning to the user who exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
char *msg = NULL;
struct tty_struct *tty;
int warntype = warn->w_type;
if (warntype == QUOTA_NL_IHARDBELOW ||
warntype == QUOTA_NL_ISOFTBELOW ||
warntype == QUOTA_NL_BHARDBELOW ||
warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
return;
tty = get_current_tty();
if (!tty)
return;
tty_write_message(tty, warn->w_sb->s_id);
if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
tty_write_message(tty, ": warning, ");
else
tty_write_message(tty, ": write failed, ");
tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
switch (warntype) {
case QUOTA_NL_IHARDWARN:
msg = " file limit reached.\r\n";
break;
case QUOTA_NL_ISOFTLONGWARN:
msg = " file quota exceeded too long.\r\n";
break;
case QUOTA_NL_ISOFTWARN:
msg = " file quota exceeded.\r\n";
break;
case QUOTA_NL_BHARDWARN:
msg = " block limit reached.\r\n";
break;
case QUOTA_NL_BSOFTLONGWARN:
msg = " block quota exceeded too long.\r\n";
break;
case QUOTA_NL_BSOFTWARN:
msg = " block quota exceeded.\r\n";
break;
}
tty_write_message(tty, msg);
tty_kref_put(tty);
}
#endif
static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
int warntype)
{
if (warning_issued(dquot, warntype))
return;
warn->w_type = warntype;
warn->w_sb = dquot->dq_sb;
warn->w_dq_id = dquot->dq_id;
}
/*
* Write warnings to the console and send warning messages over netlink.
*
* Note that this function can call into tty and networking code.
*/
static void flush_warnings(struct dquot_warn *warn)
{
int i;
for (i = 0; i < MAXQUOTAS; i++) {
if (warn[i].w_type == QUOTA_NL_NOWARN)
continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
print_warning(&warn[i]);
#endif
quota_send_warning(warn[i].w_dq_id,
warn[i].w_sb->s_dev, warn[i].w_type);
}
}
static int ignore_hardlimit(struct dquot *dquot)
{
struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
return capable(CAP_SYS_RESOURCE) &&
(info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
!(info->dqi_flags & DQF_ROOT_SQUASH));
}
static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
struct dquot_warn *warn)
{
qsize_t newinodes;
int ret = 0;
spin_lock(&dquot->dq_dqb_lock);
newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
goto add;
if (dquot->dq_dqb.dqb_ihardlimit &&
newinodes > dquot->dq_dqb.dqb_ihardlimit &&
!ignore_hardlimit(dquot)) {
prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
ret = -EDQUOT;
goto out;
}
if (dquot->dq_dqb.dqb_isoftlimit &&
newinodes > dquot->dq_dqb.dqb_isoftlimit &&
dquot->dq_dqb.dqb_itime &&
ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
!ignore_hardlimit(dquot)) {
prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
ret = -EDQUOT;
goto out;
}
if (dquot->dq_dqb.dqb_isoftlimit &&
newinodes > dquot->dq_dqb.dqb_isoftlimit &&
dquot->dq_dqb.dqb_itime == 0) {
prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
}
add:
dquot->dq_dqb.dqb_curinodes = newinodes;
out:
spin_unlock(&dquot->dq_dqb_lock);
return ret;
}
static int dquot_add_space(struct dquot *dquot, qsize_t space,
qsize_t rsv_space, unsigned int flags,
struct dquot_warn *warn)
{
qsize_t tspace;
struct super_block *sb = dquot->dq_sb;
int ret = 0;
spin_lock(&dquot->dq_dqb_lock);
if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
goto finish;
tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
+ space + rsv_space;
if (dquot->dq_dqb.dqb_bhardlimit &&
tspace > dquot->dq_dqb.dqb_bhardlimit &&
!ignore_hardlimit(dquot)) {
if (flags & DQUOT_SPACE_WARN)
prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
ret = -EDQUOT;
goto finish;
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
tspace > dquot->dq_dqb.dqb_bsoftlimit &&
dquot->dq_dqb.dqb_btime &&
ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
!ignore_hardlimit(dquot)) {
if (flags & DQUOT_SPACE_WARN)
prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
ret = -EDQUOT;
goto finish;
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
tspace > dquot->dq_dqb.dqb_bsoftlimit &&
dquot->dq_dqb.dqb_btime == 0) {
if (flags & DQUOT_SPACE_WARN) {
prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
} else {
			/*
			 * We don't allow preallocation to exceed the soft
			 * limit, so exceeding it will always be reported.
			 */
ret = -EDQUOT;
goto finish;
}
}
finish:
/*
* We have to be careful and go through warning generation & grace time
* setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
* only here...
*/
if (flags & DQUOT_SPACE_NOFAIL)
ret = 0;
if (!ret) {
dquot->dq_dqb.dqb_rsvspace += rsv_space;
dquot->dq_dqb.dqb_curspace += space;
}
spin_unlock(&dquot->dq_dqb_lock);
return ret;
}
static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
qsize_t newinodes;
if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
return QUOTA_NL_NOWARN;
newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
return QUOTA_NL_ISOFTBELOW;
if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
newinodes < dquot->dq_dqb.dqb_ihardlimit)
return QUOTA_NL_IHARDBELOW;
return QUOTA_NL_NOWARN;
}
static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
qsize_t tspace;
tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
tspace <= dquot->dq_dqb.dqb_bsoftlimit)
return QUOTA_NL_NOWARN;
if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
return QUOTA_NL_BSOFTBELOW;
if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
tspace - space < dquot->dq_dqb.dqb_bhardlimit)
return QUOTA_NL_BHARDBELOW;
return QUOTA_NL_NOWARN;
}
static int inode_quota_active(const struct inode *inode)
{
struct super_block *sb = inode->i_sb;
if (IS_NOQUOTA(inode))
return 0;
return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}
/*
* Initialize quota pointers in inode
*
* It is better to call this function outside of any transaction as it
* might need a lot of space in journal for dquot structure allocation.
*/
static int __dquot_initialize(struct inode *inode, int type)
{
int cnt, init_needed = 0;
struct dquot **dquots, *got[MAXQUOTAS] = {};
struct super_block *sb = inode->i_sb;
qsize_t rsv;
int ret = 0;
if (!inode_quota_active(inode))
return 0;
dquots = i_dquot(inode);
/* First get references to structures we might need. */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
struct kqid qid;
kprojid_t projid;
int rc;
struct dquot *dquot;
if (type != -1 && cnt != type)
continue;
/*
		 * The i_dquot pointers should already be initialized in most
		 * cases; we check without locking here to avoid unnecessary
* dqget()/dqput() calls.
*/
if (dquots[cnt])
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
init_needed = 1;
switch (cnt) {
case USRQUOTA:
qid = make_kqid_uid(inode->i_uid);
break;
case GRPQUOTA:
qid = make_kqid_gid(inode->i_gid);
break;
case PRJQUOTA:
rc = inode->i_sb->dq_op->get_projid(inode, &projid);
if (rc)
continue;
qid = make_kqid_projid(projid);
break;
}
dquot = dqget(sb, qid);
if (IS_ERR(dquot)) {
/* We raced with somebody turning quotas off... */
if (PTR_ERR(dquot) != -ESRCH) {
ret = PTR_ERR(dquot);
goto out_put;
}
dquot = NULL;
}
got[cnt] = dquot;
}
	/* All required i_dquot pointers have been initialized */
if (!init_needed)
return 0;
spin_lock(&dq_data_lock);
if (IS_NOQUOTA(inode))
goto out_lock;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
/* Avoid races with quotaoff() */
if (!sb_has_quota_active(sb, cnt))
continue;
/* We could race with quotaon or dqget() could have failed */
if (!got[cnt])
continue;
if (!dquots[cnt]) {
dquots[cnt] = got[cnt];
got[cnt] = NULL;
/*
* Make quota reservation system happy if someone
* did a write before quota was turned on
*/
rsv = inode_get_rsv_space(inode);
if (unlikely(rsv)) {
spin_lock(&inode->i_lock);
/* Get reservation again under proper lock */
rsv = __inode_get_rsv_space(inode);
spin_lock(&dquots[cnt]->dq_dqb_lock);
dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
spin_unlock(&dquots[cnt]->dq_dqb_lock);
spin_unlock(&inode->i_lock);
}
}
}
out_lock:
spin_unlock(&dq_data_lock);
out_put:
/* Drop unused references */
dqput_all(got);
return ret;
}
int dquot_initialize(struct inode *inode)
{
return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
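/*
 * Usage sketch (assumption based on how in-tree filesystems use this API):
 * dquot_initialize() is called before operations that charge quota, e.g. at
 * the start of directory operations, so that dquot allocation happens
 * outside the journal transaction. Names below are illustrative:
 *
 *	// e.g. in an ->unlink() implementation
 *	err = dquot_initialize(dir);
 *	if (err)
 *		return err;
 *	err = dquot_initialize(d_inode(dentry));
 *	if (err)
 *		return err;
 *	// ... start transaction and do the actual unlink ...
 */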
bool dquot_initialize_needed(struct inode *inode)
{
struct dquot **dquots;
int i;
if (!inode_quota_active(inode))
return false;
dquots = i_dquot(inode);
for (i = 0; i < MAXQUOTAS; i++)
if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
return true;
return false;
}
EXPORT_SYMBOL(dquot_initialize_needed);
/*
* Release all quotas referenced by inode.
*
 * This function can only be called on inode free or when converting a
 * file to a quota file; in both cases there are no other users of the
 * i_dquot pointers, so we needn't call synchronize_srcu() after
 * clearing them.
*/
static void __dquot_drop(struct inode *inode)
{
int cnt;
struct dquot **dquots = i_dquot(inode);
struct dquot *put[MAXQUOTAS];
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
put[cnt] = dquots[cnt];
dquots[cnt] = NULL;
}
spin_unlock(&dq_data_lock);
dqput_all(put);
}
void dquot_drop(struct inode *inode)
{
struct dquot * const *dquots;
int cnt;
if (IS_NOQUOTA(inode))
return;
/*
* Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is actually
	 * a reliable test even without the lock - the caller
* must assure that nobody can come after the DQUOT_DROP and
* add quota pointers back anyway.
*/
dquots = i_dquot(inode);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (dquots[cnt])
break;
}
if (cnt < MAXQUOTAS)
__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);
/*
* inode_reserved_space is managed internally by quota, and protected by
 * i_lock similarly to i_blocks+i_bytes.
*/
static qsize_t *inode_reserved_space(struct inode * inode)
{
	/* Filesystem must explicitly define its own method in order to use
	 * the quota reservation interface */
BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
return inode->i_sb->dq_op->get_reserved_space(inode);
}
static qsize_t __inode_get_rsv_space(struct inode *inode)
{
if (!inode->i_sb->dq_op->get_reserved_space)
return 0;
return *inode_reserved_space(inode);
}
static qsize_t inode_get_rsv_space(struct inode *inode)
{
qsize_t ret;
if (!inode->i_sb->dq_op->get_reserved_space)
return 0;
spin_lock(&inode->i_lock);
ret = __inode_get_rsv_space(inode);
spin_unlock(&inode->i_lock);
return ret;
}
/*
 * The following functions update the i_blocks+i_bytes fields and quota information
* (together with appropriate checks).
*
* NOTE: We absolutely rely on the fact that caller dirties the inode
* (usually helpers in quotaops.h care about this) and holds a handle for
* the current transaction so that dquot write and inode write go into the
* same transaction.
*/
/*
* This operation can block, but only after everything is updated
*/
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
int cnt, ret = 0, index;
struct dquot_warn warn[MAXQUOTAS];
int reserve = flags & DQUOT_SPACE_RESERVE;
struct dquot **dquots;
if (!inode_quota_active(inode)) {
if (reserve) {
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) += number;
spin_unlock(&inode->i_lock);
} else {
inode_add_bytes(inode, number);
}
goto out;
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warn[cnt].w_type = QUOTA_NL_NOWARN;
dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!dquots[cnt])
continue;
if (reserve) {
ret = dquot_add_space(dquots[cnt], 0, number, flags,
&warn[cnt]);
} else {
ret = dquot_add_space(dquots[cnt], number, 0, flags,
&warn[cnt]);
}
if (ret) {
/* Back out changes we already did */
for (cnt--; cnt >= 0; cnt--) {
if (!dquots[cnt])
continue;
spin_lock(&dquots[cnt]->dq_dqb_lock);
if (reserve)
dquot_free_reserved_space(dquots[cnt],
number);
else
dquot_decr_space(dquots[cnt], number);
spin_unlock(&dquots[cnt]->dq_dqb_lock);
}
spin_unlock(&inode->i_lock);
goto out_flush_warn;
}
}
if (reserve)
*inode_reserved_space(inode) += number;
else
__inode_add_bytes(inode, number);
spin_unlock(&inode->i_lock);
if (reserve)
goto out_flush_warn;
mark_all_dquot_dirty(dquots);
out_flush_warn:
srcu_read_unlock(&dquot_srcu, index);
flush_warnings(warn);
out:
return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
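/*
 * Usage sketch: filesystems normally reach __dquot_alloc_space() through the
 * inline wrappers in <linux/quotaops.h> (e.g. dquot_alloc_space() and
 * dquot_alloc_block()), which fill in the flags and dirty the inode. A
 * minimal allocation path might look like (illustrative):
 *
 *	ret = dquot_alloc_block(inode, nr_blocks);
 *	if (ret)
 *		return ret;		// -EDQUOT if over quota
 *	// ... allocate the blocks on disk ...
 *	// and if the disk allocation fails:
 *	//	dquot_free_block(inode, nr_blocks);
 */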
/*
* This operation can block, but only after everything is updated
*/
int dquot_alloc_inode(struct inode *inode)
{
int cnt, ret = 0, index;
struct dquot_warn warn[MAXQUOTAS];
struct dquot * const *dquots;
if (!inode_quota_active(inode))
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warn[cnt].w_type = QUOTA_NL_NOWARN;
dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!dquots[cnt])
continue;
ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
if (ret) {
for (cnt--; cnt >= 0; cnt--) {
if (!dquots[cnt])
continue;
/* Back out changes we already did */
spin_lock(&dquots[cnt]->dq_dqb_lock);
dquot_decr_inodes(dquots[cnt], 1);
spin_unlock(&dquots[cnt]->dq_dqb_lock);
}
goto warn_put_all;
}
}
warn_put_all:
spin_unlock(&inode->i_lock);
if (ret == 0)
mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
flush_warnings(warn);
return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
/*
* Convert in-memory reserved quotas to real consumed quotas
*/
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
struct dquot **dquots;
int cnt, index;
if (!inode_quota_active(inode)) {
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) -= number;
__inode_add_bytes(inode, number);
spin_unlock(&inode->i_lock);
return 0;
}
dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
	/* Convert reserved quotas into allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (dquots[cnt]) {
struct dquot *dquot = dquots[cnt];
spin_lock(&dquot->dq_dqb_lock);
if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
number = dquot->dq_dqb.dqb_rsvspace;
dquot->dq_dqb.dqb_curspace += number;
dquot->dq_dqb.dqb_rsvspace -= number;
spin_unlock(&dquot->dq_dqb_lock);
}
}
/* Update inode bytes */
*inode_reserved_space(inode) -= number;
__inode_add_bytes(inode, number);
spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
/*
* Convert allocated space back to in-memory reserved quotas
*/
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
struct dquot **dquots;
int cnt, index;
if (!inode_quota_active(inode)) {
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) += number;
__inode_sub_bytes(inode, number);
spin_unlock(&inode->i_lock);
return;
}
dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
	/* Return allocated space back to the reserved quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (dquots[cnt]) {
struct dquot *dquot = dquots[cnt];
spin_lock(&dquot->dq_dqb_lock);
if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
number = dquot->dq_dqb.dqb_curspace;
dquot->dq_dqb.dqb_rsvspace += number;
dquot->dq_dqb.dqb_curspace -= number;
spin_unlock(&dquot->dq_dqb_lock);
}
}
/* Update inode bytes */
*inode_reserved_space(inode) += number;
__inode_sub_bytes(inode, number);
spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
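/*
 * Usage sketch (assumption, matching the quotaops.h wrappers): the
 * reserve/claim/reclaim trio above supports delayed allocation. A typical
 * lifecycle is:
 *
 *	// at write time: reserve quota without allocating blocks
 *	ret = dquot_reserve_block(inode, nr);
 *	// at block allocation time: convert the reservation to real usage
 *	dquot_claim_block(inode, nr);
 *	// or, if the allocation is abandoned: drop the reservation
 *	dquot_release_reservation_block(inode, nr);
 */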
/*
* This operation can block, but only after everything is updated
*/
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
unsigned int cnt;
struct dquot_warn warn[MAXQUOTAS];
struct dquot **dquots;
int reserve = flags & DQUOT_SPACE_RESERVE, index;
if (!inode_quota_active(inode)) {
if (reserve) {
spin_lock(&inode->i_lock);
*inode_reserved_space(inode) -= number;
spin_unlock(&inode->i_lock);
} else {
inode_sub_bytes(inode, number);
}
return;
}
dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
int wtype;
warn[cnt].w_type = QUOTA_NL_NOWARN;
if (!dquots[cnt])
continue;
spin_lock(&dquots[cnt]->dq_dqb_lock);
wtype = info_bdq_free(dquots[cnt], number);
if (wtype != QUOTA_NL_NOWARN)
prepare_warning(&warn[cnt], dquots[cnt], wtype);
if (reserve)
dquot_free_reserved_space(dquots[cnt], number);
else
dquot_decr_space(dquots[cnt], number);
spin_unlock(&dquots[cnt]->dq_dqb_lock);
}
if (reserve)
*inode_reserved_space(inode) -= number;
else
__inode_sub_bytes(inode, number);
spin_unlock(&inode->i_lock);
if (reserve)
goto out_unlock;
mark_all_dquot_dirty(dquots);
out_unlock:
srcu_read_unlock(&dquot_srcu, index);
flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);
/*
* This operation can block, but only after everything is updated
*/
void dquot_free_inode(struct inode *inode)
{
unsigned int cnt;
struct dquot_warn warn[MAXQUOTAS];
struct dquot * const *dquots;
int index;
if (!inode_quota_active(inode))
return;
dquots = i_dquot(inode);
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
int wtype;
warn[cnt].w_type = QUOTA_NL_NOWARN;
if (!dquots[cnt])
continue;
spin_lock(&dquots[cnt]->dq_dqb_lock);
wtype = info_idq_free(dquots[cnt], 1);
if (wtype != QUOTA_NL_NOWARN)
prepare_warning(&warn[cnt], dquots[cnt], wtype);
dquot_decr_inodes(dquots[cnt], 1);
spin_unlock(&dquots[cnt]->dq_dqb_lock);
}
spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);
/*
 * Transfer the inode and block usage counts from one dquot to another.
* On success, dquot references in transfer_to are consumed and references
* to original dquots that need to be released are placed there. On failure,
* references are kept untouched.
*
* This operation can block, but only after everything is updated
* A transaction must be started when entering this function.
*
 * We are holding references on transfer_from & transfer_to, so there is no
 * need to protect them by srcu_read_lock().
*/
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
qsize_t cur_space;
qsize_t rsv_space = 0;
qsize_t inode_usage = 1;
struct dquot *transfer_from[MAXQUOTAS] = {};
int cnt, ret = 0;
char is_valid[MAXQUOTAS] = {};
struct dquot_warn warn_to[MAXQUOTAS];
struct dquot_warn warn_from_inodes[MAXQUOTAS];
struct dquot_warn warn_from_space[MAXQUOTAS];
if (IS_NOQUOTA(inode))
return 0;
if (inode->i_sb->dq_op->get_inode_usage) {
ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
if (ret)
return ret;
}
/* Initialize the arrays */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
warn_to[cnt].w_type = QUOTA_NL_NOWARN;
warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
}
spin_lock(&dq_data_lock);
spin_lock(&inode->i_lock);
if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
spin_unlock(&inode->i_lock);
spin_unlock(&dq_data_lock);
return 0;
}
cur_space = __inode_get_bytes(inode);
rsv_space = __inode_get_rsv_space(inode);
/*
* Build the transfer_from list, check limits, and update usage in
* the target structures.
*/
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for the same uid or gid, or for a quota type
		 * that is turned off.
		 */
if (!transfer_to[cnt])
continue;
/* Avoid races with quotaoff() */
if (!sb_has_quota_active(inode->i_sb, cnt))
continue;
is_valid[cnt] = 1;
transfer_from[cnt] = i_dquot(inode)[cnt];
ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
&warn_to[cnt]);
if (ret)
goto over_quota;
ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
DQUOT_SPACE_WARN, &warn_to[cnt]);
if (ret) {
spin_lock(&transfer_to[cnt]->dq_dqb_lock);
dquot_decr_inodes(transfer_to[cnt], inode_usage);
spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
goto over_quota;
}
}
/* Decrease usage for source structures and update quota pointers */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!is_valid[cnt])
continue;
		/* Due to an IO error we might not have a transfer_from[] structure */
if (transfer_from[cnt]) {
int wtype;
spin_lock(&transfer_from[cnt]->dq_dqb_lock);
wtype = info_idq_free(transfer_from[cnt], inode_usage);
if (wtype != QUOTA_NL_NOWARN)
prepare_warning(&warn_from_inodes[cnt],
transfer_from[cnt], wtype);
wtype = info_bdq_free(transfer_from[cnt],
cur_space + rsv_space);
if (wtype != QUOTA_NL_NOWARN)
prepare_warning(&warn_from_space[cnt],
transfer_from[cnt], wtype);
dquot_decr_inodes(transfer_from[cnt], inode_usage);
dquot_decr_space(transfer_from[cnt], cur_space);
dquot_free_reserved_space(transfer_from[cnt],
rsv_space);
spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
}
i_dquot(inode)[cnt] = transfer_to[cnt];
}
spin_unlock(&inode->i_lock);
spin_unlock(&dq_data_lock);
mark_all_dquot_dirty(transfer_from);
mark_all_dquot_dirty(transfer_to);
flush_warnings(warn_to);
flush_warnings(warn_from_inodes);
flush_warnings(warn_from_space);
/* Pass back references to put */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (is_valid[cnt])
transfer_to[cnt] = transfer_from[cnt];
return 0;
over_quota:
/* Back out changes we already did */
for (cnt--; cnt >= 0; cnt--) {
if (!is_valid[cnt])
continue;
spin_lock(&transfer_to[cnt]->dq_dqb_lock);
dquot_decr_inodes(transfer_to[cnt], inode_usage);
dquot_decr_space(transfer_to[cnt], cur_space);
dquot_free_reserved_space(transfer_to[cnt], rsv_space);
spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
}
spin_unlock(&inode->i_lock);
spin_unlock(&dq_data_lock);
flush_warnings(warn_to);
return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
/* Wrapper for transferring ownership of an inode for uid/gid only
* Called from FSXXX_setattr()
*/
int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
struct iattr *iattr)
{
struct dquot *transfer_to[MAXQUOTAS] = {};
struct dquot *dquot;
struct super_block *sb = inode->i_sb;
int ret;
if (!inode_quota_active(inode))
return 0;
if (i_uid_needs_update(idmap, iattr, inode)) {
kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode),
iattr->ia_vfsuid);
dquot = dqget(sb, make_kqid_uid(kuid));
if (IS_ERR(dquot)) {
if (PTR_ERR(dquot) != -ESRCH) {
ret = PTR_ERR(dquot);
goto out_put;
}
dquot = NULL;
}
transfer_to[USRQUOTA] = dquot;
}
if (i_gid_needs_update(idmap, iattr, inode)) {
kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode),
iattr->ia_vfsgid);
dquot = dqget(sb, make_kqid_gid(kgid));
if (IS_ERR(dquot)) {
if (PTR_ERR(dquot) != -ESRCH) {
ret = PTR_ERR(dquot);
goto out_put;
}
dquot = NULL;
}
transfer_to[GRPQUOTA] = dquot;
}
ret = __dquot_transfer(inode, transfer_to);
out_put:
dqput_all(transfer_to);
return ret;
}
EXPORT_SYMBOL(dquot_transfer);
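/*
 * Usage sketch (hypothetical ->setattr() excerpt): chown/chgrp paths call
 * dquot_transfer() after preparing the inode's dquots and before applying
 * the new owner:
 *
 *	static int myfs_setattr(struct mnt_idmap *idmap,
 *				struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int error;
 *
 *		// ... setattr_prepare(), dquot_initialize(inode), ...
 *		if (i_uid_needs_update(idmap, attr, inode) ||
 *		    i_gid_needs_update(idmap, attr, inode)) {
 *			error = dquot_transfer(idmap, inode, attr);
 *			if (error)
 *				return error;
 *		}
 *		// ... apply the remaining attribute changes ...
 *		return 0;
 *	}
 */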
/*
* Write info of quota file to disk
*/
int dquot_commit_info(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
return dqopt->ops[type]->write_file_info(sb, type);
}
EXPORT_SYMBOL(dquot_commit_info);
int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
{
struct quota_info *dqopt = sb_dqopt(sb);
if (!sb_has_quota_active(sb, qid->type))
return -ESRCH;
if (!dqopt->ops[qid->type]->get_next_id)
return -ENOSYS;
return dqopt->ops[qid->type]->get_next_id(sb, qid);
}
EXPORT_SYMBOL(dquot_get_next_id);
/*
* Definitions of diskquota operations.
*/
const struct dquot_operations dquot_operations = {
.write_dquot = dquot_commit,
.acquire_dquot = dquot_acquire,
.release_dquot = dquot_release,
.mark_dirty = dquot_mark_dquot_dirty,
.write_info = dquot_commit_info,
.alloc_dquot = dquot_alloc,
.destroy_dquot = dquot_destroy,
.get_next_id = dquot_get_next_id,
};
EXPORT_SYMBOL(dquot_operations);
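/*
 * Wiring sketch (assumption based on common in-tree usage): a filesystem
 * that relies on the generic dquot implementation points its super_block
 * at these operations during mount, e.g. in its fill_super():
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;  // or an fs-specific one
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 */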
/*
* Generic helper for ->open on filesystems supporting disk quotas.
*/
int dquot_file_open(struct inode *inode, struct file *file)
{
int error;
error = generic_file_open(inode, file);
if (!error && (file->f_mode & FMODE_WRITE))
error = dquot_initialize(inode);
return error;
}
EXPORT_SYMBOL(dquot_file_open);
static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
struct inode *inode = dqopt->files[type];
if (!inode)
return;
if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
inode_lock(inode);
inode->i_flags &= ~S_NOQUOTA;
inode_unlock(inode);
}
dqopt->files[type] = NULL;
iput(inode);
}
/*
* Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
*/
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
int cnt;
struct quota_info *dqopt = sb_dqopt(sb);
/* s_umount should be held in exclusive mode */
if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
up_read(&sb->s_umount);
	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas while simultaneously turning them off. */
if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
|| (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
DQUOT_USAGE_ENABLED)))
return -EINVAL;
/*
* Skip everything if there's nothing to do. We have to do this because
* sometimes we are called when fill_super() failed and calling
* sync_fs() in such cases does no good.
*/
if (!sb_any_quota_loaded(sb))
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_loaded(sb, cnt))
continue;
if (flags & DQUOT_SUSPENDED) {
spin_lock(&dq_state_lock);
dqopt->flags |=
dquot_state_flag(DQUOT_SUSPENDED, cnt);
spin_unlock(&dq_state_lock);
} else {
spin_lock(&dq_state_lock);
dqopt->flags &= ~dquot_state_flag(flags, cnt);
/* Turning off suspended quotas? */
if (!sb_has_quota_loaded(sb, cnt) &&
sb_has_quota_suspended(sb, cnt)) {
dqopt->flags &= ~dquot_state_flag(
DQUOT_SUSPENDED, cnt);
spin_unlock(&dq_state_lock);
vfs_cleanup_quota_inode(sb, cnt);
continue;
}
spin_unlock(&dq_state_lock);
}
		/* Do we still have to keep quotas loaded? */
if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
continue;
/* Note: these are blocking operations */
drop_dquot_ref(sb, cnt);
invalidate_dquots(sb, cnt);
/*
		 * Now all dquots should be invalidated and all writes done,
		 * so we should be the only users of the info. No locks needed.
*/
if (info_dirty(&dqopt->info[cnt]))
sb->dq_op->write_info(sb, cnt);
if (dqopt->ops[cnt]->free_file_info)
dqopt->ops[cnt]->free_file_info(sb, cnt);
put_quota_format(dqopt->info[cnt].dqi_format);
dqopt->info[cnt].dqi_flags = 0;
dqopt->info[cnt].dqi_igrace = 0;
dqopt->info[cnt].dqi_bgrace = 0;
dqopt->ops[cnt] = NULL;
}
/* Skip syncing and setting flags if quota files are hidden */
if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
goto put_inodes;
/* Sync the superblock so that buffers with quota data are written to
* disk (and so userspace sees correct data afterwards). */
if (sb->s_op->sync_fs)
sb->s_op->sync_fs(sb, 1);
sync_blockdev(sb->s_bdev);
/* Now the quota files are just ordinary files and we can set the
* inode flags back. Moreover we discard the pagecache so that
* userspace sees the writes we did bypassing the pagecache. We
* must also discard the blockdev buffers so that we see the
* changes done by userspace on the next quotaon() */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
inode_lock(dqopt->files[cnt]);
truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
inode_unlock(dqopt->files[cnt]);
}
if (sb->s_bdev)
invalidate_bdev(sb->s_bdev);
put_inodes:
/* We are done when suspending quotas */
if (flags & DQUOT_SUSPENDED)
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (!sb_has_quota_loaded(sb, cnt))
vfs_cleanup_quota_inode(sb, cnt);
return 0;
}
EXPORT_SYMBOL(dquot_disable);
int dquot_quota_off(struct super_block *sb, int type)
{
return dquot_disable(sb, type,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);
/*
 * Turn quotas on for a device
*/
static int vfs_setup_quota_inode(struct inode *inode, int type)
{
struct super_block *sb = inode->i_sb;
struct quota_info *dqopt = sb_dqopt(sb);
if (is_bad_inode(inode))
return -EUCLEAN;
if (!S_ISREG(inode->i_mode))
return -EACCES;
if (IS_RDONLY(inode))
return -EROFS;
if (sb_has_quota_loaded(sb, type))
return -EBUSY;
dqopt->files[type] = igrab(inode);
if (!dqopt->files[type])
return -EIO;
if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks are
		 * possible). Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
inode_lock(inode);
inode->i_flags |= S_NOQUOTA;
inode_unlock(inode);
/*
* When S_NOQUOTA is set, remove dquot references as no more
* references can be added
*/
__dquot_drop(inode);
}
return 0;
}
int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
unsigned int flags)
{
struct quota_format_type *fmt = find_quota_format(format_id);
struct quota_info *dqopt = sb_dqopt(sb);
int error;
lockdep_assert_held_write(&sb->s_umount);
	/* Unsuspending quotas is handled by dquot_resume(), not here */
BUG_ON(flags & DQUOT_SUSPENDED);
if (!fmt)
return -ESRCH;
if (!sb->dq_op || !sb->s_qcop ||
(type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
error = -EINVAL;
goto out_fmt;
}
/* Filesystems outside of init_user_ns not yet supported */
if (sb->s_user_ns != &init_user_ns) {
error = -EINVAL;
goto out_fmt;
}
/* Usage always has to be set... */
if (!(flags & DQUOT_USAGE_ENABLED)) {
error = -EINVAL;
goto out_fmt;
}
if (sb_has_quota_loaded(sb, type)) {
error = -EBUSY;
goto out_fmt;
}
if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that the kernel sees
* changes from userspace. It is not enough to just flush
* the quota file since if blocksize < pagesize, invalidation
* of the cache could fail because of other unrelated dirty
* data */
sync_filesystem(sb);
invalidate_bdev(sb->s_bdev);
}
error = -EINVAL;
if (!fmt->qf_ops->check_quota_file(sb, type))
goto out_fmt;
dqopt->ops[type] = fmt->qf_ops;
dqopt->info[type].dqi_format = fmt;
dqopt->info[type].dqi_fmt_id = format_id;
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
error = dqopt->ops[type]->read_file_info(sb, type);
if (error < 0)
goto out_fmt;
if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
spin_lock(&dq_data_lock);
dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
spin_unlock(&dq_data_lock);
}
spin_lock(&dq_state_lock);
dqopt->flags |= dquot_state_flag(flags, type);
spin_unlock(&dq_state_lock);
error = add_dquot_ref(sb, type);
if (error)
dquot_disable(sb, type,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
return error;
out_fmt:
put_quota_format(fmt);
return error;
}
EXPORT_SYMBOL(dquot_load_quota_sb);
/*
 * A more powerful function for turning on quotas on a given quota inode,
 * allowing individual quota flags to be set
*/
int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
unsigned int flags)
{
int err;
err = vfs_setup_quota_inode(inode, type);
if (err < 0)
return err;
err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
if (err < 0)
vfs_cleanup_quota_inode(inode->i_sb, type);
return err;
}
EXPORT_SYMBOL(dquot_load_quota_inode);
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
int ret = 0, cnt;
unsigned int flags;
/* s_umount should be held in exclusive mode */
if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
up_read(&sb->s_umount);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_suspended(sb, cnt))
continue;
spin_lock(&dq_state_lock);
flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED,
cnt);
dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
spin_unlock(&dq_state_lock);
flags = dquot_generic_flag(flags, cnt);
ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
flags);
if (ret < 0)
vfs_cleanup_quota_inode(sb, cnt);
}
return ret;
}
EXPORT_SYMBOL(dquot_resume);
int dquot_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path)
{
int error = security_quota_on(path->dentry);
if (error)
return error;
/* Quota file not on the same filesystem? */
if (path->dentry->d_sb != sb)
error = -EXDEV;
else
error = dquot_load_quota_inode(d_inode(path->dentry), type,
format_id, DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED);
return error;
}
EXPORT_SYMBOL(dquot_quota_on);
/*
* This function is used when a filesystem needs to initialize quotas at
* mount time.
*/
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
int format_id, int type)
{
struct dentry *dentry;
int error;
dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
if (IS_ERR(dentry))
return PTR_ERR(dentry);
error = security_quota_on(dentry);
if (!error)
error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
dput(dentry);
return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);
static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
int ret;
int type;
struct quota_info *dqopt = sb_dqopt(sb);
if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
return -ENOSYS;
/* Accounting cannot be turned on while fs is mounted */
flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
if (!flags)
return -EINVAL;
for (type = 0; type < MAXQUOTAS; type++) {
if (!(flags & qtype_enforce_flag(type)))
continue;
/* Can't enforce without accounting */
if (!sb_has_quota_usage_enabled(sb, type)) {
ret = -EINVAL;
goto out_err;
}
if (sb_has_quota_limits_enabled(sb, type)) {
ret = -EBUSY;
goto out_err;
}
spin_lock(&dq_state_lock);
dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
spin_unlock(&dq_state_lock);
}
return 0;
out_err:
/* Back out the enforcement enablement we already did */
for (type--; type >= 0; type--) {
if (flags & qtype_enforce_flag(type))
dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
}
/* Error code translation for better compatibility with XFS */
if (ret == -EBUSY)
ret = -EEXIST;
return ret;
}
static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
int ret;
int type;
struct quota_info *dqopt = sb_dqopt(sb);
if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
return -ENOSYS;
/*
* We don't support turning off accounting via quotactl. In principle
* the quota infrastructure can do this, but filesystems don't expect
* userspace to be able to do it.
*/
if (flags &
(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
return -EOPNOTSUPP;
/* Filter out limits not enabled */
for (type = 0; type < MAXQUOTAS; type++)
if (!sb_has_quota_limits_enabled(sb, type))
flags &= ~qtype_enforce_flag(type);
/* Nothing left? */
if (!flags)
return -EEXIST;
for (type = 0; type < MAXQUOTAS; type++) {
if (flags & qtype_enforce_flag(type)) {
ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
if (ret < 0)
goto out_err;
}
}
return 0;
out_err:
/* Back out the enforcement disabling we already did */
for (type--; type >= 0; type--) {
if (flags & qtype_enforce_flag(type)) {
spin_lock(&dq_state_lock);
dqopt->flags |=
dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
spin_unlock(&dq_state_lock);
}
}
return ret;
}
/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
memset(di, 0, sizeof(*di));
spin_lock(&dquot->dq_dqb_lock);
di->d_spc_hardlimit = dm->dqb_bhardlimit;
di->d_spc_softlimit = dm->dqb_bsoftlimit;
di->d_ino_hardlimit = dm->dqb_ihardlimit;
di->d_ino_softlimit = dm->dqb_isoftlimit;
di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
di->d_ino_count = dm->dqb_curinodes;
di->d_spc_timer = dm->dqb_btime;
di->d_ino_timer = dm->dqb_itime;
spin_unlock(&dquot->dq_dqb_lock);
}
int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
struct qc_dqblk *di)
{
struct dquot *dquot;
dquot = dqget(sb, qid);
if (IS_ERR(dquot))
return PTR_ERR(dquot);
do_get_dqblk(dquot, di);
dqput(dquot);
return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);
int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
struct qc_dqblk *di)
{
struct dquot *dquot;
int err;
if (!sb->dq_op->get_next_id)
return -ENOSYS;
err = sb->dq_op->get_next_id(sb, qid);
if (err < 0)
return err;
dquot = dqget(sb, *qid);
if (IS_ERR(dquot))
return PTR_ERR(dquot);
do_get_dqblk(dquot, di);
dqput(dquot);
return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);
#define VFS_QC_MASK \
(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
QC_SPC_TIMER | QC_INO_TIMER)
/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
int check_blim = 0, check_ilim = 0;
struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
if (di->d_fieldmask & ~VFS_QC_MASK)
return -EINVAL;
if (((di->d_fieldmask & QC_SPC_SOFT) &&
di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
((di->d_fieldmask & QC_SPC_HARD) &&
di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
((di->d_fieldmask & QC_INO_SOFT) &&
(di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
((di->d_fieldmask & QC_INO_HARD) &&
(di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
return -ERANGE;
spin_lock(&dquot->dq_dqb_lock);
if (di->d_fieldmask & QC_SPACE) {
dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
}
if (di->d_fieldmask & QC_SPC_SOFT)
dm->dqb_bsoftlimit = di->d_spc_softlimit;
if (di->d_fieldmask & QC_SPC_HARD)
dm->dqb_bhardlimit = di->d_spc_hardlimit;
if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
}
if (di->d_fieldmask & QC_INO_COUNT) {
dm->dqb_curinodes = di->d_ino_count;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
}
if (di->d_fieldmask & QC_INO_SOFT)
dm->dqb_isoftlimit = di->d_ino_softlimit;
if (di->d_fieldmask & QC_INO_HARD)
dm->dqb_ihardlimit = di->d_ino_hardlimit;
if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
}
if (di->d_fieldmask & QC_SPC_TIMER) {
dm->dqb_btime = di->d_spc_timer;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
}
if (di->d_fieldmask & QC_INO_TIMER) {
dm->dqb_itime = di->d_ino_timer;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
}
if (check_blim) {
if (!dm->dqb_bsoftlimit ||
dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
dm->dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
} else if (!(di->d_fieldmask & QC_SPC_TIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
}
if (check_ilim) {
if (!dm->dqb_isoftlimit ||
dm->dqb_curinodes <= dm->dqb_isoftlimit) {
dm->dqb_itime = 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
} else if (!(di->d_fieldmask & QC_INO_TIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
}
if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
dm->dqb_isoftlimit)
clear_bit(DQ_FAKE_B, &dquot->dq_flags);
else
set_bit(DQ_FAKE_B, &dquot->dq_flags);
spin_unlock(&dquot->dq_dqb_lock);
mark_dquot_dirty(dquot);
return 0;
}
int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
struct qc_dqblk *di)
{
struct dquot *dquot;
int rc;
dquot = dqget(sb, qid);
if (IS_ERR(dquot)) {
rc = PTR_ERR(dquot);
goto out;
}
rc = do_set_dqblk(dquot, di);
dqput(dquot);
out:
return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);
/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
struct mem_dqinfo *mi;
struct qc_type_state *tstate;
struct quota_info *dqopt = sb_dqopt(sb);
int type;
memset(state, 0, sizeof(*state));
for (type = 0; type < MAXQUOTAS; type++) {
if (!sb_has_quota_active(sb, type))
continue;
tstate = state->s_state + type;
mi = sb_dqopt(sb)->info + type;
tstate->flags = QCI_ACCT_ENABLED;
spin_lock(&dq_data_lock);
if (mi->dqi_flags & DQF_SYS_FILE)
tstate->flags |= QCI_SYSFILE;
if (mi->dqi_flags & DQF_ROOT_SQUASH)
tstate->flags |= QCI_ROOT_SQUASH;
if (sb_has_quota_limits_enabled(sb, type))
tstate->flags |= QCI_LIMITS_ENFORCED;
tstate->spc_timelimit = mi->dqi_bgrace;
tstate->ino_timelimit = mi->dqi_igrace;
if (dqopt->files[type]) {
tstate->ino = dqopt->files[type]->i_ino;
tstate->blocks = dqopt->files[type]->i_blocks;
}
tstate->nextents = 1; /* We don't know... */
spin_unlock(&dq_data_lock);
}
return 0;
}
EXPORT_SYMBOL(dquot_get_state);
/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
struct mem_dqinfo *mi;
if ((ii->i_fieldmask & QC_WARNS_MASK) ||
(ii->i_fieldmask & QC_RT_SPC_TIMER))
return -EINVAL;
if (!sb_has_quota_active(sb, type))
return -ESRCH;
mi = sb_dqopt(sb)->info + type;
if (ii->i_fieldmask & QC_FLAGS) {
if ((ii->i_flags & QCI_ROOT_SQUASH &&
mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
return -EINVAL;
}
spin_lock(&dq_data_lock);
if (ii->i_fieldmask & QC_SPC_TIMER)
mi->dqi_bgrace = ii->i_spc_timelimit;
if (ii->i_fieldmask & QC_INO_TIMER)
mi->dqi_igrace = ii->i_ino_timelimit;
if (ii->i_fieldmask & QC_FLAGS) {
if (ii->i_flags & QCI_ROOT_SQUASH)
mi->dqi_flags |= DQF_ROOT_SQUASH;
else
mi->dqi_flags &= ~DQF_ROOT_SQUASH;
}
spin_unlock(&dq_data_lock);
mark_info_dirty(sb, type);
/* Force write to disk */
return sb->dq_op->write_info(sb, type);
}
EXPORT_SYMBOL(dquot_set_dqinfo);
const struct quotactl_ops dquot_quotactl_sysfile_ops = {
.quota_enable = dquot_quota_enable,
.quota_disable = dquot_quota_disable,
.quota_sync = dquot_quota_sync,
.get_state = dquot_get_state,
.set_info = dquot_set_dqinfo,
.get_dqblk = dquot_get_dqblk,
.get_nextdqblk = dquot_get_next_dqblk,
.set_dqblk = dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
static int do_proc_dqstats(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int type = (unsigned long *)table->data - dqstats.stat;
s64 value = percpu_counter_sum(&dqstats.counter[type]);
/* Filter negative values for non-monotonic counters */
if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
type == DQST_FREE_DQUOTS))
value = 0;
/* Update global table */
dqstats.stat[type] = value;
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static struct ctl_table fs_dqstats_table[] = {
{
.procname = "lookups",
.data = &dqstats.stat[DQST_LOOKUPS],
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "drops",
.data = &dqstats.stat[DQST_DROPS],
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "reads",
.data = &dqstats.stat[DQST_READS],
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "writes",
.data = &dqstats.stat[DQST_WRITES],
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "cache_hits",
.data = &dqstats.stat[DQST_CACHE_HITS],
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "allocated_dquots",
.data = &dqstats.stat[DQST_ALLOC_DQUOTS],
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "free_dquots",
.data = &dqstats.stat[DQST_FREE_DQUOTS],
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "syncs",
.data = &dqstats.stat[DQST_SYNCS],
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
#ifdef CONFIG_PRINT_QUOTA_WARNING
{
.procname = "warnings",
.data = &flag_print_warnings,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
{ },
};
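/*
 * Note (descriptive, not kernel documentation): via the
 * register_sysctl_init("fs/quota", ...) call in dquot_init() below, these
 * counters are exposed read-only under /proc/sys/fs/quota/, e.g.
 * /proc/sys/fs/quota/lookups.
 */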
static int __init dquot_init(void)
{
int i, ret;
unsigned long nr_hash, order;
printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
register_sysctl_init("fs/quota", fs_dqstats_table);
dquot_cachep = kmem_cache_create("dquot",
sizeof(struct dquot), sizeof(unsigned long) * 4,
(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_PANIC),
NULL);
order = 0;
dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
if (!dquot_hash)
panic("Cannot create dquot hash table");
for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
if (ret)
panic("Cannot create dquot stat counters");
}
/* Find power-of-two hlist_heads which can fit into allocation */
nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
dq_hash_bits = ilog2(nr_hash);
nr_hash = 1UL << dq_hash_bits;
dq_hash_mask = nr_hash - 1;
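/*
 * Illustrative sizing, assuming a 64-bit build with 4 KiB pages: with
 * order 0 the single page holds 4096 / 8 = 512 hlist_heads, so
 * dq_hash_bits ends up 9 and dq_hash_mask 0x1ff.
 */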
for (i = 0; i < nr_hash; i++)
INIT_HLIST_HEAD(dquot_hash + i);
pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
if (register_shrinker(&dqcache_shrinker, "dquota-cache"))
panic("Cannot register dquot shrinker");
return 0;
}
fs_initcall(dquot_init);
| linux-master | fs/quota/dquot.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Quota code necessary even when VFS quota support is not compiled
* into the kernel. The interesting stuff is over in dquot.c, here
* we have symbols for initial quotactl(2) handling, the sysctl(2)
* variables, etc - things needed even when quota support disabled.
*/
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/nospec.h>
#include "compat.h"
#include "../internal.h"
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
qid_t id)
{
switch (cmd) {
/* these commands do not require any special privileges */
case Q_GETFMT:
case Q_SYNC:
case Q_GETINFO:
case Q_XGETQSTAT:
case Q_XGETQSTATV:
case Q_XQUOTASYNC:
break;
/* allow querying information for dquots we "own" */
case Q_GETQUOTA:
case Q_XGETQUOTA:
if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) ||
(type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
break;
fallthrough;
default:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
}
return security_quotactl(cmd, type, id, sb);
}
static void quota_sync_one(struct super_block *sb, void *arg)
{
int type = *(int *)arg;
if (sb->s_qcop && sb->s_qcop->quota_sync &&
(sb->s_quota_types & (1 << type)))
sb->s_qcop->quota_sync(sb, type);
}
static int quota_sync_all(int type)
{
int ret;
ret = security_quotactl(Q_SYNC, type, 0, NULL);
if (!ret)
iterate_supers(quota_sync_one, &type);
return ret;
}
unsigned int qtype_enforce_flag(int type)
{
switch (type) {
case USRQUOTA:
return FS_QUOTA_UDQ_ENFD;
case GRPQUOTA:
return FS_QUOTA_GDQ_ENFD;
case PRJQUOTA:
return FS_QUOTA_PDQ_ENFD;
}
return 0;
}
static int quota_quotaon(struct super_block *sb, int type, qid_t id,
const struct path *path)
{
if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
return -ENOSYS;
if (sb->s_qcop->quota_enable)
return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
if (IS_ERR(path))
return PTR_ERR(path);
return sb->s_qcop->quota_on(sb, type, id, path);
}
static int quota_quotaoff(struct super_block *sb, int type)
{
if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
return -ENOSYS;
if (sb->s_qcop->quota_disable)
return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
return sb->s_qcop->quota_off(sb, type);
}
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
__u32 fmt;
if (!sb_has_quota_active(sb, type))
return -ESRCH;
fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
if (copy_to_user(addr, &fmt, sizeof(fmt)))
return -EFAULT;
return 0;
}
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
struct qc_state state;
struct qc_type_state *tstate;
struct if_dqinfo uinfo;
int ret;
if (!sb->s_qcop->get_state)
return -ENOSYS;
ret = sb->s_qcop->get_state(sb, &state);
if (ret)
return ret;
tstate = state.s_state + type;
if (!(tstate->flags & QCI_ACCT_ENABLED))
return -ESRCH;
memset(&uinfo, 0, sizeof(uinfo));
uinfo.dqi_bgrace = tstate->spc_timelimit;
uinfo.dqi_igrace = tstate->ino_timelimit;
if (tstate->flags & QCI_SYSFILE)
uinfo.dqi_flags |= DQF_SYS_FILE;
if (tstate->flags & QCI_ROOT_SQUASH)
uinfo.dqi_flags |= DQF_ROOT_SQUASH;
uinfo.dqi_valid = IIF_ALL;
if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
return -EFAULT;
return 0;
}
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
struct if_dqinfo info;
struct qc_info qinfo;
if (copy_from_user(&info, addr, sizeof(info)))
return -EFAULT;
if (!sb->s_qcop->set_info)
return -ENOSYS;
if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
return -EINVAL;
memset(&qinfo, 0, sizeof(qinfo));
if (info.dqi_valid & IIF_FLAGS) {
if (info.dqi_flags & ~DQF_SETINFO_MASK)
return -EINVAL;
if (info.dqi_flags & DQF_ROOT_SQUASH)
qinfo.i_flags |= QCI_ROOT_SQUASH;
qinfo.i_fieldmask |= QC_FLAGS;
}
if (info.dqi_valid & IIF_BGRACE) {
qinfo.i_spc_timelimit = info.dqi_bgrace;
qinfo.i_fieldmask |= QC_SPC_TIMER;
}
if (info.dqi_valid & IIF_IGRACE) {
qinfo.i_ino_timelimit = info.dqi_igrace;
qinfo.i_fieldmask |= QC_INO_TIMER;
}
return sb->s_qcop->set_info(sb, type, &qinfo);
}
static inline qsize_t qbtos(qsize_t blocks)
{
return blocks << QIF_DQBLKSIZE_BITS;
}
static inline qsize_t stoqb(qsize_t space)
{
return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}
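/*
 * Worked examples, assuming QIF_DQBLKSIZE_BITS == 10 from
 * <linux/quota.h> (i.e. 1 KiB quota blocks): qbtos(3) == 3072 bytes,
 * and stoqb(1025) == 2 since space is rounded up to whole blocks.
 */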
static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
{
memset(dst, 0, sizeof(*dst));
dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
dst->dqb_curspace = src->d_space;
dst->dqb_ihardlimit = src->d_ino_hardlimit;
dst->dqb_isoftlimit = src->d_ino_softlimit;
dst->dqb_curinodes = src->d_ino_count;
dst->dqb_btime = src->d_spc_timer;
dst->dqb_itime = src->d_ino_timer;
dst->dqb_valid = QIF_ALL;
}
static int quota_getquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct kqid qid;
struct qc_dqblk fdq;
struct if_dqblk idq;
int ret;
if (!sb->s_qcop->get_dqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
if (ret)
return ret;
copy_to_if_dqblk(&idq, &fdq);
if (compat_need_64bit_alignment_fixup()) {
struct compat_if_dqblk __user *compat_dqblk = addr;
if (copy_to_user(compat_dqblk, &idq, sizeof(*compat_dqblk)))
return -EFAULT;
if (put_user(idq.dqb_valid, &compat_dqblk->dqb_valid))
return -EFAULT;
} else {
if (copy_to_user(addr, &idq, sizeof(idq)))
return -EFAULT;
}
return 0;
}
/*
* Return quota for next active quota >= this id, if any exists,
* otherwise return -ENOENT via ->get_nextdqblk
*/
static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct kqid qid;
struct qc_dqblk fdq;
struct if_nextdqblk idq;
int ret;
if (!sb->s_qcop->get_nextdqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
if (ret)
return ret;
/* struct if_nextdqblk is a superset of struct if_dqblk */
copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
idq.dqb_id = from_kqid(current_user_ns(), qid);
if (copy_to_user(addr, &idq, sizeof(idq)))
return -EFAULT;
return 0;
}
static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
dst->d_space = src->dqb_curspace;
dst->d_ino_hardlimit = src->dqb_ihardlimit;
dst->d_ino_softlimit = src->dqb_isoftlimit;
dst->d_ino_count = src->dqb_curinodes;
dst->d_spc_timer = src->dqb_btime;
dst->d_ino_timer = src->dqb_itime;
dst->d_fieldmask = 0;
if (src->dqb_valid & QIF_BLIMITS)
dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
if (src->dqb_valid & QIF_SPACE)
dst->d_fieldmask |= QC_SPACE;
if (src->dqb_valid & QIF_ILIMITS)
dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
if (src->dqb_valid & QIF_INODES)
dst->d_fieldmask |= QC_INO_COUNT;
if (src->dqb_valid & QIF_BTIME)
dst->d_fieldmask |= QC_SPC_TIMER;
if (src->dqb_valid & QIF_ITIME)
dst->d_fieldmask |= QC_INO_TIMER;
}
static int quota_setquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct qc_dqblk fdq;
struct if_dqblk idq;
struct kqid qid;
if (compat_need_64bit_alignment_fixup()) {
struct compat_if_dqblk __user *compat_dqblk = addr;
if (copy_from_user(&idq, compat_dqblk, sizeof(*compat_dqblk)) ||
get_user(idq.dqb_valid, &compat_dqblk->dqb_valid))
return -EFAULT;
} else {
if (copy_from_user(&idq, addr, sizeof(idq)))
return -EFAULT;
}
if (!sb->s_qcop->set_dqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
copy_from_if_dqblk(&fdq, &idq);
return sb->s_qcop->set_dqblk(sb, qid, &fdq);
}
static int quota_enable(struct super_block *sb, void __user *addr)
{
__u32 flags;
if (copy_from_user(&flags, addr, sizeof(flags)))
return -EFAULT;
if (!sb->s_qcop->quota_enable)
return -ENOSYS;
return sb->s_qcop->quota_enable(sb, flags);
}
static int quota_disable(struct super_block *sb, void __user *addr)
{
__u32 flags;
if (copy_from_user(&flags, addr, sizeof(flags)))
return -EFAULT;
if (!sb->s_qcop->quota_disable)
return -ENOSYS;
return sb->s_qcop->quota_disable(sb, flags);
}
static int quota_state_to_flags(struct qc_state *state)
{
int flags = 0;
if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
flags |= FS_QUOTA_UDQ_ACCT;
if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
flags |= FS_QUOTA_UDQ_ENFD;
if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
flags |= FS_QUOTA_GDQ_ACCT;
if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
flags |= FS_QUOTA_GDQ_ENFD;
if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
flags |= FS_QUOTA_PDQ_ACCT;
if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
flags |= FS_QUOTA_PDQ_ENFD;
return flags;
}
static int quota_getstate(struct super_block *sb, int type,
struct fs_quota_stat *fqs)
{
struct qc_state state;
int ret;
memset(&state, 0, sizeof (struct qc_state));
ret = sb->s_qcop->get_state(sb, &state);
if (ret < 0)
return ret;
memset(fqs, 0, sizeof(*fqs));
fqs->qs_version = FS_QSTAT_VERSION;
fqs->qs_flags = quota_state_to_flags(&state);
/* No quota enabled? */
if (!fqs->qs_flags)
return -ENOSYS;
fqs->qs_incoredqs = state.s_incoredqs;
fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
/* Inodes may be allocated even if inactive; copy out if present */
if (state.s_state[USRQUOTA].ino) {
fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
}
if (state.s_state[GRPQUOTA].ino) {
fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
}
if (state.s_state[PRJQUOTA].ino) {
/*
* Q_XGETQSTAT doesn't have room for both group and project
* quotas. So, allow the project quota values to be copied out
* only if there is no group quota information available.
*/
if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
fqs->qs_gquota.qfs_nblks =
state.s_state[PRJQUOTA].blocks;
fqs->qs_gquota.qfs_nextents =
state.s_state[PRJQUOTA].nextents;
}
}
return 0;
}
static int compat_copy_fs_qfilestat(struct compat_fs_qfilestat __user *to,
struct fs_qfilestat *from)
{
if (copy_to_user(to, from, sizeof(*to)) ||
put_user(from->qfs_nextents, &to->qfs_nextents))
return -EFAULT;
return 0;
}
static int compat_copy_fs_quota_stat(struct compat_fs_quota_stat __user *to,
struct fs_quota_stat *from)
{
if (put_user(from->qs_version, &to->qs_version) ||
put_user(from->qs_flags, &to->qs_flags) ||
put_user(from->qs_pad, &to->qs_pad) ||
compat_copy_fs_qfilestat(&to->qs_uquota, &from->qs_uquota) ||
compat_copy_fs_qfilestat(&to->qs_gquota, &from->qs_gquota) ||
put_user(from->qs_incoredqs, &to->qs_incoredqs) ||
put_user(from->qs_btimelimit, &to->qs_btimelimit) ||
put_user(from->qs_itimelimit, &to->qs_itimelimit) ||
put_user(from->qs_rtbtimelimit, &to->qs_rtbtimelimit) ||
put_user(from->qs_bwarnlimit, &to->qs_bwarnlimit) ||
put_user(from->qs_iwarnlimit, &to->qs_iwarnlimit))
return -EFAULT;
return 0;
}
static int quota_getxstate(struct super_block *sb, int type, void __user *addr)
{
struct fs_quota_stat fqs;
int ret;
if (!sb->s_qcop->get_state)
return -ENOSYS;
ret = quota_getstate(sb, type, &fqs);
if (ret)
return ret;
if (compat_need_64bit_alignment_fixup())
return compat_copy_fs_quota_stat(addr, &fqs);
if (copy_to_user(addr, &fqs, sizeof(fqs)))
return -EFAULT;
return 0;
}
static int quota_getstatev(struct super_block *sb, int type,
struct fs_quota_statv *fqs)
{
struct qc_state state;
int ret;
memset(&state, 0, sizeof (struct qc_state));
ret = sb->s_qcop->get_state(sb, &state);
if (ret < 0)
return ret;
memset(fqs, 0, sizeof(*fqs));
fqs->qs_version = FS_QSTAT_VERSION;
fqs->qs_flags = quota_state_to_flags(&state);
/* No quota enabled? */
if (!fqs->qs_flags)
return -ENOSYS;
fqs->qs_incoredqs = state.s_incoredqs;
fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
fqs->qs_rtbwarnlimit = state.s_state[type].rt_spc_warnlimit;
/* Inodes may be allocated even if inactive; copy out if present */
if (state.s_state[USRQUOTA].ino) {
fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
}
if (state.s_state[GRPQUOTA].ino) {
fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
}
if (state.s_state[PRJQUOTA].ino) {
fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
}
return 0;
}
static int quota_getxstatev(struct super_block *sb, int type, void __user *addr)
{
struct fs_quota_statv fqs;
int ret;
if (!sb->s_qcop->get_state)
return -ENOSYS;
memset(&fqs, 0, sizeof(fqs));
if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
return -EFAULT;
/* If this kernel doesn't support user specified version, fail */
switch (fqs.qs_version) {
case FS_QSTATV_VERSION1:
break;
default:
return -EINVAL;
}
ret = quota_getstatev(sb, type, &fqs);
if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
return -EFAULT;
return ret;
}
/*
 * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
 * out of there as xfsprogs relies on the definitions being in that header
 * file. So just define the same functions here for quota purposes.
 */
#define XFS_BB_SHIFT 9
static inline u64 quota_bbtob(u64 blocks)
{
return blocks << XFS_BB_SHIFT;
}
static inline u64 quota_btobb(u64 bytes)
{
return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}
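/*
 * Worked examples with XFS_BB_SHIFT == 9 (512-byte basic blocks):
 * quota_bbtob(8) == 4096 bytes, and quota_btobb(513) == 2 since byte
 * counts are rounded up to whole basic blocks.
 */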
static inline s64 copy_from_xfs_dqblk_ts(const struct fs_disk_quota *d,
__s32 timer, __s8 timer_hi)
{
if (d->d_fieldmask & FS_DQ_BIGTIME)
return (u32)timer | (s64)timer_hi << 32;
return timer;
}
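/*
 * Illustration of the FS_DQ_BIGTIME encoding: a 64-bit expiry such as
 * 0x200000000 is carried as timer == 0x00000000 with timer_hi == 0x2;
 * copy_from_xfs_dqblk_ts() above reassembles it as
 * (u32)timer | (s64)timer_hi << 32, and copy_to_xfs_dqblk_ts() further
 * down performs the matching split.
 */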
static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
{
dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
dst->d_ino_hardlimit = src->d_ino_hardlimit;
dst->d_ino_softlimit = src->d_ino_softlimit;
dst->d_space = quota_bbtob(src->d_bcount);
dst->d_ino_count = src->d_icount;
dst->d_ino_timer = copy_from_xfs_dqblk_ts(src, src->d_itimer,
src->d_itimer_hi);
dst->d_spc_timer = copy_from_xfs_dqblk_ts(src, src->d_btimer,
src->d_btimer_hi);
dst->d_ino_warns = src->d_iwarns;
dst->d_spc_warns = src->d_bwarns;
dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
dst->d_rt_space = quota_bbtob(src->d_rtbcount);
dst->d_rt_spc_timer = copy_from_xfs_dqblk_ts(src, src->d_rtbtimer,
src->d_rtbtimer_hi);
dst->d_rt_spc_warns = src->d_rtbwarns;
dst->d_fieldmask = 0;
if (src->d_fieldmask & FS_DQ_ISOFT)
dst->d_fieldmask |= QC_INO_SOFT;
if (src->d_fieldmask & FS_DQ_IHARD)
dst->d_fieldmask |= QC_INO_HARD;
if (src->d_fieldmask & FS_DQ_BSOFT)
dst->d_fieldmask |= QC_SPC_SOFT;
if (src->d_fieldmask & FS_DQ_BHARD)
dst->d_fieldmask |= QC_SPC_HARD;
if (src->d_fieldmask & FS_DQ_RTBSOFT)
dst->d_fieldmask |= QC_RT_SPC_SOFT;
if (src->d_fieldmask & FS_DQ_RTBHARD)
dst->d_fieldmask |= QC_RT_SPC_HARD;
if (src->d_fieldmask & FS_DQ_BTIMER)
dst->d_fieldmask |= QC_SPC_TIMER;
if (src->d_fieldmask & FS_DQ_ITIMER)
dst->d_fieldmask |= QC_INO_TIMER;
if (src->d_fieldmask & FS_DQ_RTBTIMER)
dst->d_fieldmask |= QC_RT_SPC_TIMER;
if (src->d_fieldmask & FS_DQ_BWARNS)
dst->d_fieldmask |= QC_SPC_WARNS;
if (src->d_fieldmask & FS_DQ_IWARNS)
dst->d_fieldmask |= QC_INO_WARNS;
if (src->d_fieldmask & FS_DQ_RTBWARNS)
dst->d_fieldmask |= QC_RT_SPC_WARNS;
if (src->d_fieldmask & FS_DQ_BCOUNT)
dst->d_fieldmask |= QC_SPACE;
if (src->d_fieldmask & FS_DQ_ICOUNT)
dst->d_fieldmask |= QC_INO_COUNT;
if (src->d_fieldmask & FS_DQ_RTBCOUNT)
dst->d_fieldmask |= QC_RT_SPACE;
}
static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
struct fs_disk_quota *src)
{
memset(dst, 0, sizeof(*dst));
dst->i_spc_timelimit = src->d_btimer;
dst->i_ino_timelimit = src->d_itimer;
dst->i_rt_spc_timelimit = src->d_rtbtimer;
dst->i_ino_warnlimit = src->d_iwarns;
dst->i_spc_warnlimit = src->d_bwarns;
dst->i_rt_spc_warnlimit = src->d_rtbwarns;
if (src->d_fieldmask & FS_DQ_BWARNS)
dst->i_fieldmask |= QC_SPC_WARNS;
if (src->d_fieldmask & FS_DQ_IWARNS)
dst->i_fieldmask |= QC_INO_WARNS;
if (src->d_fieldmask & FS_DQ_RTBWARNS)
dst->i_fieldmask |= QC_RT_SPC_WARNS;
if (src->d_fieldmask & FS_DQ_BTIMER)
dst->i_fieldmask |= QC_SPC_TIMER;
if (src->d_fieldmask & FS_DQ_ITIMER)
dst->i_fieldmask |= QC_INO_TIMER;
if (src->d_fieldmask & FS_DQ_RTBTIMER)
dst->i_fieldmask |= QC_RT_SPC_TIMER;
}
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
struct qc_dqblk qdq;
struct kqid qid;
if (copy_from_user(&fdq, addr, sizeof(fdq)))
return -EFAULT;
if (!sb->s_qcop->set_dqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
/* Are we actually setting timer / warning limits for all users? */
if (from_kqid(sb->s_user_ns, qid) == 0 &&
fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
struct qc_info qinfo;
int ret;
if (!sb->s_qcop->set_info)
return -EINVAL;
copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
ret = sb->s_qcop->set_info(sb, type, &qinfo);
if (ret)
return ret;
/* These are already done */
fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
}
copy_from_xfs_dqblk(&qdq, &fdq);
return sb->s_qcop->set_dqblk(sb, qid, &qdq);
}
static inline void copy_to_xfs_dqblk_ts(const struct fs_disk_quota *d,
__s32 *timer_lo, __s8 *timer_hi, s64 timer)
{
*timer_lo = timer;
if (d->d_fieldmask & FS_DQ_BIGTIME)
*timer_hi = timer >> 32;
}
static inline bool want_bigtime(s64 timer)
{
return timer > S32_MAX || timer < S32_MIN;
}
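/*
 * In other words, expiry times that do not fit in 32 bits (anything
 * after 2038-01-19, when S32_MAX seconds since the epoch elapse) force
 * the FS_DQ_BIGTIME representation in copy_to_xfs_dqblk() below.
 */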
static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
int type, qid_t id)
{
memset(dst, 0, sizeof(*dst));
if (want_bigtime(src->d_ino_timer) || want_bigtime(src->d_spc_timer) ||
want_bigtime(src->d_rt_spc_timer))
dst->d_fieldmask |= FS_DQ_BIGTIME;
dst->d_version = FS_DQUOT_VERSION;
dst->d_id = id;
if (type == USRQUOTA)
dst->d_flags = FS_USER_QUOTA;
else if (type == PRJQUOTA)
dst->d_flags = FS_PROJ_QUOTA;
else
dst->d_flags = FS_GROUP_QUOTA;
dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
dst->d_ino_hardlimit = src->d_ino_hardlimit;
dst->d_ino_softlimit = src->d_ino_softlimit;
dst->d_bcount = quota_btobb(src->d_space);
dst->d_icount = src->d_ino_count;
copy_to_xfs_dqblk_ts(dst, &dst->d_itimer, &dst->d_itimer_hi,
src->d_ino_timer);
copy_to_xfs_dqblk_ts(dst, &dst->d_btimer, &dst->d_btimer_hi,
src->d_spc_timer);
dst->d_iwarns = src->d_ino_warns;
dst->d_bwarns = src->d_spc_warns;
dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
dst->d_rtbcount = quota_btobb(src->d_rt_space);
copy_to_xfs_dqblk_ts(dst, &dst->d_rtbtimer, &dst->d_rtbtimer_hi,
src->d_rt_spc_timer);
dst->d_rtbwarns = src->d_rt_spc_warns;
}
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
struct qc_dqblk qdq;
struct kqid qid;
int ret;
if (!sb->s_qcop->get_dqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
if (ret)
return ret;
copy_to_xfs_dqblk(&fdq, &qdq, type, id);
if (copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
return ret;
}
/*
* Return quota for next active quota >= this id, if any exists,
* otherwise return -ENOENT via ->get_nextdqblk.
*/
static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
struct qc_dqblk qdq;
struct kqid qid;
qid_t id_out;
int ret;
if (!sb->s_qcop->get_nextdqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
if (ret)
return ret;
id_out = from_kqid(current_user_ns(), qid);
copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
if (copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
return ret;
}
static int quota_rmxquota(struct super_block *sb, void __user *addr)
{
__u32 flags;
if (copy_from_user(&flags, addr, sizeof(flags)))
return -EFAULT;
if (!sb->s_qcop->rm_xquota)
return -ENOSYS;
return sb->s_qcop->rm_xquota(sb, flags);
}
/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
void __user *addr, const struct path *path)
{
int ret;
type = array_index_nospec(type, MAXQUOTAS);
/*
* Quota not supported on this fs? Check this before s_quota_types
* since they needn't be set if quota is not supported at all.
*/
if (!sb->s_qcop)
return -ENOSYS;
if (!(sb->s_quota_types & (1 << type)))
return -EINVAL;
ret = check_quotactl_permission(sb, type, cmd, id);
if (ret < 0)
return ret;
switch (cmd) {
case Q_QUOTAON:
return quota_quotaon(sb, type, id, path);
case Q_QUOTAOFF:
return quota_quotaoff(sb, type);
case Q_GETFMT:
return quota_getfmt(sb, type, addr);
case Q_GETINFO:
return quota_getinfo(sb, type, addr);
case Q_SETINFO:
return quota_setinfo(sb, type, addr);
case Q_GETQUOTA:
return quota_getquota(sb, type, id, addr);
case Q_GETNEXTQUOTA:
return quota_getnextquota(sb, type, id, addr);
case Q_SETQUOTA:
return quota_setquota(sb, type, id, addr);
case Q_SYNC:
if (!sb->s_qcop->quota_sync)
return -ENOSYS;
return sb->s_qcop->quota_sync(sb, type);
case Q_XQUOTAON:
return quota_enable(sb, addr);
case Q_XQUOTAOFF:
return quota_disable(sb, addr);
case Q_XQUOTARM:
return quota_rmxquota(sb, addr);
case Q_XGETQSTAT:
return quota_getxstate(sb, type, addr);
case Q_XGETQSTATV:
return quota_getxstatev(sb, type, addr);
case Q_XSETQLIM:
return quota_setxquota(sb, type, id, addr);
case Q_XGETQUOTA:
return quota_getxquota(sb, type, id, addr);
case Q_XGETNEXTQUOTA:
return quota_getnextxquota(sb, type, id, addr);
case Q_XQUOTASYNC:
if (sb_rdonly(sb))
return -EROFS;
/* XFS quotas are fully coherent now, making this call a noop */
return 0;
default:
return -EINVAL;
}
}
/* Return 1 if 'cmd' will block on frozen filesystem */
static int quotactl_cmd_write(int cmd)
{
/*
* We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access,
* as dquot_acquire() may allocate space for a new structure and OCFS2
* needs to increment the on-disk use count.
*/
switch (cmd) {
case Q_GETFMT:
case Q_GETINFO:
case Q_SYNC:
case Q_XGETQSTAT:
case Q_XGETQSTATV:
case Q_XGETQUOTA:
case Q_XGETNEXTQUOTA:
case Q_XQUOTASYNC:
return 0;
}
return 1;
}
/* Return true if quotactl command is manipulating quota on/off state */
static bool quotactl_cmd_onoff(int cmd)
{
return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
(cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
}
/*
* look up a superblock on which quota ops will be performed
* - use the name of a block device to find the superblock thereon
*/
static struct super_block *quotactl_block(const char __user *special, int cmd)
{
#ifdef CONFIG_BLOCK
struct super_block *sb;
struct filename *tmp = getname(special);
bool excl = false, thawed = false;
int error;
dev_t dev;
if (IS_ERR(tmp))
return ERR_CAST(tmp);
error = lookup_bdev(tmp->name, &dev);
putname(tmp);
if (error)
return ERR_PTR(error);
if (quotactl_cmd_onoff(cmd)) {
excl = true;
thawed = true;
} else if (quotactl_cmd_write(cmd)) {
thawed = true;
}
retry:
sb = user_get_super(dev, excl);
if (!sb)
return ERR_PTR(-ENODEV);
if (thawed && sb->s_writers.frozen != SB_UNFROZEN) {
if (excl)
up_write(&sb->s_umount);
else
up_read(&sb->s_umount);
/* Wait for sb to unfreeze */
sb_start_write(sb);
sb_end_write(sb);
put_super(sb);
goto retry;
}
return sb;
#else
return ERR_PTR(-ENODEV);
#endif
}
/*
* This is the system call interface. It communicates with user-level
* programs. Currently it only supports disk-quota calls; process quotas
* and the like might be added in the future, but rlimits are probably a
* better fit for those.
*/
SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
qid_t, id, void __user *, addr)
{
uint cmds, type;
struct super_block *sb = NULL;
struct path path, *pathp = NULL;
int ret;
cmds = cmd >> SUBCMDSHIFT;
type = cmd & SUBCMDMASK;
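/*
 * The command word packs the subcommand and the quota type; userspace
 * builds it with the QCMD() macro from <linux/quota.h>. For example,
 * QCMD(Q_GETQUOTA, USRQUOTA) is (0x800007 << 8) | 0 == 0x80000700.
 */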
if (type >= MAXQUOTAS)
return -EINVAL;
/*
* As a special case Q_SYNC can be called without a specific device.
* It will iterate all superblocks that have quota enabled and call
* the sync action on each of them.
*/
if (!special) {
if (cmds == Q_SYNC)
return quota_sync_all(type);
return -ENODEV;
}
/*
* Path for quotaon has to be resolved before grabbing superblock
* because that gets s_umount sem which is also possibly needed by path
* resolution (think about autofs) and thus deadlocks could arise.
*/
if (cmds == Q_QUOTAON) {
ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
if (ret)
pathp = ERR_PTR(ret);
else
pathp = &path;
}
sb = quotactl_block(special, cmds);
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
goto out;
}
ret = do_quotactl(sb, type, cmds, id, addr, pathp);
if (!quotactl_cmd_onoff(cmds))
drop_super(sb);
else
drop_super_exclusive(sb);
out:
if (pathp && !IS_ERR(pathp))
path_put(pathp);
return ret;
}
SYSCALL_DEFINE4(quotactl_fd, unsigned int, fd, unsigned int, cmd,
qid_t, id, void __user *, addr)
{
struct super_block *sb;
unsigned int cmds = cmd >> SUBCMDSHIFT;
unsigned int type = cmd & SUBCMDMASK;
struct fd f;
int ret;
f = fdget_raw(fd);
if (!f.file)
return -EBADF;
ret = -EINVAL;
if (type >= MAXQUOTAS)
goto out;
if (quotactl_cmd_write(cmds)) {
ret = mnt_want_write(f.file->f_path.mnt);
if (ret)
goto out;
}
sb = f.file->f_path.mnt->mnt_sb;
if (quotactl_cmd_onoff(cmds))
down_write(&sb->s_umount);
else
down_read(&sb->s_umount);
ret = do_quotactl(sb, type, cmds, id, addr, ERR_PTR(-EINVAL));
if (quotactl_cmd_onoff(cmds))
up_write(&sb->s_umount);
else
up_read(&sb->s_umount);
if (quotactl_cmd_write(cmds))
mnt_drop_write(f.file->f_path.mnt);
out:
fdput(f);
return ret;
}
| linux-master | fs/quota/quota.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/quotaops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/genetlink.h>
static const struct genl_multicast_group quota_mcgrps[] = {
{ .name = "events", },
};
/* Netlink family structure for quota */
static struct genl_family quota_genl_family __ro_after_init = {
.module = THIS_MODULE,
.hdrsize = 0,
.name = "VFS_DQUOT",
.version = 1,
.maxattr = QUOTA_NL_A_MAX,
.mcgrps = quota_mcgrps,
.n_mcgrps = ARRAY_SIZE(quota_mcgrps),
};
/**
* quota_send_warning - Send warning to userspace about exceeded quota
* @qid: The kernel internal quota identifier.
* @dev: The device on which the fs is mounted (sb->s_dev)
* @warntype: The type of the warning: QUOTA_NL_...
*
* This can be used by filesystems (including those which don't use
* dquot) to send a message to userspace relating to quota limits.
*
*/
void quota_send_warning(struct kqid qid, dev_t dev,
const char warntype)
{
static atomic_t seq;
struct sk_buff *skb;
void *msg_head;
int ret;
int msg_size = 4 * nla_total_size(sizeof(u32)) +
2 * nla_total_size_64bit(sizeof(u64));
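/*
 * The size covers the six attributes sent below: QTYPE, WARNING,
 * DEV_MAJOR and DEV_MINOR as u32, plus EXCESS_ID and CAUSED_ID as u64.
 */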
/* We have to allocate using GFP_NOFS as we are called from a
* filesystem performing write and thus further recursion into
* the fs to free some data could cause deadlocks. */
skb = genlmsg_new(msg_size, GFP_NOFS);
if (!skb) {
printk(KERN_ERR
"VFS: Not enough memory to send quota warning.\n");
return;
}
msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
"a_genl_family, 0, QUOTA_NL_C_WARNING);
if (!msg_head) {
printk(KERN_ERR
"VFS: Cannot store netlink header in quota warning.\n");
goto err_out;
}
ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
if (ret)
goto attr_err_out;
ret = nla_put_u64_64bit(skb, QUOTA_NL_A_EXCESS_ID,
from_kqid_munged(&init_user_ns, qid),
QUOTA_NL_A_PAD);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
if (ret)
goto attr_err_out;
ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
if (ret)
goto attr_err_out;
ret = nla_put_u64_64bit(skb, QUOTA_NL_A_CAUSED_ID,
from_kuid_munged(&init_user_ns, current_uid()),
QUOTA_NL_A_PAD);
if (ret)
goto attr_err_out;
genlmsg_end(skb, msg_head);
genlmsg_multicast("a_genl_family, skb, 0, 0, GFP_NOFS);
return;
attr_err_out:
printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
kfree_skb(skb);
}
EXPORT_SYMBOL(quota_send_warning);
static int __init quota_init(void)
{
if (genl_register_family("a_genl_family) != 0)
printk(KERN_ERR
"VFS: Failed to create quota netlink interface.\n");
return 0;
};
fs_initcall(quota_init);
| linux-master | fs/quota/netlink.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/export.h>
/**
* qid_eq - Test to see if two kqid values are the same
* @left: A qid value
* @right: Another qid value
*
* Return true if the two qid values are equal and false otherwise.
*/
bool qid_eq(struct kqid left, struct kqid right)
{
if (left.type != right.type)
return false;
switch (left.type) {
case USRQUOTA:
return uid_eq(left.uid, right.uid);
case GRPQUOTA:
return gid_eq(left.gid, right.gid);
case PRJQUOTA:
return projid_eq(left.projid, right.projid);
default:
BUG();
}
}
EXPORT_SYMBOL(qid_eq);
/**
* qid_lt - Test to see if one qid value is less than another
* @left: The possibly lesser qid value
* @right: The possibly greater qid value
*
* Return true if left is less than right and false otherwise.
*/
bool qid_lt(struct kqid left, struct kqid right)
{
if (left.type < right.type)
return true;
if (left.type > right.type)
return false;
switch (left.type) {
case USRQUOTA:
return uid_lt(left.uid, right.uid);
case GRPQUOTA:
return gid_lt(left.gid, right.gid);
case PRJQUOTA:
return projid_lt(left.projid, right.projid);
default:
BUG();
}
}
EXPORT_SYMBOL(qid_lt);
/**
* from_kqid - Create a qid from a kqid user-namespace pair.
* @targ: The user namespace we want a qid in.
* @kqid: The kernel internal quota identifier to start with.
*
* Map @kqid into the user-namespace specified by @targ and
* return the resulting qid.
*
* There is always a mapping into the initial user_namespace.
*
* If @kqid has no mapping in @targ (qid_t)-1 is returned.
*/
qid_t from_kqid(struct user_namespace *targ, struct kqid kqid)
{
switch (kqid.type) {
case USRQUOTA:
return from_kuid(targ, kqid.uid);
case GRPQUOTA:
return from_kgid(targ, kqid.gid);
case PRJQUOTA:
return from_kprojid(targ, kqid.projid);
default:
BUG();
}
}
EXPORT_SYMBOL(from_kqid);
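/*
 * Illustrative example (hypothetical namespace, not kernel code): in a
 * user namespace created with uid_map "0 100000 65536", from_kqid() on a
 * USRQUOTA kqid holding kuid 100000 yields qid 0, while a kuid outside
 * the mapped range yields (qid_t)-1.
 */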
/**
* from_kqid_munged - Create a qid from a kqid user-namespace pair.
* @targ: The user namespace we want a qid in.
* @kqid: The kernel internal quota identifier to start with.
*
* Map @kqid into the user-namespace specified by @targ and
* return the resulting qid.
*
* There is always a mapping into the initial user_namespace.
*
* Unlike from_kqid, from_kqid_munged never fails and always
* returns a valid qid. This makes from_kqid_munged
* appropriate for use in places where failing to provide
* a qid_t is not a good option.
*
* If @kqid has no mapping in @targ the kqid.type specific
* overflow identifier is returned.
*/
qid_t from_kqid_munged(struct user_namespace *targ, struct kqid kqid)
{
switch (kqid.type) {
case USRQUOTA:
return from_kuid_munged(targ, kqid.uid);
case GRPQUOTA:
return from_kgid_munged(targ, kqid.gid);
case PRJQUOTA:
return from_kprojid_munged(targ, kqid.projid);
default:
BUG();
}
}
EXPORT_SYMBOL(from_kqid_munged);
/**
* qid_valid - Report if a valid value is stored in a kqid.
* @qid: The kernel internal quota identifier to test.
*/
bool qid_valid(struct kqid qid)
{
switch (qid.type) {
case USRQUOTA:
return uid_valid(qid.uid);
case GRPQUOTA:
return gid_valid(qid.gid);
case PRJQUOTA:
return projid_valid(qid.projid);
default:
BUG();
}
}
EXPORT_SYMBOL(qid_valid);
| linux-master | fs/quota/kqid.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
/*
* Clear the unread part of an I/O request.
*/
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
struct iov_iter iter;
iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages,
subreq->start + subreq->transferred,
subreq->len - subreq->transferred);
iov_iter_zero(iov_iter_count(&iter), &iter);
}
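/*
 * Example: if a 4096-byte subrequest transferred only 1024 bytes,
 * netfs_clear_unread() zeroes the remaining 3072 bytes of the mapped
 * buffer range.
 */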
static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
bool was_async)
{
struct netfs_io_subrequest *subreq = priv;
netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}
/*
* Issue a read against the cache.
* - Eats the caller's ref on subreq.
*/
static void netfs_read_from_cache(struct netfs_io_request *rreq,
struct netfs_io_subrequest *subreq,
enum netfs_read_from_hole read_hole)
{
struct netfs_cache_resources *cres = &rreq->cache_resources;
struct iov_iter iter;
netfs_stat(&netfs_n_rh_read);
iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
subreq->start + subreq->transferred,
subreq->len - subreq->transferred);
cres->ops->read(cres, subreq->start, &iter, read_hole,
netfs_cache_read_terminated, subreq);
}
/*
* Fill a subrequest region with zeroes.
*/
static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
struct netfs_io_subrequest *subreq)
{
netfs_stat(&netfs_n_rh_zero);
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
netfs_subreq_terminated(subreq, 0, false);
}
/*
* Ask the netfs to issue a read request to the server for us.
*
* The netfs is expected to read from subreq->pos + subreq->transferred to
* subreq->pos + subreq->len - 1. It may not backtrack and write data into the
* buffer prior to the transferred point as it might clobber dirty data
* obtained from the cache.
*
* Alternatively, the netfs is allowed to indicate one of two things:
*
* - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
* make progress.
*
* - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
* cleared.
*/
static void netfs_read_from_server(struct netfs_io_request *rreq,
struct netfs_io_subrequest *subreq)
{
netfs_stat(&netfs_n_rh_download);
rreq->netfs_ops->issue_read(subreq);
}
/*
* Release those waiting.
*/
static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
{
trace_netfs_rreq(rreq, netfs_rreq_trace_done);
netfs_clear_subrequests(rreq, was_async);
netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
}
/*
* Deal with the completion of writing the data to the cache. We have to clear
* the PG_fscache bits on the folios involved and release the caller's ref.
*
* May be called in softirq mode and we inherit a ref from the caller.
*/
static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
bool was_async)
{
struct netfs_io_subrequest *subreq;
struct folio *folio;
pgoff_t unlocked = 0;
bool have_unlocked = false;
rcu_read_lock();
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
if (xas_retry(&xas, folio))
continue;
/* We might have multiple writes from the same huge
* folio, but we mustn't unlock a folio more than once.
*/
if (have_unlocked && folio_index(folio) <= unlocked)
continue;
unlocked = folio_index(folio);
folio_end_fscache(folio);
have_unlocked = true;
}
}
rcu_read_unlock();
netfs_rreq_completed(rreq, was_async);
}
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
bool was_async)
{
struct netfs_io_subrequest *subreq = priv;
struct netfs_io_request *rreq = subreq->rreq;
if (IS_ERR_VALUE(transferred_or_error)) {
netfs_stat(&netfs_n_rh_write_failed);
trace_netfs_failure(rreq, subreq, transferred_or_error,
netfs_fail_copy_to_cache);
} else {
netfs_stat(&netfs_n_rh_write_done);
}
trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
if (atomic_dec_and_test(&rreq->nr_copy_ops))
netfs_rreq_unmark_after_write(rreq, was_async);
netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}
/*
* Perform any outstanding writes to the cache. We inherit a ref from the
* caller.
*/
static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
{
struct netfs_cache_resources *cres = &rreq->cache_resources;
struct netfs_io_subrequest *subreq, *next, *p;
struct iov_iter iter;
int ret;
trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
/* We don't want terminating writes trying to wake us up whilst we're
* still going through the list.
*/
atomic_inc(&rreq->nr_copy_ops);
list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
list_del_init(&subreq->rreq_link);
netfs_put_subrequest(subreq, false,
netfs_sreq_trace_put_no_copy);
}
}
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
/* Amalgamate adjacent writes */
while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
next = list_next_entry(subreq, rreq_link);
if (next->start != subreq->start + subreq->len)
break;
subreq->len += next->len;
list_del_init(&next->rreq_link);
netfs_put_subrequest(next, false,
netfs_sreq_trace_put_merged);
}
ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
rreq->i_size, true);
if (ret < 0) {
trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
continue;
}
iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
subreq->start, subreq->len);
atomic_inc(&rreq->nr_copy_ops);
netfs_stat(&netfs_n_rh_write);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
trace_netfs_sreq(subreq, netfs_sreq_trace_write);
cres->ops->write(cres, subreq->start, &iter,
netfs_rreq_copy_terminated, subreq);
}
/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
if (atomic_dec_and_test(&rreq->nr_copy_ops))
netfs_rreq_unmark_after_write(rreq, false);
}
static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
netfs_rreq_do_write_to_cache(rreq);
}
static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
{
rreq->work.func = netfs_rreq_write_to_cache_work;
if (!queue_work(system_unbound_wq, &rreq->work))
BUG();
}
/*
* Handle a short read.
*/
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
struct netfs_io_subrequest *subreq)
{
__clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
netfs_stat(&netfs_n_rh_short_read);
trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
atomic_inc(&rreq->nr_outstanding);
if (subreq->source == NETFS_READ_FROM_CACHE)
netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
else
netfs_read_from_server(rreq, subreq);
}
/*
* Resubmit any short or failed operations. Returns true if we got the rreq
* ref back.
*/
static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
WARN_ON(in_interrupt());
trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
/* We don't want terminating submissions trying to wake us up whilst
* we're still going through the list.
*/
atomic_inc(&rreq->nr_outstanding);
__clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
if (subreq->error) {
if (subreq->source != NETFS_READ_FROM_CACHE)
break;
subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
subreq->error = 0;
netfs_stat(&netfs_n_rh_download_instead);
trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
atomic_inc(&rreq->nr_outstanding);
netfs_read_from_server(rreq, subreq);
} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
netfs_rreq_short_read(rreq, subreq);
}
}
/* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
if (atomic_dec_and_test(&rreq->nr_outstanding))
return true;
wake_up_var(&rreq->nr_outstanding);
return false;
}
/*
* Check to see if the data read is still valid.
*/
static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
if (!rreq->netfs_ops->is_still_valid ||
rreq->netfs_ops->is_still_valid(rreq))
return;
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
if (subreq->source == NETFS_READ_FROM_CACHE) {
subreq->error = -ESTALE;
__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
}
}
}
/*
* Assess the state of a read request and decide what to do next.
*
* Note that we could be in an ordinary kernel thread, on a workqueue or in
* softirq context at this point. We inherit a ref from the caller.
*/
static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
{
trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
again:
netfs_rreq_is_still_valid(rreq);
if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
if (netfs_rreq_perform_resubmissions(rreq))
goto again;
return;
}
netfs_rreq_unlock_folios(rreq);
clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
return netfs_rreq_write_to_cache(rreq);
netfs_rreq_completed(rreq, was_async);
}
static void netfs_rreq_work(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
netfs_rreq_assess(rreq, false);
}
/*
* Handle the completion of all outstanding I/O operations on a read request.
* We inherit a ref from the caller.
*/
static void netfs_rreq_terminated(struct netfs_io_request *rreq,
bool was_async)
{
if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
was_async) {
if (!queue_work(system_unbound_wq, &rreq->work))
BUG();
} else {
netfs_rreq_assess(rreq, was_async);
}
}
/**
* netfs_subreq_terminated - Note the termination of an I/O operation.
* @subreq: The I/O request that has terminated.
* @transferred_or_error: The amount of data transferred or an error code.
* @was_async: The termination was asynchronous
*
* This tells the read helper that a contributory I/O operation has terminated,
* one way or another, and that it should integrate the results.
*
* The caller indicates in @transferred_or_error the outcome of the operation,
* supplying a positive value to indicate the number of bytes transferred, 0 to
* indicate a failure to transfer anything that should be retried or a negative
* error code. The helper will look after reissuing I/O operations as
* appropriate and writing downloaded data to the cache.
*
* If @was_async is true, the caller might be running in softirq or interrupt
* context and we can't sleep.
*/
void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
ssize_t transferred_or_error,
bool was_async)
{
struct netfs_io_request *rreq = subreq->rreq;
int u;
_enter("[%u]{%llx,%lx},%zd",
subreq->debug_index, subreq->start, subreq->flags,
transferred_or_error);
switch (subreq->source) {
case NETFS_READ_FROM_CACHE:
netfs_stat(&netfs_n_rh_read_done);
break;
case NETFS_DOWNLOAD_FROM_SERVER:
netfs_stat(&netfs_n_rh_download_done);
break;
default:
break;
}
if (IS_ERR_VALUE(transferred_or_error)) {
subreq->error = transferred_or_error;
trace_netfs_failure(rreq, subreq, transferred_or_error,
netfs_fail_read);
goto failed;
}
if (WARN(transferred_or_error > subreq->len - subreq->transferred,
"Subreq overread: R%x[%x] %zd > %zu - %zu",
rreq->debug_id, subreq->debug_index,
transferred_or_error, subreq->len, subreq->transferred))
transferred_or_error = subreq->len - subreq->transferred;
subreq->error = 0;
subreq->transferred += transferred_or_error;
if (subreq->transferred < subreq->len)
goto incomplete;
complete:
__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
out:
trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
/* If we decrement nr_outstanding to 0, the ref belongs to us. */
u = atomic_dec_return(&rreq->nr_outstanding);
if (u == 0)
netfs_rreq_terminated(rreq, was_async);
else if (u == 1)
wake_up_var(&rreq->nr_outstanding);
netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
return;
incomplete:
if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
netfs_clear_unread(subreq);
subreq->transferred = subreq->len;
goto complete;
}
if (transferred_or_error == 0) {
if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
subreq->error = -ENODATA;
goto failed;
}
} else {
__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
}
__set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
goto out;
failed:
if (subreq->source == NETFS_READ_FROM_CACHE) {
netfs_stat(&netfs_n_rh_read_failed);
set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
} else {
netfs_stat(&netfs_n_rh_download_failed);
set_bit(NETFS_RREQ_FAILED, &rreq->flags);
rreq->error = subreq->error;
}
goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);
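/*
 * Ask the cache, if one is attached, how the next subrequest should be
 * sourced. Without a cache, anything at or beyond the EOF is filled
 * with zeroes and everything else must be downloaded from the server.
 */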
static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
loff_t i_size)
{
struct netfs_io_request *rreq = subreq->rreq;
struct netfs_cache_resources *cres = &rreq->cache_resources;
if (cres->ops)
return cres->ops->prepare_read(subreq, i_size);
if (subreq->start >= rreq->i_size)
return NETFS_FILL_WITH_ZEROES;
return NETFS_DOWNLOAD_FROM_SERVER;
}
/*
* Work out what sort of subrequest the next one will be.
*/
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
struct netfs_io_subrequest *subreq)
{
enum netfs_io_source source;
_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
source = netfs_cache_prepare_read(subreq, rreq->i_size);
if (source == NETFS_INVALID_READ)
goto out;
if (source == NETFS_DOWNLOAD_FROM_SERVER) {
/* Call out to the netfs to let it shrink the request to fit
* its own I/O sizes and boundaries. If it shrinks it here, it
* will be called again to make simultaneous calls; if it wants
* to make serial calls, it can indicate a short read and then
* we will call it again.
*/
if (subreq->len > rreq->i_size - subreq->start)
subreq->len = rreq->i_size - subreq->start;
if (rreq->netfs_ops->clamp_length &&
!rreq->netfs_ops->clamp_length(subreq)) {
source = NETFS_INVALID_READ;
goto out;
}
}
if (WARN_ON(subreq->len == 0))
source = NETFS_INVALID_READ;
out:
subreq->source = source;
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
return source;
}
/*
* Slice off a piece of a read request and submit an I/O request for it.
*/
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
unsigned int *_debug_index)
{
struct netfs_io_subrequest *subreq;
enum netfs_io_source source;
subreq = netfs_alloc_subrequest(rreq);
if (!subreq)
return false;
subreq->debug_index = (*_debug_index)++;
subreq->start = rreq->start + rreq->submitted;
subreq->len = rreq->len - rreq->submitted;
_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
/* Call out to the cache to find out what it can do with the remaining
* subset. It tells us in subreq->flags what it decided should be done
* and adjusts subreq->len down if the subset crosses a cache boundary.
*
* Then when we hand the subset, it can choose to take a subset of that
* (the starts must coincide), in which case, we go around the loop
* again and ask it to download the next piece.
*/
source = netfs_rreq_prepare_read(rreq, subreq);
if (source == NETFS_INVALID_READ)
goto subreq_failed;
atomic_inc(&rreq->nr_outstanding);
rreq->submitted += subreq->len;
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
switch (source) {
case NETFS_FILL_WITH_ZEROES:
netfs_fill_with_zeroes(rreq, subreq);
break;
case NETFS_DOWNLOAD_FROM_SERVER:
netfs_read_from_server(rreq, subreq);
break;
case NETFS_READ_FROM_CACHE:
netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
break;
default:
BUG();
}
return true;
subreq_failed:
rreq->error = subreq->error;
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
return false;
}
/*
* Begin the process of reading in a chunk of data, where that data may be
* stitched together from multiple sources, including multiple servers and the
* local cache.
*/
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
unsigned int debug_index = 0;
int ret;
_enter("R=%x %llx-%llx",
rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
if (rreq->len == 0) {
pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
return -EIO;
}
INIT_WORK(&rreq->work, netfs_rreq_work);
if (sync)
netfs_get_request(rreq, netfs_rreq_trace_get_hold);
/* Chop the read into slices according to what the cache and the netfs
* want and submit each one.
*/
atomic_set(&rreq->nr_outstanding, 1);
do {
if (!netfs_rreq_submit_slice(rreq, &debug_index))
break;
} while (rreq->submitted < rreq->len);
if (sync) {
/* Keep nr_outstanding incremented so that the ref always belongs to
* us, and the service code isn't punted off to a random thread pool to
* process.
*/
for (;;) {
wait_var_event(&rreq->nr_outstanding,
atomic_read(&rreq->nr_outstanding) == 1);
netfs_rreq_assess(rreq, false);
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
break;
cond_resched();
}
ret = rreq->error;
if (ret == 0 && rreq->submitted < rreq->len) {
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
ret = -EIO;
}
netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
} else {
/* If we decrement nr_outstanding to 0, the ref belongs to us. */
if (atomic_dec_and_test(&rreq->nr_outstanding))
netfs_rreq_assess(rreq, false);
ret = 0;
}
return ret;
}
| linux-master | fs/netfs/io.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Netfs support statistics
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/export.h>
#include <linux/seq_file.h>
#include "internal.h"
atomic_t netfs_n_rh_readahead;
atomic_t netfs_n_rh_readpage;
atomic_t netfs_n_rh_rreq;
atomic_t netfs_n_rh_sreq;
atomic_t netfs_n_rh_download;
atomic_t netfs_n_rh_download_done;
atomic_t netfs_n_rh_download_failed;
atomic_t netfs_n_rh_download_instead;
atomic_t netfs_n_rh_read;
atomic_t netfs_n_rh_read_done;
atomic_t netfs_n_rh_read_failed;
atomic_t netfs_n_rh_zero;
atomic_t netfs_n_rh_short_read;
atomic_t netfs_n_rh_write;
atomic_t netfs_n_rh_write_begin;
atomic_t netfs_n_rh_write_done;
atomic_t netfs_n_rh_write_failed;
atomic_t netfs_n_rh_write_zskip;
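/*
 * Dump the read-helper counters into a seq_file (called, for example,
 * from fscache's /proc statistics file).
 */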
void netfs_stats_show(struct seq_file *m)
{
seq_printf(m, "RdHelp : RA=%u RP=%u WB=%u WBZ=%u rr=%u sr=%u\n",
atomic_read(&netfs_n_rh_readahead),
atomic_read(&netfs_n_rh_readpage),
atomic_read(&netfs_n_rh_write_begin),
atomic_read(&netfs_n_rh_write_zskip),
atomic_read(&netfs_n_rh_rreq),
atomic_read(&netfs_n_rh_sreq));
seq_printf(m, "RdHelp : ZR=%u sh=%u sk=%u\n",
atomic_read(&netfs_n_rh_zero),
atomic_read(&netfs_n_rh_short_read),
atomic_read(&netfs_n_rh_write_zskip));
seq_printf(m, "RdHelp : DL=%u ds=%u df=%u di=%u\n",
atomic_read(&netfs_n_rh_download),
atomic_read(&netfs_n_rh_download_done),
atomic_read(&netfs_n_rh_download_failed),
atomic_read(&netfs_n_rh_download_instead));
seq_printf(m, "RdHelp : RD=%u rs=%u rf=%u\n",
atomic_read(&netfs_n_rh_read),
atomic_read(&netfs_n_rh_read_done),
atomic_read(&netfs_n_rh_read_failed));
seq_printf(m, "RdHelp : WR=%u ws=%u wf=%u\n",
atomic_read(&netfs_n_rh_write),
atomic_read(&netfs_n_rh_write_done),
atomic_read(&netfs_n_rh_write_failed));
}
EXPORT_SYMBOL(netfs_stats_show);
| linux-master | fs/netfs/stats.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Iterator helpers.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/scatterlist.h>
#include <linux/netfs.h>
#include "internal.h"
/**
* netfs_extract_user_iter - Extract the pages from a user iterator into a bvec
* @orig: The original iterator
* @orig_len: The amount of iterator to copy
* @new: The iterator to be set up
* @extraction_flags: Flags to qualify the request
*
* Extract the page fragments from the given amount of the source iterator and
* build up a second iterator that refers to all of those bits. This allows
* the original iterator to be disposed of.
*
* @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA be
* allowed on the pages extracted.
*
* On success, the number of elements in the bvec is returned, the original
* iterator will have been advanced by the amount extracted.
*
* The iov_iter_extract_mode() function should be used to query how cleanup
* should be performed.
*/
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
struct iov_iter *new,
iov_iter_extraction_t extraction_flags)
{
struct bio_vec *bv = NULL;
struct page **pages;
unsigned int cur_npages;
unsigned int max_pages;
unsigned int npages = 0;
unsigned int i;
ssize_t ret;
size_t count = orig_len, offset, len;
size_t bv_size, pg_size;
if (WARN_ON_ONCE(!iter_is_ubuf(orig) && !iter_is_iovec(orig)))
return -EIO;
max_pages = iov_iter_npages(orig, INT_MAX);
bv_size = array_size(max_pages, sizeof(*bv));
bv = kvmalloc(bv_size, GFP_KERNEL);
if (!bv)
return -ENOMEM;
/* Put the page list at the end of the bvec list storage. bvec
* elements are larger than page pointers, so as long as we work
* 0->last, we should be fine.
*/
pg_size = array_size(max_pages, sizeof(*pages));
pages = (void *)bv + bv_size - pg_size;
while (count && npages < max_pages) {
ret = iov_iter_extract_pages(orig, &pages, count,
max_pages - npages, extraction_flags,
&offset);
if (ret < 0) {
pr_err("Couldn't get user pages (rc=%zd)\n", ret);
break;
}
if (ret > count) {
pr_err("get_pages rc=%zd more than %zu\n", ret, count);
break;
}
count -= ret;
ret += offset;
cur_npages = DIV_ROUND_UP(ret, PAGE_SIZE);
if (npages + cur_npages > max_pages) {
pr_err("Out of bvec array capacity (%u vs %u)\n",
npages + cur_npages, max_pages);
break;
}
for (i = 0; i < cur_npages; i++) {
len = ret > PAGE_SIZE ? PAGE_SIZE : ret;
bvec_set_page(bv + npages + i, *pages++, len - offset, offset);
ret -= len;
offset = 0;
}
npages += cur_npages;
}
iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);
return npages;
}
EXPORT_SYMBOL_GPL(netfs_extract_user_iter);
| linux-master | fs/netfs/iterator.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support.
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/export.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
/*
* Unlock the folios in a read operation. We need to set PG_fscache on any
* folios we're going to write back before we unlock them.
*/
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
struct folio *folio;
pgoff_t start_page = rreq->start / PAGE_SIZE;
pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
size_t account = 0;
bool subreq_failed = false;
XA_STATE(xas, &rreq->mapping->i_pages, start_page);
if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
__clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
}
}
/* Walk through the pagecache and the I/O request lists simultaneously.
* We may have a mixture of cached and uncached sections and we only
* really want to write out the uncached sections. This is slightly
* complicated by the possibility that we might have huge pages with a
* mixture inside.
*/
subreq = list_first_entry(&rreq->subrequests,
struct netfs_io_subrequest, rreq_link);
subreq_failed = (subreq->error < 0);
trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
rcu_read_lock();
xas_for_each(&xas, folio, last_page) {
loff_t pg_end;
bool pg_failed = false;
bool folio_started;
if (xas_retry(&xas, folio))
continue;
pg_end = folio_pos(folio) + folio_size(folio) - 1;
folio_started = false;
for (;;) {
loff_t sreq_end;
if (!subreq) {
pg_failed = true;
break;
}
if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
folio_start_fscache(folio);
folio_started = true;
}
pg_failed |= subreq_failed;
sreq_end = subreq->start + subreq->len - 1;
if (pg_end < sreq_end)
break;
account += subreq->transferred;
if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
subreq = list_next_entry(subreq, rreq_link);
subreq_failed = (subreq->error < 0);
} else {
subreq = NULL;
subreq_failed = false;
}
if (pg_end == sreq_end)
break;
}
if (!pg_failed) {
flush_dcache_folio(folio);
folio_mark_uptodate(folio);
}
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
if (folio_index(folio) == rreq->no_unlock_folio &&
test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
_debug("no unlock");
else
folio_unlock(folio);
}
}
rcu_read_unlock();
task_io_account_read(account);
if (rreq->netfs_ops->done)
rreq->netfs_ops->done(rreq);
}
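/*
 * Give the cache, if present, a chance to expand a proposed readahead
 * so that it covers whole cache granules.
 */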
static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
loff_t *_start, size_t *_len, loff_t i_size)
{
struct netfs_cache_resources *cres = &rreq->cache_resources;
if (cres->ops && cres->ops->expand_readahead)
cres->ops->expand_readahead(cres, _start, _len, i_size);
}
static void netfs_rreq_expand(struct netfs_io_request *rreq,
struct readahead_control *ractl)
{
/* Give the cache a chance to change the request parameters. The
* resultant request must contain the original region.
*/
netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
/* Give the netfs a chance to change the request parameters. The
* resultant request must contain the original region.
*/
if (rreq->netfs_ops->expand_readahead)
rreq->netfs_ops->expand_readahead(rreq);
/* Expand the request if the cache wants it to start earlier. Note
* that the expansion may get further extended if the VM wishes to
* insert THPs and the preferred start and/or end wind up in the middle
* of THPs.
*
* If this is the case, however, the THP size should be an integer
* multiple of the cache granule size, so we get a whole number of
* granules to deal with.
*/
if (rreq->start != readahead_pos(ractl) ||
rreq->len != readahead_length(ractl)) {
readahead_expand(ractl, rreq->start, rreq->len);
rreq->start = readahead_pos(ractl);
rreq->len = readahead_length(ractl);
trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
netfs_read_trace_expanded);
}
}
/**
* netfs_readahead - Helper to manage a read request
* @ractl: The description of the readahead request
*
* Fulfil a readahead request by drawing data from the cache if possible, or
* the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O
* requests from different sources will get munged together. If necessary, the
* readahead window can be expanded in either direction to a more convenient
* alignment for RPC efficiency or to make storage in the cache feasible.
*
* The calling netfs must initialise a netfs context contiguous to the vfs
* inode before calling this.
*
* This is usable whether or not caching is enabled.
*/
void netfs_readahead(struct readahead_control *ractl)
{
struct netfs_io_request *rreq;
struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
int ret;
_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
if (readahead_count(ractl) == 0)
return;
rreq = netfs_alloc_request(ractl->mapping, ractl->file,
readahead_pos(ractl),
readahead_length(ractl),
NETFS_READAHEAD);
if (IS_ERR(rreq))
return;
if (ctx->ops->begin_cache_operation) {
ret = ctx->ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto cleanup_free;
}
netfs_stat(&netfs_n_rh_readahead);
trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
netfs_read_trace_readahead);
netfs_rreq_expand(rreq, ractl);
/* Drop the refs on the folios here rather than in the cache or
* filesystem. The locks will be dropped in netfs_rreq_unlock_folios().
*/
while (readahead_folio(ractl))
;
netfs_begin_read(rreq, false);
return;
cleanup_free:
netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
return;
}
EXPORT_SYMBOL(netfs_readahead);
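/* Example (an illustrative sketch, not taken from a real filesystem): once
 * a netfs has initialised its netfs_inode, the read helpers can be wired
 * directly into its address_space_operations:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.readahead	= netfs_readahead,
 *		.read_folio	= netfs_read_folio,
 *	};
 */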
/**
* netfs_read_folio - Helper to manage a read_folio request
* @file: The file to read from
* @folio: The folio to read
*
* Fulfil a read_folio request by drawing data from the cache if
* possible, or the netfs if not. Space beyond the EOF is zero-filled.
* Multiple I/O requests from different sources will get munged together.
*
* The calling netfs must initialise a netfs context contiguous to the vfs
* inode before calling this.
*
* This is usable whether or not caching is enabled.
*/
int netfs_read_folio(struct file *file, struct folio *folio)
{
struct address_space *mapping = folio_file_mapping(folio);
struct netfs_io_request *rreq;
struct netfs_inode *ctx = netfs_inode(mapping->host);
int ret;
_enter("%lx", folio_index(folio));
rreq = netfs_alloc_request(mapping, file,
folio_file_pos(folio), folio_size(folio),
NETFS_READPAGE);
if (IS_ERR(rreq)) {
ret = PTR_ERR(rreq);
goto alloc_error;
}
if (ctx->ops->begin_cache_operation) {
ret = ctx->ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto discard;
}
netfs_stat(&netfs_n_rh_readpage);
trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
return netfs_begin_read(rreq, true);
discard:
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
folio_unlock(folio);
return ret;
}
EXPORT_SYMBOL(netfs_read_folio);
/*
* Prepare a folio for writing without reading first
* @folio: The folio being prepared
* @pos: starting position for the write
* @len: length of write
* @always_fill: T if the folio should always be completely filled/cleared
*
* In some cases, write_begin doesn't need to read at all:
* - full folio write
* - write that lies in a folio that is completely beyond EOF
* - write that covers the folio from start to EOF or beyond it
*
* If any of these criteria are met, then zero out the unwritten parts
* of the folio and return true. Otherwise, return false.
*/
static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
bool always_fill)
{
struct inode *inode = folio_inode(folio);
loff_t i_size = i_size_read(inode);
size_t offset = offset_in_folio(folio, pos);
size_t plen = folio_size(folio);
if (unlikely(always_fill)) {
if (pos - offset + len <= i_size)
return false; /* Page entirely before EOF */
zero_user_segment(&folio->page, 0, plen);
folio_mark_uptodate(folio);
return true;
}
/* Full folio write */
if (offset == 0 && len >= plen)
return true;
/* Page entirely beyond the end of the file */
if (pos - offset >= i_size)
goto zero_out;
/* Write that covers from the start of the folio to EOF or beyond */
if (offset == 0 && (pos + len) >= i_size)
goto zero_out;
return false;
zero_out:
zero_user_segments(&folio->page, 0, offset, offset + len, plen);
return true;
}
/**
* netfs_write_begin - Helper to prepare for writing
* @ctx: The netfs context
* @file: The file to read from
* @mapping: The mapping to read from
* @pos: File position at which the write will begin
* @len: The length of the write (may extend beyond the end of the folio chosen)
* @_folio: Where to put the resultant folio
* @_fsdata: Place for the netfs to store a cookie
*
* Pre-read data for a write-begin request by drawing data from the cache if
* possible, or the netfs if not. Space beyond the EOF is zero-filled.
* Multiple I/O requests from different sources will get munged together. If
* necessary, the readahead window can be expanded in either direction to a
* more convenient alignment for RPC efficiency or to make storage in the cache
* feasible.
*
* The calling netfs must provide a table of operations, only one of which,
* issue_op, is mandatory.
*
* The check_write_begin() operation can be provided to check for and flush
* conflicting writes once the folio is grabbed and locked. It is passed a
* pointer to the fsdata cookie that gets returned to the VM to be passed to
* write_end. It is permitted to sleep. It should return 0 if the request
* should go ahead or it may return an error. It may also unlock and put the
* folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
* will cause the folio to be re-got and the process to be retried.
*
* The calling netfs must initialise a netfs context contiguous to the vfs
* inode before calling this.
*
* This is usable whether or not caching is enabled.
*/
int netfs_write_begin(struct netfs_inode *ctx,
struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, struct folio **_folio,
void **_fsdata)
{
struct netfs_io_request *rreq;
struct folio *folio;
pgoff_t index = pos >> PAGE_SHIFT;
int ret;
DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
retry:
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
return PTR_ERR(folio);
if (ctx->ops->check_write_begin) {
/* Allow the netfs (eg. ceph) to flush conflicts. */
ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
if (ret < 0) {
trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
goto error;
}
if (!folio)
goto retry;
}
if (folio_test_uptodate(folio))
goto have_folio;
/* If the page is beyond the EOF, we want to clear it - unless it's
* within the cache granule containing the EOF, in which case we need
* to preload the granule.
*/
if (!netfs_is_cache_enabled(ctx) &&
netfs_skip_folio_read(folio, pos, len, false)) {
netfs_stat(&netfs_n_rh_write_zskip);
goto have_folio_no_wait;
}
rreq = netfs_alloc_request(mapping, file,
folio_file_pos(folio), folio_size(folio),
NETFS_READ_FOR_WRITE);
if (IS_ERR(rreq)) {
ret = PTR_ERR(rreq);
goto error;
}
rreq->no_unlock_folio = folio_index(folio);
__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
if (ctx->ops->begin_cache_operation) {
ret = ctx->ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto error_put;
}
netfs_stat(&netfs_n_rh_write_begin);
trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
/* Expand the request to meet caching requirements and download
* preferences.
*/
ractl._nr_pages = folio_nr_pages(folio);
netfs_rreq_expand(rreq, &ractl);
/* We hold the folio locks, so we can drop the references */
folio_get(folio);
while (readahead_folio(&ractl))
;
ret = netfs_begin_read(rreq, true);
if (ret < 0)
goto error;
have_folio:
ret = folio_wait_fscache_killable(folio);
if (ret < 0)
goto error;
have_folio_no_wait:
*_folio = folio;
_leave(" = 0");
return 0;
error_put:
netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
error:
if (folio) {
folio_unlock(folio);
folio_put(folio);
}
_leave(" = %d", ret);
return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
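/* Example (an illustrative sketch; "myfs" is hypothetical): a netfs
 * implementing ->write_begin() typically just delegates to the helper
 * above and hands the resulting page back to the VM:
 *
 *	static int myfs_write_begin(struct file *file,
 *				    struct address_space *mapping,
 *				    loff_t pos, unsigned int len,
 *				    struct page **pagep, void **fsdata)
 *	{
 *		struct folio *folio;
 *		int ret;
 *
 *		ret = netfs_write_begin(netfs_inode(mapping->host), file,
 *					mapping, pos, len, &folio, fsdata);
 *		if (ret == 0)
 *			*pagep = folio_file_page(folio, pos / PAGE_SIZE);
 *		return ret;
 *	}
 */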
| linux-master | fs/netfs/buffered_read.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Miscellaneous bits for the netfs support library.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/module.h>
#include <linux/export.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/netfs.h>
MODULE_DESCRIPTION("Network fs support");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
unsigned netfs_debug;
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
| linux-master | fs/netfs/main.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/slab.h>
#include "internal.h"
/*
* Allocate an I/O request and initialise it.
*/
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
struct file *file,
loff_t start, size_t len,
enum netfs_io_origin origin)
{
static atomic_t debug_ids;
struct inode *inode = file ? file_inode(file) : mapping->host;
struct netfs_inode *ctx = netfs_inode(inode);
struct netfs_io_request *rreq;
int ret;
rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
if (!rreq)
return ERR_PTR(-ENOMEM);
rreq->start = start;
rreq->len = len;
rreq->origin = origin;
rreq->netfs_ops = ctx->ops;
rreq->mapping = mapping;
rreq->inode = inode;
rreq->i_size = i_size_read(inode);
rreq->debug_id = atomic_inc_return(&debug_ids);
INIT_LIST_HEAD(&rreq->subrequests);
refcount_set(&rreq->ref, 1);
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
if (rreq->netfs_ops->init_request) {
ret = rreq->netfs_ops->init_request(rreq, file);
if (ret < 0) {
kfree(rreq);
return ERR_PTR(ret);
}
}
netfs_stat(&netfs_n_rh_rreq);
return rreq;
}
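/*
 * Get a reference on a request.
 */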
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
int r;
__refcount_inc(&rreq->ref, &r);
trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}
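/*
 * Unhook and release all subrequests still attached to a request.
 */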
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
{
struct netfs_io_subrequest *subreq;
while (!list_empty(&rreq->subrequests)) {
subreq = list_first_entry(&rreq->subrequests,
struct netfs_io_subrequest, rreq_link);
list_del(&subreq->rreq_link);
netfs_put_subrequest(subreq, was_async,
netfs_sreq_trace_put_clear);
}
}
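/*
 * Free a request once the last reference has been dropped. This may be
 * run as a work item if the final put happened in an async context.
 */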
static void netfs_free_request(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
trace_netfs_rreq(rreq, netfs_rreq_trace_free);
netfs_clear_subrequests(rreq, false);
if (rreq->netfs_ops->free_request)
rreq->netfs_ops->free_request(rreq);
if (rreq->cache_resources.ops)
rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
kfree(rreq);
netfs_stat_d(&netfs_n_rh_rreq);
}
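/*
 * Drop a reference on a request, deferring destruction to a workqueue
 * if the final put came from a context that can't sleep.
 */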
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
enum netfs_rreq_ref_trace what)
{
unsigned int debug_id = rreq->debug_id;
bool dead;
int r;
dead = __refcount_dec_and_test(&rreq->ref, &r);
trace_netfs_rreq_ref(debug_id, r - 1, what);
if (dead) {
if (was_async) {
rreq->work.func = netfs_free_request;
if (!queue_work(system_unbound_wq, &rreq->work))
BUG();
} else {
netfs_free_request(&rreq->work);
}
}
}
/*
* Allocate and partially initialise an I/O subrequest structure.
*/
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
subreq = kzalloc(sizeof(struct netfs_io_subrequest), GFP_KERNEL);
if (subreq) {
INIT_LIST_HEAD(&subreq->rreq_link);
refcount_set(&subreq->ref, 2);
subreq->rreq = rreq;
netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
netfs_stat(&netfs_n_rh_sreq);
}
return subreq;
}
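/*
 * Get a reference on a subrequest.
 */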
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what)
{
int r;
__refcount_inc(&subreq->ref, &r);
trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
what);
}
static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
bool was_async)
{
struct netfs_io_request *rreq = subreq->rreq;
trace_netfs_sreq(subreq, netfs_sreq_trace_free);
kfree(subreq);
netfs_stat_d(&netfs_n_rh_sreq);
netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
}
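/*
 * Drop a reference on a subrequest, freeing it (and dropping its ref on
 * the owning request) if this was the last one.
 */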
void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
enum netfs_sreq_ref_trace what)
{
unsigned int debug_index = subreq->debug_index;
unsigned int debug_id = subreq->rreq->debug_id;
bool dead;
int r;
dead = __refcount_dec_and_test(&subreq->ref, &r);
trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
if (dead)
netfs_free_subrequest(subreq, was_async);
}
| linux-master | fs/netfs/objects.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* mount.c - operations for initializing and mounting configfs.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/configfs.h>
#include "configfs_internal.h"
/* Random magic number */
#define CONFIGFS_MAGIC 0x62656570
static struct vfsmount *configfs_mount = NULL;
struct kmem_cache *configfs_dir_cachep;
static int configfs_mnt_count = 0;
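/*
 * Free an inode, releasing the symlink body if there is one.
 */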
static void configfs_free_inode(struct inode *inode)
{
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
free_inode_nonrcu(inode);
}
static const struct super_operations configfs_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.free_inode = configfs_free_inode,
};
static struct config_group configfs_root_group = {
.cg_item = {
.ci_namebuf = "root",
.ci_name = configfs_root_group.cg_item.ci_namebuf,
},
};
int configfs_is_root(struct config_item *item)
{
return item == &configfs_root_group.cg_item;
}
static struct configfs_dirent configfs_root = {
.s_sibling = LIST_HEAD_INIT(configfs_root.s_sibling),
.s_children = LIST_HEAD_INIT(configfs_root.s_children),
.s_element = &configfs_root_group.cg_item,
.s_type = CONFIGFS_ROOT,
.s_iattr = NULL,
};
static int configfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
struct dentry *root;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = CONFIGFS_MAGIC;
sb->s_op = &configfs_ops;
sb->s_time_gran = 1;
inode = configfs_new_inode(S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
&configfs_root, sb);
if (inode) {
inode->i_op = &configfs_root_inode_operations;
inode->i_fop = &configfs_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
} else {
pr_debug("could not get root inode\n");
return -ENOMEM;
}
root = d_make_root(inode);
if (!root) {
pr_debug("%s: could not get root dentry!\n",__func__);
return -ENOMEM;
}
config_group_init(&configfs_root_group);
configfs_root_group.cg_item.ci_dentry = root;
root->d_fsdata = &configfs_root;
sb->s_root = root;
sb->s_d_op = &configfs_dentry_ops; /* the rest get that */
return 0;
}
static int configfs_get_tree(struct fs_context *fc)
{
return get_tree_single(fc, configfs_fill_super);
}
static const struct fs_context_operations configfs_context_ops = {
.get_tree = configfs_get_tree,
};
static int configfs_init_fs_context(struct fs_context *fc)
{
fc->ops = &configfs_context_ops;
return 0;
}
static struct file_system_type configfs_fs_type = {
.owner = THIS_MODULE,
.name = "configfs",
.init_fs_context = configfs_init_fs_context,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("configfs");
struct dentry *configfs_pin_fs(void)
{
int err = simple_pin_fs(&configfs_fs_type, &configfs_mount,
&configfs_mnt_count);
return err ? ERR_PTR(err) : configfs_mount->mnt_root;
}
void configfs_release_fs(void)
{
simple_release_fs(&configfs_mount, &configfs_mnt_count);
}
static int __init configfs_init(void)
{
int err = -ENOMEM;
configfs_dir_cachep = kmem_cache_create("configfs_dir_cache",
sizeof(struct configfs_dirent),
0, 0, NULL);
if (!configfs_dir_cachep)
goto out;
err = sysfs_create_mount_point(kernel_kobj, "config");
if (err)
goto out2;
err = register_filesystem(&configfs_fs_type);
if (err)
goto out3;
return 0;
out3:
pr_err("Unable to register filesystem!\n");
sysfs_remove_mount_point(kernel_kobj, "config");
out2:
kmem_cache_destroy(configfs_dir_cachep);
configfs_dir_cachep = NULL;
out:
return err;
}
static void __exit configfs_exit(void)
{
unregister_filesystem(&configfs_fs_type);
sysfs_remove_mount_point(kernel_kobj, "config");
kmem_cache_destroy(configfs_dir_cachep);
configfs_dir_cachep = NULL;
}
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.0.2");
MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration.");
core_initcall(configfs_init);
module_exit(configfs_exit);
| linux-master | fs/configfs/mount.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dir.c - Operations for configfs directories.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*/
#undef DEBUG
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/configfs.h>
#include "configfs_internal.h"
/*
* Protects mutations of configfs_dirent linkage together with proper i_mutex
* Also protects mutations of symlinks linkage to target configfs_dirent
* Mutators of configfs_dirent linkage must *both* have the proper inode locked
* and configfs_dirent_lock locked, in that order.
* This allows one to safely traverse configfs_dirent trees and symlinks without
* having to lock inodes.
*
* Protects setting of CONFIGFS_USET_DROPPING: checking the flag
* unlocked is not reliable unless in detach_groups() called from
* rmdir()/unregister() and from configfs_attach_group()
*/
DEFINE_SPINLOCK(configfs_dirent_lock);
/*
* All of link_obj/unlink_obj/link_group/unlink_group require that
* subsys->su_mutex is held.
* But parent configfs_subsystem is NULL when config_item is root.
* Use this mutex when config_item is root.
*/
static DEFINE_MUTEX(configfs_subsystem_mutex);
static void configfs_d_iput(struct dentry * dentry,
struct inode * inode)
{
struct configfs_dirent *sd = dentry->d_fsdata;
if (sd) {
/* Coordinate with configfs_readdir */
spin_lock(&configfs_dirent_lock);
/*
* Set sd->s_dentry to null only when this dentry is the one
* that is going to be killed. Otherwise configfs_d_iput may
* run just after configfs_lookup and set sd->s_dentry to
* NULL even though it's still in use.
*/
if (sd->s_dentry == dentry)
sd->s_dentry = NULL;
spin_unlock(&configfs_dirent_lock);
configfs_put(sd);
}
iput(inode);
}
const struct dentry_operations configfs_dentry_ops = {
.d_iput = configfs_d_iput,
.d_delete = always_delete_dentry,
};
#ifdef CONFIG_LOCKDEP
/*
* Helpers to make lockdep happy with our recursive locking of default groups'
* inodes (see configfs_attach_group() and configfs_detach_group()).
* We put default groups i_mutexes in separate classes according to their depth
* from the youngest non-default group ancestor.
*
* For a non-default group A having default groups A/B, A/C, and A/C/D, default
* groups A/B and A/C will have their inode's mutex in class
* default_group_class[0], and default group A/C/D will be in
* default_group_class[1].
*
* The lock classes are declared and assigned in inode.c, according to the
* s_depth value.
* The s_depth value is initialized to -1, adjusted to >= 0 when attaching
* default groups, and reset to -1 when all default groups are attached. During
* attachment, if configfs_create() sees s_depth > 0, the lock class of the new
* inode's mutex is set to default_group_class[s_depth - 1].
*/
static void configfs_init_dirent_depth(struct configfs_dirent *sd)
{
sd->s_depth = -1;
}
static void configfs_set_dir_dirent_depth(struct configfs_dirent *parent_sd,
struct configfs_dirent *sd)
{
int parent_depth = parent_sd->s_depth;
if (parent_depth >= 0)
sd->s_depth = parent_depth + 1;
}
static void
configfs_adjust_dir_dirent_depth_before_populate(struct configfs_dirent *sd)
{
/*
* item's i_mutex class is already setup, so s_depth is now only
* used to set new sub-directories s_depth, which is always done
* with item's i_mutex locked.
*/
/*
* sd->s_depth == -1 iff we are a non default group.
* else (we are a default group) sd->s_depth > 0 (see
* create_dir()).
*/
if (sd->s_depth == -1)
/*
* We are a non default group and we are going to create
* default groups.
*/
sd->s_depth = 0;
}
static void
configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
{
/* We will not create default groups anymore. */
sd->s_depth = -1;
}
#else /* CONFIG_LOCKDEP */
static void configfs_init_dirent_depth(struct configfs_dirent *sd)
{
}
static void configfs_set_dir_dirent_depth(struct configfs_dirent *parent_sd,
struct configfs_dirent *sd)
{
}
static void
configfs_adjust_dir_dirent_depth_before_populate(struct configfs_dirent *sd)
{
}
static void
configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
{
}
#endif /* CONFIG_LOCKDEP */
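/*
 * Fragments track the liveness of a chunk of the tree: frag_dead is set
 * under frag_sem when the subtree is torn down, so that pending
 * attribute I/O can be failed cleanly.
 */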
static struct configfs_fragment *new_fragment(void)
{
struct configfs_fragment *p;
p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
if (p) {
atomic_set(&p->frag_count, 1);
init_rwsem(&p->frag_sem);
p->frag_dead = false;
}
return p;
}
void put_fragment(struct configfs_fragment *frag)
{
if (frag && atomic_dec_and_test(&frag->frag_count))
kfree(frag);
}
struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
{
if (likely(frag))
atomic_inc(&frag->frag_count);
return frag;
}
/*
* Allocates a new configfs_dirent and links it to the parent configfs_dirent
*/
static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
void *element, int type,
struct configfs_fragment *frag)
{
struct configfs_dirent * sd;
sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL);
if (!sd)
return ERR_PTR(-ENOMEM);
atomic_set(&sd->s_count, 1);
INIT_LIST_HEAD(&sd->s_children);
sd->s_element = element;
sd->s_type = type;
configfs_init_dirent_depth(sd);
spin_lock(&configfs_dirent_lock);
if (parent_sd->s_type & CONFIGFS_USET_DROPPING) {
spin_unlock(&configfs_dirent_lock);
kmem_cache_free(configfs_dir_cachep, sd);
return ERR_PTR(-ENOENT);
}
sd->s_frag = get_fragment(frag);
list_add(&sd->s_sibling, &parent_sd->s_children);
spin_unlock(&configfs_dirent_lock);
return sd;
}
/*
* Return -EEXIST if there is already a configfs element with the same
* name for the same parent.
*
* called with parent inode's i_mutex held
*/
static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
const unsigned char *new)
{
struct configfs_dirent * sd;
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (sd->s_element) {
const unsigned char *existing = configfs_get_name(sd);
if (strcmp(existing, new))
continue;
else
return -EEXIST;
}
}
return 0;
}
int configfs_make_dirent(struct configfs_dirent * parent_sd,
struct dentry * dentry, void * element,
umode_t mode, int type, struct configfs_fragment *frag)
{
struct configfs_dirent * sd;
sd = configfs_new_dirent(parent_sd, element, type, frag);
if (IS_ERR(sd))
return PTR_ERR(sd);
sd->s_mode = mode;
sd->s_dentry = dentry;
if (dentry)
dentry->d_fsdata = configfs_get(sd);
return 0;
}
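/*
 * Unhook a dirent from its parent's children list and drop the list's
 * reference to it.
 */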
static void configfs_remove_dirent(struct dentry *dentry)
{
struct configfs_dirent *sd = dentry->d_fsdata;
if (!sd)
return;
spin_lock(&configfs_dirent_lock);
list_del_init(&sd->s_sibling);
spin_unlock(&configfs_dirent_lock);
configfs_put(sd);
}
/**
* configfs_create_dir - create a directory for a config_item.
* @item: config_item we're creating the directory for.
* @dentry: config_item's dentry.
* @frag: config_item's fragment.
*
* Note: user-created entries won't be allowed under this new directory
* until it is validated by configfs_dir_set_ready()
*/
static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
struct configfs_fragment *frag)
{
int error;
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
struct dentry *p = dentry->d_parent;
struct inode *inode;
BUG_ON(!item);
error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name);
if (unlikely(error))
return error;
error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
CONFIGFS_DIR | CONFIGFS_USET_CREATING,
frag);
if (unlikely(error))
return error;
configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
inode = configfs_create(dentry, mode);
if (IS_ERR(inode))
goto out_remove;
inode->i_op = &configfs_dir_inode_operations;
inode->i_fop = &configfs_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
d_instantiate(dentry, inode);
/* already hashed */
dget(dentry); /* pin directory dentries in core */
inc_nlink(d_inode(p));
item->ci_dentry = dentry;
return 0;
out_remove:
configfs_put(dentry->d_fsdata);
configfs_remove_dirent(dentry);
return PTR_ERR(inode);
}
/*
* Allow userspace to create new entries under a new directory created with
* configfs_create_dir(), and under all of its child directories recursively.
* @sd configfs_dirent of the new directory to validate
*
* Caller must hold configfs_dirent_lock.
*/
static void configfs_dir_set_ready(struct configfs_dirent *sd)
{
struct configfs_dirent *child_sd;
sd->s_type &= ~CONFIGFS_USET_CREATING;
list_for_each_entry(child_sd, &sd->s_children, s_sibling)
if (child_sd->s_type & CONFIGFS_USET_CREATING)
configfs_dir_set_ready(child_sd);
}
/*
* Check that a directory does not belong to a directory hierarchy being
* attached and not validated yet.
* @sd configfs_dirent of the directory to check
*
* @return non-zero iff the directory was validated
*
* Note: takes configfs_dirent_lock, so the result may change from false to true
* in two consecutive calls, but never from true to false.
*/
int configfs_dirent_is_ready(struct configfs_dirent *sd)
{
int ret;
spin_lock(&configfs_dirent_lock);
ret = !(sd->s_type & CONFIGFS_USET_CREATING);
spin_unlock(&configfs_dirent_lock);
return ret;
}
int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
struct dentry *dentry, char *body)
{
int err = 0;
umode_t mode = S_IFLNK | S_IRWXUGO;
struct configfs_dirent *p = parent->d_fsdata;
struct inode *inode;
err = configfs_make_dirent(p, dentry, target, mode, CONFIGFS_ITEM_LINK,
p->s_frag);
if (err)
return err;
inode = configfs_create(dentry, mode);
if (IS_ERR(inode))
goto out_remove;
inode->i_link = body;
inode->i_op = &configfs_symlink_inode_operations;
d_instantiate(dentry, inode);
dget(dentry); /* pin link dentries in core */
return 0;
out_remove:
configfs_put(dentry->d_fsdata);
configfs_remove_dirent(dentry);
return PTR_ERR(inode);
}
static void remove_dir(struct dentry * d)
{
struct dentry * parent = dget(d->d_parent);
configfs_remove_dirent(d);
if (d_really_is_positive(d))
simple_rmdir(d_inode(parent), d);
pr_debug(" o %pd removing done (%d)\n", d, d_count(d));
dput(parent);
}
/**
* configfs_remove_dir - remove a config_item's directory.
* @item: config_item we're removing.
*
* The only thing special about this is that we remove any files in
* the directory before we remove the directory, and we've inlined
* what used to be configfs_rmdir() below, instead of calling separately.
*
* Caller holds the mutex of the item's inode
*/
static void configfs_remove_dir(struct config_item * item)
{
struct dentry * dentry = dget(item->ci_dentry);
if (!dentry)
return;
remove_dir(dentry);
/**
* Drop reference from dget() on entrance.
*/
dput(dentry);
}
static struct dentry * configfs_lookup(struct inode *dir,
struct dentry *dentry,
unsigned int flags)
{
struct configfs_dirent * parent_sd = dentry->d_parent->d_fsdata;
struct configfs_dirent * sd;
struct inode *inode = NULL;
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
/*
* Fake invisibility if dir belongs to a group/default groups hierarchy
* being attached
*
* This forbids userspace to read/write attributes of items which may
* not complete their initialization, since the dentries of the
* attributes won't be instantiated.
*/
if (!configfs_dirent_is_ready(parent_sd))
return ERR_PTR(-ENOENT);
spin_lock(&configfs_dirent_lock);
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if ((sd->s_type & CONFIGFS_NOT_PINNED) &&
!strcmp(configfs_get_name(sd), dentry->d_name.name)) {
struct configfs_attribute *attr = sd->s_element;
umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
dentry->d_fsdata = configfs_get(sd);
sd->s_dentry = dentry;
spin_unlock(&configfs_dirent_lock);
inode = configfs_create(dentry, mode);
if (IS_ERR(inode)) {
configfs_put(sd);
return ERR_CAST(inode);
}
if (sd->s_type & CONFIGFS_ITEM_BIN_ATTR) {
inode->i_size = 0;
inode->i_fop = &configfs_bin_file_operations;
} else {
inode->i_size = PAGE_SIZE;
inode->i_fop = &configfs_file_operations;
}
goto done;
}
}
spin_unlock(&configfs_dirent_lock);
done:
d_add(dentry, inode);
return NULL;
}
/*
* Only subdirectories count here. Files (CONFIGFS_NOT_PINNED) are
* attributes and are removed by rmdir(). We recurse, setting
* CONFIGFS_USET_DROPPING on all children that are candidates for
* default detach.
* If there is an error, the caller will reset the flags via
* configfs_detach_rollback().
*/
static int configfs_detach_prep(struct dentry *dentry, struct dentry **wait)
{
struct configfs_dirent *parent_sd = dentry->d_fsdata;
struct configfs_dirent *sd;
int ret;
/* Mark that we're trying to drop the group */
parent_sd->s_type |= CONFIGFS_USET_DROPPING;
ret = -EBUSY;
if (parent_sd->s_links)
goto out;
ret = 0;
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (!sd->s_element ||
(sd->s_type & CONFIGFS_NOT_PINNED))
continue;
if (sd->s_type & CONFIGFS_USET_DEFAULT) {
/* Abort if racing with mkdir() */
if (sd->s_type & CONFIGFS_USET_IN_MKDIR) {
if (wait)
*wait= dget(sd->s_dentry);
return -EAGAIN;
}
/*
* Yup, recursive. If there's a problem, blame
* deep nesting of default_groups
*/
ret = configfs_detach_prep(sd->s_dentry, wait);
if (!ret)
continue;
} else
ret = -ENOTEMPTY;
break;
}
out:
return ret;
}
/*
* Walk the tree, resetting CONFIGFS_USET_DROPPING wherever it was
* set.
*/
static void configfs_detach_rollback(struct dentry *dentry)
{
struct configfs_dirent *parent_sd = dentry->d_fsdata;
struct configfs_dirent *sd;
parent_sd->s_type &= ~CONFIGFS_USET_DROPPING;
list_for_each_entry(sd, &parent_sd->s_children, s_sibling)
if (sd->s_type & CONFIGFS_USET_DEFAULT)
configfs_detach_rollback(sd->s_dentry);
}
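/*
 * Remove all attribute files (non-pinned entries) from an item's
 * directory.
 */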
static void detach_attrs(struct config_item * item)
{
struct dentry * dentry = dget(item->ci_dentry);
struct configfs_dirent * parent_sd;
struct configfs_dirent * sd, * tmp;
if (!dentry)
return;
pr_debug("configfs %s: dropping attrs for dir\n",
dentry->d_name.name);
parent_sd = dentry->d_fsdata;
list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
if (!sd->s_element || !(sd->s_type & CONFIGFS_NOT_PINNED))
continue;
spin_lock(&configfs_dirent_lock);
list_del_init(&sd->s_sibling);
spin_unlock(&configfs_dirent_lock);
configfs_drop_dentry(sd, dentry);
configfs_put(sd);
}
/**
* Drop reference from dget() on entrance.
*/
dput(dentry);
}
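/*
 * Create files for all of the attributes and binary attributes declared
 * by the item's type, undoing everything if any of them fails.
 */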
static int populate_attrs(struct config_item *item)
{
const struct config_item_type *t = item->ci_type;
struct configfs_attribute *attr;
struct configfs_bin_attribute *bin_attr;
int error = 0;
int i;
if (!t)
return -EINVAL;
if (t->ct_attrs) {
for (i = 0; (attr = t->ct_attrs[i]) != NULL; i++) {
if ((error = configfs_create_file(item, attr)))
break;
}
}
if (t->ct_bin_attrs) {
for (i = 0; (bin_attr = t->ct_bin_attrs[i]) != NULL; i++) {
error = configfs_create_bin_file(item, bin_attr);
if (error)
break;
}
}
if (error)
detach_attrs(item);
return error;
}
static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
struct dentry *dentry,
struct configfs_fragment *frag);
static void configfs_detach_group(struct config_item *item);
static void detach_groups(struct config_group *group)
{
struct dentry * dentry = dget(group->cg_item.ci_dentry);
struct dentry *child;
struct configfs_dirent *parent_sd;
struct configfs_dirent *sd, *tmp;
if (!dentry)
return;
parent_sd = dentry->d_fsdata;
list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
if (!sd->s_element ||
!(sd->s_type & CONFIGFS_USET_DEFAULT))
continue;
child = sd->s_dentry;
inode_lock(d_inode(child));
configfs_detach_group(sd->s_element);
d_inode(child)->i_flags |= S_DEAD;
dont_mount(child);
inode_unlock(d_inode(child));
d_delete(child);
dput(child);
}
/**
* Drop reference from dget() on entrance.
*/
dput(dentry);
}
/*
* This fakes mkdir(2) on a default_groups[] entry. It
* creates a dentry, attaches it, and then does fixup
* on the sd->s_type.
*
* We could, perhaps, tweak our parent's ->mkdir for a minute and
* try using vfs_mkdir. Just a thought.
*/
static int create_default_group(struct config_group *parent_group,
struct config_group *group,
struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
/* We trust the caller holds a reference to parent */
struct dentry *child, *parent = parent_group->cg_item.ci_dentry;
if (!group->cg_item.ci_name)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
ret = -ENOMEM;
child = d_alloc_name(parent, group->cg_item.ci_name);
if (child) {
d_add(child, NULL);
ret = configfs_attach_group(&parent_group->cg_item,
&group->cg_item, child, frag);
if (!ret) {
sd = child->d_fsdata;
sd->s_type |= CONFIGFS_USET_DEFAULT;
} else {
BUG_ON(d_inode(child));
d_drop(child);
dput(child);
}
}
return ret;
}
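/*
 * Attach each of a group's default groups, backing them all out again
 * if one of them fails.
 */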
static int populate_groups(struct config_group *group,
struct configfs_fragment *frag)
{
struct config_group *new_group;
int ret = 0;
list_for_each_entry(new_group, &group->default_groups, group_entry) {
ret = create_default_group(group, new_group, frag);
if (ret) {
detach_groups(group);
break;
}
}
return ret;
}
void configfs_remove_default_groups(struct config_group *group)
{
struct config_group *g, *n;
list_for_each_entry_safe(g, n, &group->default_groups, group_entry) {
list_del(&g->group_entry);
config_item_put(&g->cg_item);
}
}
EXPORT_SYMBOL(configfs_remove_default_groups);
/*
* All of link_obj/unlink_obj/link_group/unlink_group require that
* subsys->su_mutex is held.
*/
static void unlink_obj(struct config_item *item)
{
struct config_group *group;
group = item->ci_group;
if (group) {
list_del_init(&item->ci_entry);
item->ci_group = NULL;
item->ci_parent = NULL;
/* Drop the reference for ci_entry */
config_item_put(item);
/* Drop the reference for ci_parent */
config_group_put(group);
}
}
static void link_obj(struct config_item *parent_item, struct config_item *item)
{
/*
* Parent seems redundant with group, but it makes certain
* traversals much nicer.
*/
item->ci_parent = parent_item;
/*
* We hold a reference on the parent for the child's ci_parent
* link.
*/
item->ci_group = config_group_get(to_config_group(parent_item));
list_add_tail(&item->ci_entry, &item->ci_group->cg_children);
/*
* We hold a reference on the child for ci_entry on the parent's
* cg_children
*/
config_item_get(item);
}
static void unlink_group(struct config_group *group)
{
struct config_group *new_group;
list_for_each_entry(new_group, &group->default_groups, group_entry)
unlink_group(new_group);
group->cg_subsys = NULL;
unlink_obj(&group->cg_item);
}
static void link_group(struct config_group *parent_group, struct config_group *group)
{
struct config_group *new_group;
struct configfs_subsystem *subsys = NULL; /* initialised only to placate gcc */
link_obj(&parent_group->cg_item, &group->cg_item);
if (parent_group->cg_subsys)
subsys = parent_group->cg_subsys;
else if (configfs_is_root(&parent_group->cg_item))
subsys = to_configfs_subsystem(group);
else
BUG();
group->cg_subsys = subsys;
list_for_each_entry(new_group, &group->default_groups, group_entry)
link_group(group, new_group);
}
/*
* The goal is that configfs_attach_item() (and
* configfs_attach_group()) can be called from either the VFS or this
* module. That is, they assume that the items have been created,
* the dentry allocated, and the dcache is all ready to go.
*
* If they fail, they must clean up after themselves as if they
* had never been called. The caller (VFS or local function) will
* handle cleaning up the dcache bits.
*
* configfs_detach_group() and configfs_detach_item() behave similarly on
* the way out. They assume that the proper semaphores are held, they
* clean up the configfs items, and they expect their callers will
* handle the dcache bits.
*/
static int configfs_attach_item(struct config_item *parent_item,
struct config_item *item,
struct dentry *dentry,
struct configfs_fragment *frag)
{
int ret;
ret = configfs_create_dir(item, dentry, frag);
if (!ret) {
ret = populate_attrs(item);
if (ret) {
/*
* We are going to remove an inode and its dentry but
* the VFS may already have hit and used them. Thus,
* we must lock them as rmdir() would.
*/
inode_lock(d_inode(dentry));
configfs_remove_dir(item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
inode_unlock(d_inode(dentry));
d_delete(dentry);
}
}
return ret;
}
/* Caller holds the mutex of the item's inode */
static void configfs_detach_item(struct config_item *item)
{
detach_attrs(item);
configfs_remove_dir(item);
}
static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
struct dentry *dentry,
struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
ret = configfs_attach_item(parent_item, item, dentry, frag);
if (!ret) {
sd = dentry->d_fsdata;
sd->s_type |= CONFIGFS_USET_DIR;
/*
* FYI, we're faking mkdir in populate_groups()
* We must lock the group's inode to avoid races with the VFS
* which can already hit the inode and try to add/remove entries
* under it.
*
* We must also lock the inode to remove it safely in case of
* error, as rmdir() would.
*/
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
configfs_adjust_dir_dirent_depth_before_populate(sd);
ret = populate_groups(to_config_group(item), frag);
if (ret) {
configfs_detach_item(item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
}
configfs_adjust_dir_dirent_depth_after_populate(sd);
inode_unlock(d_inode(dentry));
if (ret)
d_delete(dentry);
}
return ret;
}
/* Caller holds the mutex of the group's inode */
static void configfs_detach_group(struct config_item *item)
{
detach_groups(to_config_group(item));
configfs_detach_item(item);
}
/*
* After the item has been detached from the filesystem view, we are
* ready to tear it out of the hierarchy. Notify the client before
* we do that so they can perform any cleanup that requires
* navigating the hierarchy. A client does not need to provide this
* callback. The subsystem semaphore MUST be held by the caller, and
* references must be valid for both items. It also assumes the
* caller has validated ci_type.
*/
static void client_disconnect_notify(struct config_item *parent_item,
struct config_item *item)
{
const struct config_item_type *type;
type = parent_item->ci_type;
BUG_ON(!type);
if (type->ct_group_ops && type->ct_group_ops->disconnect_notify)
type->ct_group_ops->disconnect_notify(to_config_group(parent_item),
item);
}
/*
* Drop the initial reference from make_item()/make_group()
* This function assumes that reference is held on item
* and that item holds a valid reference to the parent. Also, it
* assumes the caller has validated ci_type.
*/
static void client_drop_item(struct config_item *parent_item,
struct config_item *item)
{
const struct config_item_type *type;
type = parent_item->ci_type;
BUG_ON(!type);
/*
* If ->drop_item() exists, it is responsible for the
* config_item_put().
*/
if (type->ct_group_ops && type->ct_group_ops->drop_item)
type->ct_group_ops->drop_item(to_config_group(parent_item),
item);
else
config_item_put(item);
}
#ifdef DEBUG
static void configfs_dump_one(struct configfs_dirent *sd, int level)
{
pr_info("%*s\"%s\":\n", level, " ", configfs_get_name(sd));
#define type_print(_type) if (sd->s_type & _type) pr_info("%*s %s\n", level, " ", #_type);
type_print(CONFIGFS_ROOT);
type_print(CONFIGFS_DIR);
type_print(CONFIGFS_ITEM_ATTR);
type_print(CONFIGFS_ITEM_LINK);
type_print(CONFIGFS_USET_DIR);
type_print(CONFIGFS_USET_DEFAULT);
type_print(CONFIGFS_USET_DROPPING);
#undef type_print
}
static int configfs_dump(struct configfs_dirent *sd, int level)
{
struct configfs_dirent *child_sd;
int ret = 0;
configfs_dump_one(sd, level);
if (!(sd->s_type & (CONFIGFS_DIR|CONFIGFS_ROOT)))
return 0;
list_for_each_entry(child_sd, &sd->s_children, s_sibling) {
ret = configfs_dump(child_sd, level + 2);
if (ret)
break;
}
return ret;
}
#endif
/*
* configfs_depend_item() and configfs_undepend_item()
*
* WARNING: Do not call these from a configfs callback!
*
* This describes these functions and their helpers.
*
* Allow another kernel system to depend on a config_item. If this
* happens, the item cannot go away until the dependent can live without
* it. The idea is to give client modules as simple an interface as
* possible. When a system asks them to depend on an item, they just
* call configfs_depend_item(). If the item is live and the client
* driver is in good shape, we'll happily do the work for them.
*
* Why is the locking complex? Because configfs uses the VFS to handle
* all locking, but this function is called outside the normal
* VFS->configfs path. So it must take VFS locks to prevent the
* VFS->configfs stuff (configfs_mkdir(), configfs_rmdir(), etc). This is
* why you can't call these functions underneath configfs callbacks.
*
* Note, btw, that this can be called at *any* time, even when a configfs
* subsystem isn't registered, or when configfs is loading or unloading.
* Just like configfs_register_subsystem(). So we take the same
* precautions. We pin the filesystem. We lock configfs_dirent_lock.
 * If we can find the target item in the configfs tree, it must be part
 * of the subsystem tree as well, so we do not need the subsystem
 * semaphore. Holding configfs_dirent_lock locks out mkdir() and
 * rmdir(), which might be racing with us.
*/
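/*
 * Illustrative sketch (not part of this file; "my_subsys" and "item" are
 * hypothetical client-side names): a kernel system that wants to keep an
 * item pinned would typically pair the calls like this:
 *
 *	ret = configfs_depend_item(&my_subsys, item);
 *	if (!ret) {
 *		...use the item, safe from rmdir()...
 *		configfs_undepend_item(item);
 *	}
 */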
/*
* configfs_depend_prep()
*
* Only subdirectories count here. Files (CONFIGFS_NOT_PINNED) are
 * attributes. This is similar to, but not the same as, configfs_detach_prep().
* Note that configfs_detach_prep() expects the parent to be locked when it
* is called, but we lock the parent *inside* configfs_depend_prep(). We
* do that so we can unlock it if we find nothing.
*
* Here we do a depth-first search of the dentry hierarchy looking for
* our object.
* We deliberately ignore items tagged as dropping since they are virtually
* dead, as well as items in the middle of attachment since they virtually
* do not exist yet. This completes the locking out of racing mkdir() and
* rmdir().
* Note: subdirectories in the middle of attachment start with s_type =
* CONFIGFS_DIR|CONFIGFS_USET_CREATING set by create_dir(). When
 * CONFIGFS_USET_CREATING is set, we ignore the item. s_type is actually
 * set in configfs_new_dirent(), which holds configfs_dirent_lock.
*
* If the target is not found, -ENOENT is bubbled up.
*
* This adds a requirement that all config_items be unique!
*
* This is recursive. There isn't
* much on the stack, though, so folks that need this function - be careful
* about your stack! Patches will be accepted to make it iterative.
*/
static int configfs_depend_prep(struct dentry *origin,
struct config_item *target)
{
struct configfs_dirent *child_sd, *sd;
int ret = 0;
BUG_ON(!origin || !origin->d_fsdata);
sd = origin->d_fsdata;
if (sd->s_element == target) /* Boo-yah */
goto out;
list_for_each_entry(child_sd, &sd->s_children, s_sibling) {
if ((child_sd->s_type & CONFIGFS_DIR) &&
!(child_sd->s_type & CONFIGFS_USET_DROPPING) &&
!(child_sd->s_type & CONFIGFS_USET_CREATING)) {
ret = configfs_depend_prep(child_sd->s_dentry,
target);
if (!ret)
goto out; /* Child path boo-yah */
}
}
/* We looped all our children and didn't find target */
ret = -ENOENT;
out:
return ret;
}
static int configfs_do_depend_item(struct dentry *subsys_dentry,
struct config_item *target)
{
struct configfs_dirent *p;
int ret;
spin_lock(&configfs_dirent_lock);
/* Scan the tree, return 0 if found */
ret = configfs_depend_prep(subsys_dentry, target);
if (ret)
goto out_unlock_dirent_lock;
/*
* We are sure that the item is not about to be removed by rmdir(), and
* not in the middle of attachment by mkdir().
*/
p = target->ci_dentry->d_fsdata;
p->s_dependent_count += 1;
out_unlock_dirent_lock:
spin_unlock(&configfs_dirent_lock);
return ret;
}
static inline struct configfs_dirent *
configfs_find_subsys_dentry(struct configfs_dirent *root_sd,
struct config_item *subsys_item)
{
struct configfs_dirent *p;
struct configfs_dirent *ret = NULL;
list_for_each_entry(p, &root_sd->s_children, s_sibling) {
if (p->s_type & CONFIGFS_DIR &&
p->s_element == subsys_item) {
ret = p;
break;
}
}
return ret;
}
int configfs_depend_item(struct configfs_subsystem *subsys,
struct config_item *target)
{
int ret;
struct configfs_dirent *subsys_sd;
struct config_item *s_item = &subsys->su_group.cg_item;
struct dentry *root;
/*
* Pin the configfs filesystem. This means we can safely access
* the root of the configfs filesystem.
*/
root = configfs_pin_fs();
if (IS_ERR(root))
return PTR_ERR(root);
/*
* Next, lock the root directory. We're going to check that the
* subsystem is really registered, and so we need to lock out
* configfs_[un]register_subsystem().
*/
inode_lock(d_inode(root));
subsys_sd = configfs_find_subsys_dentry(root->d_fsdata, s_item);
if (!subsys_sd) {
ret = -ENOENT;
goto out_unlock_fs;
}
/* Ok, now we can trust subsys/s_item */
ret = configfs_do_depend_item(subsys_sd->s_dentry, target);
out_unlock_fs:
inode_unlock(d_inode(root));
/*
* If we succeeded, the fs is pinned via other methods. If not,
* we're done with it anyway. So release_fs() is always right.
*/
configfs_release_fs();
return ret;
}
EXPORT_SYMBOL(configfs_depend_item);
/*
* Release the dependent linkage. This is much simpler than
* configfs_depend_item() because we know that the client driver is
* pinned, thus the subsystem is pinned, and therefore configfs is pinned.
*/
void configfs_undepend_item(struct config_item *target)
{
struct configfs_dirent *sd;
/*
* Since we can trust everything is pinned, we just need
* configfs_dirent_lock.
*/
spin_lock(&configfs_dirent_lock);
sd = target->ci_dentry->d_fsdata;
BUG_ON(sd->s_dependent_count < 1);
sd->s_dependent_count -= 1;
/*
* After this unlock, we cannot trust the item to stay alive!
* DO NOT REFERENCE item after this unlock.
*/
spin_unlock(&configfs_dirent_lock);
}
EXPORT_SYMBOL(configfs_undepend_item);
/*
 * caller_subsys is the caller's subsystem, not the target's. It is used
 * to determine whether we need to lock the root and check the subsystem.
 * When we are in the same subsystem as our target, no locking is needed:
 * the subsystem is known to be valid and cannot be unregistered during
 * this function, because we are called from a callback of one of its
 * children and the VFS holds a lock on some inode. Otherwise we have to
 * lock our root to ensure that the target's subsystem is not
 * unregistered during this function.
 */
int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys,
struct config_item *target)
{
struct configfs_subsystem *target_subsys;
struct config_group *root, *parent;
struct configfs_dirent *subsys_sd;
int ret = -ENOENT;
/* Disallow this function for configfs root */
if (configfs_is_root(target))
return -EINVAL;
parent = target->ci_group;
/*
	 * This may happen when someone is trying to depend on the root
	 * directory of some subsystem.
*/
if (configfs_is_root(&parent->cg_item)) {
target_subsys = to_configfs_subsystem(to_config_group(target));
root = parent;
} else {
target_subsys = parent->cg_subsys;
		/* Find a configfs root, as we may need it for locking */
for (root = parent; !configfs_is_root(&root->cg_item);
root = root->cg_item.ci_group)
;
}
if (target_subsys != caller_subsys) {
/*
		 * We are in another configfs subsystem, so we have to take
		 * additional locks to prevent that subsystem from being
		 * unregistered.
*/
inode_lock(d_inode(root->cg_item.ci_dentry));
/*
		 * As we are trying to depend on an item from another
		 * subsystem, we have to check whether that subsystem is
		 * still registered.
*/
subsys_sd = configfs_find_subsys_dentry(
root->cg_item.ci_dentry->d_fsdata,
&target_subsys->su_group.cg_item);
if (!subsys_sd)
goto out_root_unlock;
} else {
subsys_sd = target_subsys->su_group.cg_item.ci_dentry->d_fsdata;
}
/* Now we can execute core of depend item */
ret = configfs_do_depend_item(subsys_sd->s_dentry, target);
if (target_subsys != caller_subsys)
out_root_unlock:
/*
		 * We were called from a subsystem other than our target's,
		 * so we took the root lock above; release it now.
*/
inode_unlock(d_inode(root->cg_item.ci_dentry));
return ret;
}
EXPORT_SYMBOL(configfs_depend_item_unlocked);
static int configfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
int ret = 0;
int module_got = 0;
struct config_group *group = NULL;
struct config_item *item = NULL;
struct config_item *parent_item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
const struct config_item_type *type;
struct module *subsys_owner = NULL, *new_item_owner = NULL;
struct configfs_fragment *frag;
char *name;
sd = dentry->d_parent->d_fsdata;
/*
* Fake invisibility if dir belongs to a group/default groups hierarchy
* being attached
*/
if (!configfs_dirent_is_ready(sd)) {
ret = -ENOENT;
goto out;
}
if (!(sd->s_type & CONFIGFS_USET_DIR)) {
ret = -EPERM;
goto out;
}
frag = new_fragment();
if (!frag) {
ret = -ENOMEM;
goto out;
}
/* Get a working ref for the duration of this function */
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
subsys = to_config_group(parent_item)->cg_subsys;
BUG_ON(!subsys);
if (!type || !type->ct_group_ops ||
(!type->ct_group_ops->make_group &&
!type->ct_group_ops->make_item)) {
ret = -EPERM; /* Lack-of-mkdir returns -EPERM */
goto out_put;
}
/*
* The subsystem may belong to a different module than the item
* being created. We don't want to safely pin the new item but
* fail to pin the subsystem it sits under.
*/
if (!subsys->su_group.cg_item.ci_type) {
ret = -EINVAL;
goto out_put;
}
subsys_owner = subsys->su_group.cg_item.ci_type->ct_owner;
if (!try_module_get(subsys_owner)) {
ret = -EINVAL;
goto out_put;
}
name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
if (!name) {
ret = -ENOMEM;
goto out_subsys_put;
}
snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);
mutex_lock(&subsys->su_mutex);
if (type->ct_group_ops->make_group) {
group = type->ct_group_ops->make_group(to_config_group(parent_item), name);
if (!group)
group = ERR_PTR(-ENOMEM);
if (!IS_ERR(group)) {
link_group(to_config_group(parent_item), group);
item = &group->cg_item;
} else
ret = PTR_ERR(group);
} else {
item = type->ct_group_ops->make_item(to_config_group(parent_item), name);
if (!item)
item = ERR_PTR(-ENOMEM);
if (!IS_ERR(item))
link_obj(parent_item, item);
else
ret = PTR_ERR(item);
}
mutex_unlock(&subsys->su_mutex);
kfree(name);
if (ret) {
/*
* If ret != 0, then link_obj() was never called.
* There are no extra references to clean up.
*/
goto out_subsys_put;
}
/*
* link_obj() has been called (via link_group() for groups).
* From here on out, errors must clean that up.
*/
type = item->ci_type;
if (!type) {
ret = -EINVAL;
goto out_unlink;
}
new_item_owner = type->ct_owner;
if (!try_module_get(new_item_owner)) {
ret = -EINVAL;
goto out_unlink;
}
/*
* I hate doing it this way, but if there is
* an error, module_put() probably should
* happen after any cleanup.
*/
module_got = 1;
/*
* Make racing rmdir() fail if it did not tag parent with
* CONFIGFS_USET_DROPPING
* Note: if CONFIGFS_USET_DROPPING is already set, attach_group() will
* fail and let rmdir() terminate correctly
*/
spin_lock(&configfs_dirent_lock);
/* This will make configfs_detach_prep() fail */
sd->s_type |= CONFIGFS_USET_IN_MKDIR;
spin_unlock(&configfs_dirent_lock);
if (group)
ret = configfs_attach_group(parent_item, item, dentry, frag);
else
ret = configfs_attach_item(parent_item, item, dentry, frag);
spin_lock(&configfs_dirent_lock);
sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
if (!ret)
configfs_dir_set_ready(dentry->d_fsdata);
spin_unlock(&configfs_dirent_lock);
out_unlink:
if (ret) {
/* Tear down everything we built up */
mutex_lock(&subsys->su_mutex);
client_disconnect_notify(parent_item, item);
if (group)
unlink_group(group);
else
unlink_obj(item);
client_drop_item(parent_item, item);
mutex_unlock(&subsys->su_mutex);
if (module_got)
module_put(new_item_owner);
}
out_subsys_put:
if (ret)
module_put(subsys_owner);
out_put:
/*
* link_obj()/link_group() took a reference from child->parent,
* so the parent is safely pinned. We can drop our working
* reference.
*/
config_item_put(parent_item);
put_fragment(frag);
out:
return ret;
}
static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct config_item *parent_item;
struct config_item *item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
struct configfs_fragment *frag;
struct module *subsys_owner = NULL, *dead_item_owner = NULL;
int ret;
sd = dentry->d_fsdata;
if (sd->s_type & CONFIGFS_USET_DEFAULT)
return -EPERM;
/* Get a working ref until we have the child */
parent_item = configfs_get_config_item(dentry->d_parent);
subsys = to_config_group(parent_item)->cg_subsys;
BUG_ON(!subsys);
if (!parent_item->ci_type) {
config_item_put(parent_item);
return -EINVAL;
}
/* configfs_mkdir() shouldn't have allowed this */
BUG_ON(!subsys->su_group.cg_item.ci_type);
subsys_owner = subsys->su_group.cg_item.ci_type->ct_owner;
/*
* Ensure that no racing symlink() will make detach_prep() fail while
* the new link is temporarily attached
*/
do {
struct dentry *wait;
mutex_lock(&configfs_symlink_mutex);
spin_lock(&configfs_dirent_lock);
/*
* Here's where we check for dependents. We're protected by
* configfs_dirent_lock.
* If no dependent, atomically tag the item as dropping.
*/
ret = sd->s_dependent_count ? -EBUSY : 0;
if (!ret) {
ret = configfs_detach_prep(dentry, &wait);
if (ret)
configfs_detach_rollback(dentry);
}
spin_unlock(&configfs_dirent_lock);
mutex_unlock(&configfs_symlink_mutex);
if (ret) {
if (ret != -EAGAIN) {
config_item_put(parent_item);
return ret;
}
/* Wait until the racing operation terminates */
inode_lock(d_inode(wait));
inode_unlock(d_inode(wait));
dput(wait);
}
} while (ret == -EAGAIN);
frag = sd->s_frag;
if (down_write_killable(&frag->frag_sem)) {
spin_lock(&configfs_dirent_lock);
configfs_detach_rollback(dentry);
spin_unlock(&configfs_dirent_lock);
config_item_put(parent_item);
return -EINTR;
}
frag->frag_dead = true;
up_write(&frag->frag_sem);
/* Get a working ref for the duration of this function */
item = configfs_get_config_item(dentry);
/* Drop reference from above, item already holds one. */
config_item_put(parent_item);
if (item->ci_type)
dead_item_owner = item->ci_type->ct_owner;
if (sd->s_type & CONFIGFS_USET_DIR) {
configfs_detach_group(item);
mutex_lock(&subsys->su_mutex);
client_disconnect_notify(parent_item, item);
unlink_group(to_config_group(item));
} else {
configfs_detach_item(item);
mutex_lock(&subsys->su_mutex);
client_disconnect_notify(parent_item, item);
unlink_obj(item);
}
client_drop_item(parent_item, item);
mutex_unlock(&subsys->su_mutex);
/* Drop our reference from above */
config_item_put(item);
module_put(dead_item_owner);
module_put(subsys_owner);
return 0;
}
const struct inode_operations configfs_dir_inode_operations = {
.mkdir = configfs_mkdir,
.rmdir = configfs_rmdir,
.symlink = configfs_symlink,
.unlink = configfs_unlink,
.lookup = configfs_lookup,
.setattr = configfs_setattr,
};
const struct inode_operations configfs_root_inode_operations = {
.lookup = configfs_lookup,
.setattr = configfs_setattr,
};
static int configfs_dir_open(struct inode *inode, struct file *file)
{
struct dentry * dentry = file->f_path.dentry;
struct configfs_dirent * parent_sd = dentry->d_fsdata;
int err;
inode_lock(d_inode(dentry));
/*
* Fake invisibility if dir belongs to a group/default groups hierarchy
* being attached
*/
err = -ENOENT;
if (configfs_dirent_is_ready(parent_sd)) {
file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
if (IS_ERR(file->private_data))
err = PTR_ERR(file->private_data);
else
err = 0;
}
inode_unlock(d_inode(dentry));
return err;
}
static int configfs_dir_close(struct inode *inode, struct file *file)
{
struct dentry * dentry = file->f_path.dentry;
struct configfs_dirent * cursor = file->private_data;
inode_lock(d_inode(dentry));
spin_lock(&configfs_dirent_lock);
list_del_init(&cursor->s_sibling);
spin_unlock(&configfs_dirent_lock);
inode_unlock(d_inode(dentry));
release_configfs_dirent(cursor);
return 0;
}
static int configfs_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
struct super_block *sb = dentry->d_sb;
struct configfs_dirent * parent_sd = dentry->d_fsdata;
struct configfs_dirent *cursor = file->private_data;
struct list_head *p, *q = &cursor->s_sibling;
ino_t ino = 0;
if (!dir_emit_dots(file, ctx))
return 0;
spin_lock(&configfs_dirent_lock);
if (ctx->pos == 2)
list_move(q, &parent_sd->s_children);
for (p = q->next; p != &parent_sd->s_children; p = p->next) {
struct configfs_dirent *next;
const char *name;
int len;
struct inode *inode = NULL;
next = list_entry(p, struct configfs_dirent, s_sibling);
if (!next->s_element)
continue;
/*
* We'll have a dentry and an inode for
* PINNED items and for open attribute
* files. We lock here to prevent a race
* with configfs_d_iput() clearing
* s_dentry before calling iput().
*
* Why do we go to the trouble? If
* someone has an attribute file open,
* the inode number should match until
* they close it. Beyond that, we don't
* care.
*/
dentry = next->s_dentry;
if (dentry)
inode = d_inode(dentry);
if (inode)
ino = inode->i_ino;
spin_unlock(&configfs_dirent_lock);
if (!inode)
ino = iunique(sb, 2);
name = configfs_get_name(next);
len = strlen(name);
if (!dir_emit(ctx, name, len, ino,
fs_umode_to_dtype(next->s_mode)))
return 0;
spin_lock(&configfs_dirent_lock);
list_move(q, p);
p = q;
ctx->pos++;
}
spin_unlock(&configfs_dirent_lock);
return 0;
}
static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
{
struct dentry * dentry = file->f_path.dentry;
switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
		fallthrough;
	case SEEK_SET:
		if (offset >= 0)
			break;
		fallthrough;
	default:
		return -EINVAL;
return -EINVAL;
}
if (offset != file->f_pos) {
file->f_pos = offset;
if (file->f_pos >= 2) {
struct configfs_dirent *sd = dentry->d_fsdata;
struct configfs_dirent *cursor = file->private_data;
struct list_head *p;
loff_t n = file->f_pos - 2;
spin_lock(&configfs_dirent_lock);
list_del(&cursor->s_sibling);
p = sd->s_children.next;
while (n && p != &sd->s_children) {
struct configfs_dirent *next;
next = list_entry(p, struct configfs_dirent,
s_sibling);
if (next->s_element)
n--;
p = p->next;
}
list_add_tail(&cursor->s_sibling, p);
spin_unlock(&configfs_dirent_lock);
}
}
return offset;
}
const struct file_operations configfs_dir_operations = {
.open = configfs_dir_open,
.release = configfs_dir_close,
.llseek = configfs_dir_lseek,
.read = generic_read_dir,
.iterate_shared = configfs_readdir,
};
/**
* configfs_register_group - creates a parent-child relation between two groups
* @parent_group: parent group
* @group: child group
*
* link groups, creates dentry for the child and attaches it to the
* parent dentry.
*
* Return: 0 on success, negative errno code on error
*/
int configfs_register_group(struct config_group *parent_group,
struct config_group *group)
{
struct configfs_subsystem *subsys = parent_group->cg_subsys;
struct dentry *parent;
struct configfs_fragment *frag;
int ret;
frag = new_fragment();
if (!frag)
return -ENOMEM;
mutex_lock(&subsys->su_mutex);
link_group(parent_group, group);
mutex_unlock(&subsys->su_mutex);
parent = parent_group->cg_item.ci_dentry;
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
ret = create_default_group(parent_group, group, frag);
if (ret)
goto err_out;
spin_lock(&configfs_dirent_lock);
configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
spin_unlock(&configfs_dirent_lock);
inode_unlock(d_inode(parent));
put_fragment(frag);
return 0;
err_out:
inode_unlock(d_inode(parent));
mutex_lock(&subsys->su_mutex);
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
put_fragment(frag);
return ret;
}
EXPORT_SYMBOL(configfs_register_group);
/**
* configfs_unregister_group() - unregisters a child group from its parent
* @group: parent group to be unregistered
*
* Undoes configfs_register_group()
*/
void configfs_unregister_group(struct config_group *group)
{
struct configfs_subsystem *subsys = group->cg_subsys;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
struct configfs_dirent *sd = dentry->d_fsdata;
struct configfs_fragment *frag = sd->s_frag;
down_write(&frag->frag_sem);
frag->frag_dead = true;
up_write(&frag->frag_sem);
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
spin_lock(&configfs_dirent_lock);
configfs_detach_prep(dentry, NULL);
spin_unlock(&configfs_dirent_lock);
configfs_detach_group(&group->cg_item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
d_drop(dentry);
fsnotify_rmdir(d_inode(parent), dentry);
inode_unlock(d_inode(parent));
dput(dentry);
mutex_lock(&subsys->su_mutex);
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
}
EXPORT_SYMBOL(configfs_unregister_group);
/**
* configfs_register_default_group() - allocates and registers a child group
* @parent_group: parent group
* @name: child group name
* @item_type: child item type description
*
 * Boilerplate to allocate and register a child group with its parent. We
 * need kzalloc'ed memory because the child's default groups list is
 * initially empty.
*
* Return: allocated config group or ERR_PTR() on error
*/
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
const char *name,
const struct config_item_type *item_type)
{
int ret;
struct config_group *group;
group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
config_group_init_type_name(group, name, item_type);
ret = configfs_register_group(parent_group, group);
if (ret) {
kfree(group);
return ERR_PTR(ret);
}
return group;
}
EXPORT_SYMBOL(configfs_register_default_group);
/**
* configfs_unregister_default_group() - unregisters and frees a child group
* @group: the group to act on
*/
void configfs_unregister_default_group(struct config_group *group)
{
configfs_unregister_group(group);
kfree(group);
}
EXPORT_SYMBOL(configfs_unregister_default_group);
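/*
 * Illustrative sketch (hypothetical "parent" group and "my_stats_type"):
 * a typical caller pairs the two helpers above, e.g.
 *
 *	group = configfs_register_default_group(parent, "stats",
 *						&my_stats_type);
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	...
 *	configfs_unregister_default_group(group);
 */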
int configfs_register_subsystem(struct configfs_subsystem *subsys)
{
int err;
struct config_group *group = &subsys->su_group;
struct dentry *dentry;
struct dentry *root;
struct configfs_dirent *sd;
struct configfs_fragment *frag;
frag = new_fragment();
if (!frag)
return -ENOMEM;
root = configfs_pin_fs();
if (IS_ERR(root)) {
put_fragment(frag);
return PTR_ERR(root);
}
if (!group->cg_item.ci_name)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
sd = root->d_fsdata;
mutex_lock(&configfs_subsystem_mutex);
link_group(to_config_group(sd->s_element), group);
mutex_unlock(&configfs_subsystem_mutex);
inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
err = -ENOMEM;
dentry = d_alloc_name(root, group->cg_item.ci_name);
if (dentry) {
d_add(dentry, NULL);
err = configfs_attach_group(sd->s_element, &group->cg_item,
dentry, frag);
if (err) {
BUG_ON(d_inode(dentry));
d_drop(dentry);
dput(dentry);
} else {
spin_lock(&configfs_dirent_lock);
configfs_dir_set_ready(dentry->d_fsdata);
spin_unlock(&configfs_dirent_lock);
}
}
inode_unlock(d_inode(root));
if (err) {
mutex_lock(&configfs_subsystem_mutex);
unlink_group(group);
mutex_unlock(&configfs_subsystem_mutex);
configfs_release_fs();
}
put_fragment(frag);
return err;
}
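/*
 * Illustrative sketch (hypothetical "my_subsys" and "my_subsys_type", not
 * part of this file): a minimal client registers a subsystem roughly like
 * this:
 *
 *	static struct configfs_subsystem my_subsys = {
 *		.su_group = {
 *			.cg_item = {
 *				.ci_namebuf = "my_subsys",
 *				.ci_type = &my_subsys_type,
 *			},
 *		},
 *	};
 *
 *	config_group_init(&my_subsys.su_group);
 *	mutex_init(&my_subsys.su_mutex);
 *	ret = configfs_register_subsystem(&my_subsys);
 */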
void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
{
struct config_group *group = &subsys->su_group;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *root = dentry->d_sb->s_root;
struct configfs_dirent *sd = dentry->d_fsdata;
struct configfs_fragment *frag = sd->s_frag;
if (dentry->d_parent != root) {
pr_err("Tried to unregister non-subsystem!\n");
return;
}
down_write(&frag->frag_sem);
frag->frag_dead = true;
up_write(&frag->frag_sem);
inode_lock_nested(d_inode(root),
I_MUTEX_PARENT);
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
mutex_lock(&configfs_symlink_mutex);
spin_lock(&configfs_dirent_lock);
if (configfs_detach_prep(dentry, NULL)) {
pr_err("Tried to unregister non-empty subsystem!\n");
}
spin_unlock(&configfs_dirent_lock);
mutex_unlock(&configfs_symlink_mutex);
configfs_detach_group(&group->cg_item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
inode_unlock(d_inode(dentry));
d_drop(dentry);
fsnotify_rmdir(d_inode(root), dentry);
inode_unlock(d_inode(root));
dput(dentry);
mutex_lock(&configfs_subsystem_mutex);
unlink_group(group);
mutex_unlock(&configfs_subsystem_mutex);
configfs_release_fs();
}
EXPORT_SYMBOL(configfs_register_subsystem);
EXPORT_SYMBOL(configfs_unregister_subsystem);
| linux-master | fs/configfs/dir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* inode.c - basic inode and dentry operations.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
* Please see Documentation/filesystems/configfs.rst for more
* information.
*/
#undef DEBUG
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/backing-dev.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/configfs.h>
#include "configfs_internal.h"
#ifdef CONFIG_LOCKDEP
static struct lock_class_key default_group_class[MAX_LOCK_DEPTH];
#endif
static const struct inode_operations configfs_inode_operations = {
.setattr = configfs_setattr,
};
int configfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *iattr)
{
struct inode * inode = d_inode(dentry);
struct configfs_dirent * sd = dentry->d_fsdata;
struct iattr * sd_iattr;
unsigned int ia_valid = iattr->ia_valid;
int error;
if (!sd)
return -EINVAL;
sd_iattr = sd->s_iattr;
if (!sd_iattr) {
/* setting attributes for the first time, allocate now */
sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL);
if (!sd_iattr)
return -ENOMEM;
/* assign default attributes */
sd_iattr->ia_mode = sd->s_mode;
sd_iattr->ia_uid = GLOBAL_ROOT_UID;
sd_iattr->ia_gid = GLOBAL_ROOT_GID;
sd_iattr->ia_atime = sd_iattr->ia_mtime =
sd_iattr->ia_ctime = current_time(inode);
sd->s_iattr = sd_iattr;
}
	/* attributes were changed at least once in the past */
error = simple_setattr(idmap, dentry, iattr);
if (error)
return error;
if (ia_valid & ATTR_UID)
sd_iattr->ia_uid = iattr->ia_uid;
if (ia_valid & ATTR_GID)
sd_iattr->ia_gid = iattr->ia_gid;
if (ia_valid & ATTR_ATIME)
sd_iattr->ia_atime = iattr->ia_atime;
if (ia_valid & ATTR_MTIME)
sd_iattr->ia_mtime = iattr->ia_mtime;
if (ia_valid & ATTR_CTIME)
sd_iattr->ia_ctime = iattr->ia_ctime;
if (ia_valid & ATTR_MODE) {
umode_t mode = iattr->ia_mode;
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
mode &= ~S_ISGID;
sd_iattr->ia_mode = sd->s_mode = mode;
}
return error;
}
static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
{
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
{
inode->i_mode = iattr->ia_mode;
inode->i_uid = iattr->ia_uid;
inode->i_gid = iattr->ia_gid;
inode->i_atime = iattr->ia_atime;
inode->i_mtime = iattr->ia_mtime;
inode_set_ctime_to_ts(inode, iattr->ia_ctime);
}
struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd,
struct super_block *s)
{
struct inode * inode = new_inode(s);
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mapping->a_ops = &ram_aops;
inode->i_op = &configfs_inode_operations;
if (sd->s_iattr) {
			/* configfs_dirent has non-default attributes;
			 * get them for the new inode from the persistent
			 * copy in the configfs_dirent.
			 */
set_inode_attr(inode, sd->s_iattr);
} else
set_default_inode_attr(inode, mode);
}
return inode;
}
#ifdef CONFIG_LOCKDEP
static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
struct inode *inode)
{
int depth = sd->s_depth;
if (depth > 0) {
if (depth <= ARRAY_SIZE(default_group_class)) {
lockdep_set_class(&inode->i_rwsem,
&default_group_class[depth - 1]);
} else {
/*
			 * The nesting depth exceeds what the locking
			 * correctness validator can track; warn that it may
			 * report spurious lockdep splats.
*/
pr_info("Too many levels of inodes for the locking correctness validator.\n");
pr_info("Spurious warnings may appear.\n");
}
}
}
#else /* CONFIG_LOCKDEP */
static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
struct inode *inode)
{
}
#endif /* CONFIG_LOCKDEP */
struct inode *configfs_create(struct dentry *dentry, umode_t mode)
{
struct inode *inode = NULL;
struct configfs_dirent *sd;
struct inode *p_inode;
if (!dentry)
return ERR_PTR(-ENOENT);
if (d_really_is_positive(dentry))
return ERR_PTR(-EEXIST);
sd = dentry->d_fsdata;
inode = configfs_new_inode(mode, sd, dentry->d_sb);
if (!inode)
return ERR_PTR(-ENOMEM);
p_inode = d_inode(dentry->d_parent);
p_inode->i_mtime = inode_set_ctime_current(p_inode);
configfs_set_inode_lock_class(sd, inode);
return inode;
}
/*
 * Get the name of the element represented by the given configfs_dirent.
 */
const unsigned char * configfs_get_name(struct configfs_dirent *sd)
{
struct configfs_attribute *attr;
BUG_ON(!sd || !sd->s_element);
/* These always have a dentry, so use that */
if (sd->s_type & (CONFIGFS_DIR | CONFIGFS_ITEM_LINK))
return sd->s_dentry->d_name.name;
if (sd->s_type & (CONFIGFS_ITEM_ATTR | CONFIGFS_ITEM_BIN_ATTR)) {
attr = sd->s_element;
return attr->ca_name;
}
return NULL;
}
/*
 * Unhashes the dentry corresponding to the given configfs_dirent.
 * Called with the parent inode's i_mutex held.
 */
void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
{
struct dentry * dentry = sd->s_dentry;
if (dentry) {
spin_lock(&dentry->d_lock);
if (simple_positive(dentry)) {
dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
simple_unlink(d_inode(parent), dentry);
} else
spin_unlock(&dentry->d_lock);
}
}
void configfs_hash_and_remove(struct dentry * dir, const char * name)
{
struct configfs_dirent * sd;
struct configfs_dirent * parent_sd = dir->d_fsdata;
if (d_really_is_negative(dir))
/* no inode means this hasn't been made visible yet */
return;
inode_lock(d_inode(dir));
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (!sd->s_element)
continue;
if (!strcmp(configfs_get_name(sd), name)) {
spin_lock(&configfs_dirent_lock);
list_del_init(&sd->s_sibling);
spin_unlock(&configfs_dirent_lock);
configfs_drop_dentry(sd, dir);
configfs_put(sd);
break;
}
}
inode_unlock(d_inode(dir));
}
| linux-master | fs/configfs/inode.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* symlink.c - operations for configfs symlinks.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/configfs.h>
#include "configfs_internal.h"
/* Protects attachments of new symlinks */
DEFINE_MUTEX(configfs_symlink_mutex);
static int item_depth(struct config_item * item)
{
struct config_item * p = item;
int depth = 0;
do { depth++; } while ((p = p->ci_parent) && !configfs_is_root(p));
return depth;
}
static int item_path_length(struct config_item * item)
{
struct config_item * p = item;
int length = 1;
do {
length += strlen(config_item_name(p)) + 1;
p = p->ci_parent;
} while (p && !configfs_is_root(p));
return length;
}
static void fill_item_path(struct config_item * item, char * buffer, int length)
{
struct config_item * p;
--length;
for (p = item; p && !configfs_is_root(p); p = p->ci_parent) {
int cur = strlen(config_item_name(p));
/* back up enough to print this bus id with '/' */
length -= cur;
memcpy(buffer + length, config_item_name(p), cur);
*(buffer + --length) = '/';
}
}
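/*
 * Build the body of a relative symlink from @item's directory to
 * @target. For example (illustrative), a link created in a directory two
 * levels below the configfs root, pointing at a target at path "x/y",
 * gets the body "../../x/y": one "../" per level of @item's depth,
 * followed by @target's path from the root.
 */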
static int configfs_get_target_path(struct config_item *item,
struct config_item *target, char *path)
{
int depth, size;
char *s;
depth = item_depth(item);
size = item_path_length(target) + depth * 3 - 1;
if (size > PATH_MAX)
return -ENAMETOOLONG;
pr_debug("%s: depth = %d, size = %d\n", __func__, depth, size);
for (s = path; depth--; s += 3)
strcpy(s,"../");
fill_item_path(target, path, size);
pr_debug("%s: path = '%s'\n", __func__, path);
return 0;
}
static int create_link(struct config_item *parent_item,
struct config_item *item,
struct dentry *dentry)
{
struct configfs_dirent *target_sd = item->ci_dentry->d_fsdata;
char *body;
int ret;
if (!configfs_dirent_is_ready(target_sd))
return -ENOENT;
body = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!body)
return -ENOMEM;
configfs_get(target_sd);
spin_lock(&configfs_dirent_lock);
if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
spin_unlock(&configfs_dirent_lock);
configfs_put(target_sd);
kfree(body);
return -ENOENT;
}
target_sd->s_links++;
spin_unlock(&configfs_dirent_lock);
ret = configfs_get_target_path(parent_item, item, body);
if (!ret)
ret = configfs_create_link(target_sd, parent_item->ci_dentry,
dentry, body);
if (ret) {
spin_lock(&configfs_dirent_lock);
target_sd->s_links--;
spin_unlock(&configfs_dirent_lock);
configfs_put(target_sd);
kfree(body);
}
return ret;
}
static int get_target(const char *symname, struct path *path,
struct config_item **target, struct super_block *sb)
{
int ret;
ret = kern_path(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, path);
if (!ret) {
if (path->dentry->d_sb == sb) {
*target = configfs_get_config_item(path->dentry);
if (!*target) {
ret = -ENOENT;
path_put(path);
}
} else {
ret = -EPERM;
path_put(path);
}
}
return ret;
}
int configfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *symname)
{
int ret;
struct path path;
struct configfs_dirent *sd;
struct config_item *parent_item;
struct config_item *target_item = NULL;
const struct config_item_type *type;
sd = dentry->d_parent->d_fsdata;
/*
* Fake invisibility if dir belongs to a group/default groups hierarchy
* being attached
*/
if (!configfs_dirent_is_ready(sd))
return -ENOENT;
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
ret = -EPERM;
if (!type || !type->ct_item_ops ||
!type->ct_item_ops->allow_link)
goto out_put;
/*
* This is really sick. What they wanted was a hybrid of
* link(2) and symlink(2) - they wanted the target resolved
* at syscall time (as link(2) would've done), be a directory
* (which link(2) would've refused to do) *AND* be a deep
* fucking magic, making the target busy from rmdir POV.
* symlink(2) is nothing of that sort, and the locking it
* gets matches the normal symlink(2) semantics. Without
* attempts to resolve the target (which might very well
* not even exist yet) done prior to locking the parent
* directory. This perversion, OTOH, needs to resolve
* the target, which would lead to obvious deadlocks if
* attempted with any directories locked.
*
* Unfortunately, that garbage is userland ABI and we should've
* said "no" back in 2005. Too late now, so we get to
* play very ugly games with locking.
*
* Try *ANYTHING* of that sort in new code, and you will
* really regret it. Just ask yourself - what could a BOFH
* do to me and do I want to find it out first-hand?
*
* AV, a thoroughly annoyed bastard.
*/
inode_unlock(dir);
ret = get_target(symname, &path, &target_item, dentry->d_sb);
inode_lock(dir);
if (ret)
goto out_put;
if (dentry->d_inode || d_unhashed(dentry))
ret = -EEXIST;
else
ret = inode_permission(&nop_mnt_idmap, dir,
MAY_WRITE | MAY_EXEC);
if (!ret)
ret = type->ct_item_ops->allow_link(parent_item, target_item);
if (!ret) {
mutex_lock(&configfs_symlink_mutex);
ret = create_link(parent_item, target_item, dentry);
mutex_unlock(&configfs_symlink_mutex);
if (ret && type->ct_item_ops->drop_link)
type->ct_item_ops->drop_link(parent_item,
target_item);
}
config_item_put(target_item);
path_put(&path);
out_put:
config_item_put(parent_item);
return ret;
}
int configfs_unlink(struct inode *dir, struct dentry *dentry)
{
struct configfs_dirent *sd = dentry->d_fsdata, *target_sd;
struct config_item *parent_item;
const struct config_item_type *type;
int ret;
ret = -EPERM; /* What lack-of-symlink returns */
if (!(sd->s_type & CONFIGFS_ITEM_LINK))
goto out;
target_sd = sd->s_element;
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
spin_lock(&configfs_dirent_lock);
list_del_init(&sd->s_sibling);
spin_unlock(&configfs_dirent_lock);
configfs_drop_dentry(sd, dentry->d_parent);
dput(dentry);
configfs_put(sd);
/*
* drop_link() must be called before
* decrementing target's ->s_links, so that the order of
* drop_link(this, target) and drop_item(target) is preserved.
*/
if (type && type->ct_item_ops &&
type->ct_item_ops->drop_link)
type->ct_item_ops->drop_link(parent_item,
target_sd->s_element);
spin_lock(&configfs_dirent_lock);
target_sd->s_links--;
spin_unlock(&configfs_dirent_lock);
configfs_put(target_sd);
config_item_put(parent_item);
ret = 0;
out:
return ret;
}
const struct inode_operations configfs_symlink_inode_operations = {
.get_link = simple_get_link,
.setattr = configfs_setattr,
};
| linux-master | fs/configfs/symlink.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* file.c - operations for regular (text) files.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/configfs.h>
#include "configfs_internal.h"
/*
* A simple attribute can only be 4096 characters. Why 4k? Because the
* original code limited it to PAGE_SIZE. That's a bad idea, though,
* because an attribute of 16k on ia64 won't work on x86. So we limit to
* 4k, our minimum common page size.
*/
#define SIMPLE_ATTR_SIZE 4096
struct configfs_buffer {
size_t count;
loff_t pos;
char * page;
struct configfs_item_operations * ops;
struct mutex mutex;
int needs_read_fill;
bool read_in_progress;
bool write_in_progress;
char *bin_buffer;
int bin_buffer_size;
int cb_max_size;
struct config_item *item;
struct module *owner;
union {
struct configfs_attribute *attr;
struct configfs_bin_attribute *bin_attr;
};
};
static inline struct configfs_fragment *to_frag(struct file *file)
{
struct configfs_dirent *sd = file->f_path.dentry->d_fsdata;
return sd->s_frag;
}
static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
{
struct configfs_fragment *frag = to_frag(file);
ssize_t count = -ENOENT;
if (!buffer->page)
buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
if (!buffer->page)
return -ENOMEM;
down_read(&frag->frag_sem);
if (!frag->frag_dead)
count = buffer->attr->show(buffer->item, buffer->page);
up_read(&frag->frag_sem);
if (count < 0)
return count;
if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE))
return -EIO;
buffer->needs_read_fill = 0;
buffer->count = count;
return 0;
}
static ssize_t configfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct configfs_buffer *buffer = file->private_data;
ssize_t retval = 0;
mutex_lock(&buffer->mutex);
if (buffer->needs_read_fill) {
retval = fill_read_buffer(file, buffer);
if (retval)
goto out;
}
pr_debug("%s: count = %zd, pos = %lld, buf = %s\n",
__func__, iov_iter_count(to), iocb->ki_pos, buffer->page);
if (iocb->ki_pos >= buffer->count)
goto out;
retval = copy_to_iter(buffer->page + iocb->ki_pos,
buffer->count - iocb->ki_pos, to);
iocb->ki_pos += retval;
if (retval == 0)
retval = -EFAULT;
out:
mutex_unlock(&buffer->mutex);
return retval;
}
static ssize_t configfs_bin_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct configfs_fragment *frag = to_frag(file);
struct configfs_buffer *buffer = file->private_data;
ssize_t retval = 0;
ssize_t len;
mutex_lock(&buffer->mutex);
/* we don't support switching read/write modes */
if (buffer->write_in_progress) {
retval = -ETXTBSY;
goto out;
}
buffer->read_in_progress = true;
if (buffer->needs_read_fill) {
/* perform first read with buf == NULL to get extent */
down_read(&frag->frag_sem);
if (!frag->frag_dead)
len = buffer->bin_attr->read(buffer->item, NULL, 0);
else
len = -ENOENT;
up_read(&frag->frag_sem);
if (len <= 0) {
retval = len;
goto out;
}
/* do not exceed the maximum value */
if (buffer->cb_max_size && len > buffer->cb_max_size) {
retval = -EFBIG;
goto out;
}
buffer->bin_buffer = vmalloc(len);
if (buffer->bin_buffer == NULL) {
retval = -ENOMEM;
goto out;
}
buffer->bin_buffer_size = len;
/* perform second read to fill buffer */
down_read(&frag->frag_sem);
if (!frag->frag_dead)
len = buffer->bin_attr->read(buffer->item,
buffer->bin_buffer, len);
else
len = -ENOENT;
up_read(&frag->frag_sem);
if (len < 0) {
retval = len;
vfree(buffer->bin_buffer);
buffer->bin_buffer_size = 0;
buffer->bin_buffer = NULL;
goto out;
}
buffer->needs_read_fill = 0;
}
if (iocb->ki_pos >= buffer->bin_buffer_size)
goto out;
retval = copy_to_iter(buffer->bin_buffer + iocb->ki_pos,
buffer->bin_buffer_size - iocb->ki_pos, to);
iocb->ki_pos += retval;
if (retval == 0)
retval = -EFAULT;
out:
mutex_unlock(&buffer->mutex);
return retval;
}
/* Fill @buffer with data coming from @from. */
static int fill_write_buffer(struct configfs_buffer *buffer,
struct iov_iter *from)
{
int copied;
if (!buffer->page)
buffer->page = (char *)__get_free_pages(GFP_KERNEL, 0);
if (!buffer->page)
return -ENOMEM;
copied = copy_from_iter(buffer->page, SIMPLE_ATTR_SIZE - 1, from);
buffer->needs_read_fill = 1;
	/* The buffer is assumed to contain a string; NUL-terminate it so
	 * that e.g. sscanf() can parse it easily. */
buffer->page[copied] = 0;
return copied ? : -EFAULT;
}
static int
flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count)
{
struct configfs_fragment *frag = to_frag(file);
int res = -ENOENT;
down_read(&frag->frag_sem);
if (!frag->frag_dead)
res = buffer->attr->store(buffer->item, buffer->page, count);
up_read(&frag->frag_sem);
return res;
}
/*
* There is no easy way for us to know if userspace is only doing a partial
* write, so we don't support them. We expect the entire buffer to come on the
* first write.
* Hint: if you're writing a value, first read the file, modify only the value
* you're changing, then write entire buffer back.
*/
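/*
 * Illustrative only: a userspace writer should hand over the complete
 * value in a single write(2), e.g. pwrite(fd, "42\n", 3, 0), rather than
 * issuing several partial writes.
 */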
static ssize_t configfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct configfs_buffer *buffer = file->private_data;
int len;
mutex_lock(&buffer->mutex);
len = fill_write_buffer(buffer, from);
if (len > 0)
len = flush_write_buffer(file, buffer, len);
if (len > 0)
iocb->ki_pos += len;
mutex_unlock(&buffer->mutex);
return len;
}
static ssize_t configfs_bin_write_iter(struct kiocb *iocb,
struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct configfs_buffer *buffer = file->private_data;
void *tbuf = NULL;
size_t end_offset;
ssize_t len;
mutex_lock(&buffer->mutex);
/* we don't support switching read/write modes */
if (buffer->read_in_progress) {
len = -ETXTBSY;
goto out;
}
buffer->write_in_progress = true;
/* buffer grows? */
end_offset = iocb->ki_pos + iov_iter_count(from);
if (end_offset > buffer->bin_buffer_size) {
if (buffer->cb_max_size && end_offset > buffer->cb_max_size) {
len = -EFBIG;
goto out;
}
tbuf = vmalloc(end_offset);
if (tbuf == NULL) {
len = -ENOMEM;
goto out;
}
/* copy old contents */
if (buffer->bin_buffer) {
memcpy(tbuf, buffer->bin_buffer,
buffer->bin_buffer_size);
vfree(buffer->bin_buffer);
}
/* clear the new area */
memset(tbuf + buffer->bin_buffer_size, 0,
end_offset - buffer->bin_buffer_size);
buffer->bin_buffer = tbuf;
buffer->bin_buffer_size = end_offset;
}
len = copy_from_iter(buffer->bin_buffer + iocb->ki_pos,
buffer->bin_buffer_size - iocb->ki_pos, from);
iocb->ki_pos += len;
out:
mutex_unlock(&buffer->mutex);
return len ? : -EFAULT;
}
static int __configfs_open_file(struct inode *inode, struct file *file, int type)
{
struct dentry *dentry = file->f_path.dentry;
struct configfs_fragment *frag = to_frag(file);
struct configfs_attribute *attr;
struct configfs_buffer *buffer;
int error;
error = -ENOMEM;
buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
if (!buffer)
goto out;
error = -ENOENT;
down_read(&frag->frag_sem);
if (unlikely(frag->frag_dead))
goto out_free_buffer;
error = -EINVAL;
buffer->item = to_item(dentry->d_parent);
if (!buffer->item)
goto out_free_buffer;
attr = to_attr(dentry);
if (!attr)
goto out_free_buffer;
if (type & CONFIGFS_ITEM_BIN_ATTR) {
buffer->bin_attr = to_bin_attr(dentry);
buffer->cb_max_size = buffer->bin_attr->cb_max_size;
} else {
buffer->attr = attr;
}
buffer->owner = attr->ca_owner;
/* Grab the module reference for this attribute if we have one */
error = -ENODEV;
if (!try_module_get(buffer->owner))
goto out_free_buffer;
error = -EACCES;
if (!buffer->item->ci_type)
goto out_put_module;
buffer->ops = buffer->item->ci_type->ct_item_ops;
/* File needs write support.
* The inode's perms must say it's ok,
* and we must have a store method.
*/
if (file->f_mode & FMODE_WRITE) {
if (!(inode->i_mode & S_IWUGO))
goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->store)
goto out_put_module;
if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write)
goto out_put_module;
}
/* File needs read support.
	 * The inode's perms must say it's ok, and there
	 * must be a show method for it.
*/
if (file->f_mode & FMODE_READ) {
if (!(inode->i_mode & S_IRUGO))
goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->show)
goto out_put_module;
if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read)
goto out_put_module;
}
mutex_init(&buffer->mutex);
buffer->needs_read_fill = 1;
buffer->read_in_progress = false;
buffer->write_in_progress = false;
file->private_data = buffer;
up_read(&frag->frag_sem);
return 0;
out_put_module:
module_put(buffer->owner);
out_free_buffer:
up_read(&frag->frag_sem);
kfree(buffer);
out:
return error;
}
static int configfs_release(struct inode *inode, struct file *filp)
{
struct configfs_buffer *buffer = filp->private_data;
module_put(buffer->owner);
if (buffer->page)
free_page((unsigned long)buffer->page);
mutex_destroy(&buffer->mutex);
kfree(buffer);
return 0;
}
static int configfs_open_file(struct inode *inode, struct file *filp)
{
return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR);
}
static int configfs_open_bin_file(struct inode *inode, struct file *filp)
{
return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
}
static int configfs_release_bin_file(struct inode *inode, struct file *file)
{
struct configfs_buffer *buffer = file->private_data;
if (buffer->write_in_progress) {
struct configfs_fragment *frag = to_frag(file);
down_read(&frag->frag_sem);
if (!frag->frag_dead) {
/* result of ->release() is ignored */
buffer->bin_attr->write(buffer->item,
buffer->bin_buffer,
buffer->bin_buffer_size);
}
up_read(&frag->frag_sem);
}
vfree(buffer->bin_buffer);
configfs_release(inode, file);
return 0;
}
const struct file_operations configfs_file_operations = {
.read_iter = configfs_read_iter,
.write_iter = configfs_write_iter,
.llseek = generic_file_llseek,
.open = configfs_open_file,
.release = configfs_release,
};
const struct file_operations configfs_bin_file_operations = {
.read_iter = configfs_bin_read_iter,
.write_iter = configfs_bin_write_iter,
.llseek = NULL, /* bin file is not seekable */
.open = configfs_open_bin_file,
.release = configfs_release_bin_file,
};
/**
* configfs_create_file - create an attribute file for an item.
* @item: item we're creating for.
 * @attr: attribute descriptor.
*/
int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr)
{
struct dentry *dir = item->ci_dentry;
struct configfs_dirent *parent_sd = dir->d_fsdata;
umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
int error = 0;
inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
CONFIGFS_ITEM_ATTR, parent_sd->s_frag);
inode_unlock(d_inode(dir));
return error;
}
/**
* configfs_create_bin_file - create a binary attribute file for an item.
* @item: item we're creating for.
 * @bin_attr: attribute descriptor.
*/
int configfs_create_bin_file(struct config_item *item,
const struct configfs_bin_attribute *bin_attr)
{
struct dentry *dir = item->ci_dentry;
struct configfs_dirent *parent_sd = dir->d_fsdata;
umode_t mode = (bin_attr->cb_attr.ca_mode & S_IALLUGO) | S_IFREG;
int error = 0;
inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag);
inode_unlock(dir->d_inode);
return error;
}
| linux-master | fs/configfs/file.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* item.c - library routines for handling generic config items
*
* Based on kobject:
* kobject is Copyright (c) 2002-2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
* Please see the file Documentation/filesystems/configfs.rst for
* critical information about using the config_item interface.
*/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/configfs.h>
static inline struct config_item *to_item(struct list_head *entry)
{
return container_of(entry, struct config_item, ci_entry);
}
/* Forward declaration: config_item_put() needs the release callback. */
static void config_item_release(struct kref *kref);
/**
* config_item_init - initialize item.
* @item: item in question.
*/
static void config_item_init(struct config_item *item)
{
kref_init(&item->ci_kref);
INIT_LIST_HEAD(&item->ci_entry);
}
/**
* config_item_set_name - Set the name of an item
* @item: item.
* @fmt: The vsnprintf()'s format string.
*
* If strlen(name) >= CONFIGFS_ITEM_NAME_LEN, then use a
* dynamically allocated string that @item->ci_name points to.
* Otherwise, use the static @item->ci_namebuf array.
*/
int config_item_set_name(struct config_item *item, const char *fmt, ...)
{
int limit = CONFIGFS_ITEM_NAME_LEN;
int need;
va_list args;
char *name;
/*
* First, try the static array
*/
va_start(args, fmt);
need = vsnprintf(item->ci_namebuf, limit, fmt, args);
va_end(args);
if (need < limit)
name = item->ci_namebuf;
else {
va_start(args, fmt);
name = kvasprintf(GFP_KERNEL, fmt, args);
va_end(args);
if (!name)
return -EFAULT;
}
/* Free the old name, if necessary. */
if (item->ci_name && item->ci_name != item->ci_namebuf)
kfree(item->ci_name);
/* Now, set the new name */
item->ci_name = name;
return 0;
}
EXPORT_SYMBOL(config_item_set_name);
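/*
 * Illustrative only: callers pass a printf-style format, e.g.
 *
 *	config_item_set_name(item, "%s-%d", base_name, id);
 *
 * Short results land in the static ci_namebuf; longer ones are
 * kvasprintf()ed.
 */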
void config_item_init_type_name(struct config_item *item,
const char *name,
const struct config_item_type *type)
{
config_item_set_name(item, "%s", name);
item->ci_type = type;
config_item_init(item);
}
EXPORT_SYMBOL(config_item_init_type_name);
void config_group_init_type_name(struct config_group *group, const char *name,
const struct config_item_type *type)
{
config_item_set_name(&group->cg_item, "%s", name);
group->cg_item.ci_type = type;
config_group_init(group);
}
EXPORT_SYMBOL(config_group_init_type_name);
struct config_item *config_item_get(struct config_item *item)
{
if (item)
kref_get(&item->ci_kref);
return item;
}
EXPORT_SYMBOL(config_item_get);
struct config_item *config_item_get_unless_zero(struct config_item *item)
{
if (item && kref_get_unless_zero(&item->ci_kref))
return item;
return NULL;
}
EXPORT_SYMBOL(config_item_get_unless_zero);
static void config_item_cleanup(struct config_item *item)
{
const struct config_item_type *t = item->ci_type;
struct config_group *s = item->ci_group;
struct config_item *parent = item->ci_parent;
pr_debug("config_item %s: cleaning up\n", config_item_name(item));
if (item->ci_name != item->ci_namebuf)
kfree(item->ci_name);
item->ci_name = NULL;
if (t && t->ct_item_ops && t->ct_item_ops->release)
t->ct_item_ops->release(item);
if (s)
config_group_put(s);
if (parent)
config_item_put(parent);
}
static void config_item_release(struct kref *kref)
{
config_item_cleanup(container_of(kref, struct config_item, ci_kref));
}
/**
* config_item_put - decrement refcount for item.
* @item: item.
*
* Decrement the refcount, and if 0, call config_item_cleanup().
*/
void config_item_put(struct config_item *item)
{
if (item)
kref_put(&item->ci_kref, config_item_release);
}
EXPORT_SYMBOL(config_item_put);
/**
* config_group_init - initialize a group for use
* @group: config_group
*/
void config_group_init(struct config_group *group)
{
config_item_init(&group->cg_item);
INIT_LIST_HEAD(&group->cg_children);
INIT_LIST_HEAD(&group->default_groups);
}
EXPORT_SYMBOL(config_group_init);
/**
* config_group_find_item - search for item in group.
* @group: group we're looking in.
* @name: item's name.
*
 * Iterate over @group->cg_children, looking for a matching config_item.
 * If a matching item is found, take a reference and return the item.
 * Caller must have locked the group via @group->cg_subsys->su_mutex.
*/
struct config_item *config_group_find_item(struct config_group *group,
const char *name)
{
struct list_head *entry;
struct config_item *ret = NULL;
list_for_each(entry, &group->cg_children) {
struct config_item *item = to_item(entry);
if (config_item_name(item) &&
!strcmp(config_item_name(item), name)) {
ret = config_item_get(item);
break;
}
}
return ret;
}
EXPORT_SYMBOL(config_group_find_item);
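/*
 * Illustrative sketch (hypothetical caller): look up a child while
 * holding the subsystem mutex, then drop the reference when done:
 *
 *	mutex_lock(&subsys->su_mutex);
 *	item = config_group_find_item(&subsys->su_group, "foo");
 *	mutex_unlock(&subsys->su_mutex);
 *	if (item) {
 *		...
 *		config_item_put(item);
 *	}
 */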
| linux-master | fs/configfs/item.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache interface to CacheFiles
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/file.h>
#include <linux/falloc.h>
#include <trace/events/fscache.h>
#include "internal.h"
static atomic_t cachefiles_object_debug_id;
/*
* Allocate a cache object record.
*/
static
struct cachefiles_object *cachefiles_alloc_object(struct fscache_cookie *cookie)
{
struct fscache_volume *vcookie = cookie->volume;
struct cachefiles_volume *volume = vcookie->cache_priv;
struct cachefiles_object *object;
_enter("{%s},%x,", vcookie->key, cookie->debug_id);
object = kmem_cache_zalloc(cachefiles_object_jar, GFP_KERNEL);
if (!object)
return NULL;
refcount_set(&object->ref, 1);
spin_lock_init(&object->lock);
INIT_LIST_HEAD(&object->cache_link);
object->volume = volume;
object->debug_id = atomic_inc_return(&cachefiles_object_debug_id);
object->cookie = fscache_get_cookie(cookie, fscache_cookie_get_attach_object);
fscache_count_object(vcookie->cache);
trace_cachefiles_ref(object->debug_id, cookie->debug_id, 1,
cachefiles_obj_new);
return object;
}
/*
* Note that an object has been seen.
*/
void cachefiles_see_object(struct cachefiles_object *object,
enum cachefiles_obj_ref_trace why)
{
trace_cachefiles_ref(object->debug_id, object->cookie->debug_id,
refcount_read(&object->ref), why);
}
/*
 * Increment the usage count on an object.
*/
struct cachefiles_object *cachefiles_grab_object(struct cachefiles_object *object,
enum cachefiles_obj_ref_trace why)
{
int r;
__refcount_inc(&object->ref, &r);
trace_cachefiles_ref(object->debug_id, object->cookie->debug_id, r, why);
return object;
}
/*
* dispose of a reference to an object
*/
void cachefiles_put_object(struct cachefiles_object *object,
enum cachefiles_obj_ref_trace why)
{
unsigned int object_debug_id = object->debug_id;
unsigned int cookie_debug_id = object->cookie->debug_id;
struct fscache_cache *cache;
bool done;
int r;
done = __refcount_dec_and_test(&object->ref, &r);
trace_cachefiles_ref(object_debug_id, cookie_debug_id, r, why);
if (done) {
_debug("- kill object OBJ%x", object_debug_id);
ASSERTCMP(object->file, ==, NULL);
kfree(object->d_name);
cache = object->volume->cache->cache;
fscache_put_cookie(object->cookie, fscache_cookie_put_object);
object->cookie = NULL;
kmem_cache_free(cachefiles_object_jar, object);
fscache_uncount_object(cache);
}
_leave("");
}
/*
* Adjust the size of a cache file if necessary to match the DIO size. We keep
* the EOF marker a multiple of DIO blocks so that we don't fall back to doing
* non-DIO for a partial block straddling the EOF, but we also have to be
* careful of someone expanding the file and accidentally accreting the
* padding.
*/
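/*
 * For example (illustrative; assumes the usual 4KiB
 * CACHEFILES_DIO_BLOCK_SIZE): a cookie object_size of 6000 bytes gets the
 * backing file EOF placed at 8192.
 */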
static int cachefiles_adjust_size(struct cachefiles_object *object)
{
struct iattr newattrs;
struct file *file = object->file;
uint64_t ni_size;
loff_t oi_size;
int ret;
ni_size = object->cookie->object_size;
ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);
_enter("{OBJ%x},[%llu]",
object->debug_id, (unsigned long long) ni_size);
if (!file)
return -ENOBUFS;
oi_size = i_size_read(file_inode(file));
if (oi_size == ni_size)
return 0;
inode_lock(file_inode(file));
/* if there's an extension to a partial page at the end of the backing
* file, we need to discard the partial page so that we pick up new
* data after it */
if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
_debug("discard tail %llx", oi_size);
newattrs.ia_valid = ATTR_SIZE;
newattrs.ia_size = oi_size & PAGE_MASK;
ret = cachefiles_inject_remove_error();
if (ret == 0)
ret = notify_change(&nop_mnt_idmap, file->f_path.dentry,
&newattrs, NULL);
if (ret < 0)
goto truncate_failed;
}
newattrs.ia_valid = ATTR_SIZE;
newattrs.ia_size = ni_size;
ret = cachefiles_inject_write_error();
if (ret == 0)
ret = notify_change(&nop_mnt_idmap, file->f_path.dentry,
&newattrs, NULL);
truncate_failed:
inode_unlock(file_inode(file));
if (ret < 0)
trace_cachefiles_io_error(NULL, file_inode(file), ret,
cachefiles_trace_notify_change_error);
if (ret == -EIO) {
cachefiles_io_error_obj(object, "Size set failed");
ret = -ENOBUFS;
}
_leave(" = %d", ret);
return ret;
}
/*
* Attempt to look up the nominated node in this cache
*/
static bool cachefiles_lookup_cookie(struct fscache_cookie *cookie)
{
struct cachefiles_object *object;
struct cachefiles_cache *cache = cookie->volume->cache->cache_priv;
const struct cred *saved_cred;
bool success;
object = cachefiles_alloc_object(cookie);
if (!object)
goto fail;
_enter("{OBJ%x}", object->debug_id);
if (!cachefiles_cook_key(object))
goto fail_put;
cookie->cache_priv = object;
cachefiles_begin_secure(cache, &saved_cred);
success = cachefiles_look_up_object(object);
if (!success)
goto fail_withdraw;
cachefiles_see_object(object, cachefiles_obj_see_lookup_cookie);
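	/* Attach the object to the cache's list so that it can be found and
	 * withdrawn if the cache itself is torn down.
	 */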
spin_lock(&cache->object_list_lock);
list_add(&object->cache_link, &cache->object_list);
spin_unlock(&cache->object_list_lock);
cachefiles_adjust_size(object);
cachefiles_end_secure(cache, saved_cred);
_leave(" = t");
return true;
fail_withdraw:
cachefiles_end_secure(cache, saved_cred);
cachefiles_see_object(object, cachefiles_obj_see_lookup_failed);
fscache_caching_failed(cookie);
_debug("failed c=%08x o=%08x", cookie->debug_id, object->debug_id);
/* The caller holds an access count on the cookie, so we need them to
* drop it before we can withdraw the object.
*/
return false;
fail_put:
cachefiles_put_object(object, cachefiles_obj_put_alloc_fail);
fail:
return false;
}
/*
* Shorten the backing object to discard any dirty data and free up
* any unused granules.
*/
static bool cachefiles_shorten_object(struct cachefiles_object *object,
struct file *file, loff_t new_size)
{
struct cachefiles_cache *cache = object->volume->cache;
struct inode *inode = file_inode(file);
loff_t i_size, dio_size;
int ret;
dio_size = round_up(new_size, CACHEFILES_DIO_BLOCK_SIZE);
i_size = i_size_read(inode);
trace_cachefiles_trunc(object, inode, i_size, dio_size,
cachefiles_trunc_shrink);
ret = cachefiles_inject_remove_error();
if (ret == 0)
ret = vfs_truncate(&file->f_path, dio_size);
if (ret < 0) {
trace_cachefiles_io_error(object, file_inode(file), ret,
cachefiles_trace_trunc_error);
cachefiles_io_error_obj(object, "Trunc-to-size failed %d", ret);
cachefiles_remove_object_xattr(cache, object, file->f_path.dentry);
return false;
}
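	/* The file is now truncated to the rounded-up DIO size.  If that is
	 * larger than the requested size, explicitly zero the slack so the
	 * padding can't expose stale data.
	 */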
if (new_size < dio_size) {
trace_cachefiles_trunc(object, inode, dio_size, new_size,
cachefiles_trunc_dio_adjust);
ret = cachefiles_inject_write_error();
if (ret == 0)
ret = vfs_fallocate(file, FALLOC_FL_ZERO_RANGE,
new_size, dio_size - new_size);
if (ret < 0) {
trace_cachefiles_io_error(object, file_inode(file), ret,
cachefiles_trace_fallocate_error);
cachefiles_io_error_obj(object, "Trunc-to-dio-size failed %d", ret);
cachefiles_remove_object_xattr(cache, object, file->f_path.dentry);
return false;
}
}
return true;
}
/*
* Resize the backing object.
*/
static void cachefiles_resize_cookie(struct netfs_cache_resources *cres,
loff_t new_size)
{
struct cachefiles_object *object = cachefiles_cres_object(cres);
struct cachefiles_cache *cache = object->volume->cache;
struct fscache_cookie *cookie = object->cookie;
const struct cred *saved_cred;
struct file *file = cachefiles_cres_file(cres);
loff_t old_size = cookie->object_size;
_enter("%llu->%llu", old_size, new_size);
if (new_size < old_size) {
cachefiles_begin_secure(cache, &saved_cred);
cachefiles_shorten_object(object, file, new_size);
cachefiles_end_secure(cache, saved_cred);
object->cookie->object_size = new_size;
return;
}
/* The file is being expanded. We don't need to do anything
* particularly. cookie->initial_size doesn't change and so the point
* at which we have to download before doesn't change.
*/
cookie->object_size = new_size;
}
/*
* Commit changes to the object as we drop it.
*/
static void cachefiles_commit_object(struct cachefiles_object *object,
struct cachefiles_cache *cache)
{
bool update = false;
if (test_and_clear_bit(FSCACHE_COOKIE_LOCAL_WRITE, &object->cookie->flags))
update = true;
if (test_and_clear_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags))
update = true;
if (update)
cachefiles_set_object_xattr(object);
if (test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags))
cachefiles_commit_tmpfile(cache, object);
}
/*
 * Finalise an object and close the VFS structs that we have.
*/
static void cachefiles_clean_up_object(struct cachefiles_object *object,
struct cachefiles_cache *cache)
{
if (test_bit(FSCACHE_COOKIE_RETIRED, &object->cookie->flags)) {
if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
cachefiles_see_object(object, cachefiles_obj_see_clean_delete);
_debug("- inval object OBJ%x", object->debug_id);
cachefiles_delete_object(object, FSCACHE_OBJECT_WAS_RETIRED);
} else {
cachefiles_see_object(object, cachefiles_obj_see_clean_drop_tmp);
_debug("- inval object OBJ%x tmpfile", object->debug_id);
}
} else {
cachefiles_see_object(object, cachefiles_obj_see_clean_commit);
cachefiles_commit_object(object, cache);
}
cachefiles_unmark_inode_in_use(object, object->file);
if (object->file) {
fput(object->file);
object->file = NULL;
}
}
/*
* Withdraw caching for a cookie.
*/
static void cachefiles_withdraw_cookie(struct fscache_cookie *cookie)
{
struct cachefiles_object *object = cookie->cache_priv;
struct cachefiles_cache *cache = object->volume->cache;
const struct cred *saved_cred;
_enter("o=%x", object->debug_id);
cachefiles_see_object(object, cachefiles_obj_see_withdraw_cookie);
if (!list_empty(&object->cache_link)) {
spin_lock(&cache->object_list_lock);
cachefiles_see_object(object, cachefiles_obj_see_withdrawal);
list_del_init(&object->cache_link);
spin_unlock(&cache->object_list_lock);
}
cachefiles_ondemand_clean_object(object);
if (object->file) {
cachefiles_begin_secure(cache, &saved_cred);
cachefiles_clean_up_object(object, cache);
cachefiles_end_secure(cache, saved_cred);
}
cookie->cache_priv = NULL;
cachefiles_put_object(object, cachefiles_obj_put_detach);
}
/*
* Invalidate the storage associated with a cookie.
*/
static bool cachefiles_invalidate_cookie(struct fscache_cookie *cookie)
{
struct cachefiles_object *object = cookie->cache_priv;
struct file *new_file, *old_file;
bool old_tmpfile;
_enter("o=%x,[%llu]", object->debug_id, object->cookie->object_size);
old_tmpfile = test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
if (!object->file) {
fscache_resume_after_invalidation(cookie);
_leave(" = t [light]");
return true;
}
new_file = cachefiles_create_tmpfile(object);
if (IS_ERR(new_file))
goto failed;
/* Substitute the VFS target */
_debug("sub");
spin_lock(&object->lock);
old_file = object->file;
object->file = new_file;
object->content_info = CACHEFILES_CONTENT_NO_DATA;
set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
spin_unlock(&object->lock);
_debug("subbed");
/* Allow I/O to take place again */
fscache_resume_after_invalidation(cookie);
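	/* Dispose of the old backing file.  If it wasn't a tmpfile, it is
	 * still linked into the fanout tree and must be buried first.
	 */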
if (old_file) {
if (!old_tmpfile) {
struct cachefiles_volume *volume = object->volume;
struct dentry *fan = volume->fanout[(u8)cookie->key_hash];
inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
cachefiles_bury_object(volume->cache, object, fan,
old_file->f_path.dentry,
FSCACHE_OBJECT_INVALIDATED);
}
fput(old_file);
}
_leave(" = t");
return true;
failed:
_leave(" = f");
return false;
}
const struct fscache_cache_ops cachefiles_cache_ops = {
.name = "cachefiles",
.acquire_volume = cachefiles_acquire_volume,
.free_volume = cachefiles_free_volume,
.lookup_cookie = cachefiles_lookup_cookie,
.withdraw_cookie = cachefiles_withdraw_cookie,
.invalidate_cookie = cachefiles_invalidate_cookie,
.begin_operation = cachefiles_begin_operation,
.resize_cookie = cachefiles_resize_cookie,
.prepare_to_write = cachefiles_prepare_to_write,
};
| linux-master | fs/cachefiles/interface.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Error injection handling.
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/sysctl.h>
#include "internal.h"
unsigned int cachefiles_error_injection_state;
static struct ctl_table_header *cachefiles_sysctl;
static struct ctl_table cachefiles_sysctls[] = {
{
.procname = "error_injection",
.data = &cachefiles_error_injection_state,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_douintvec,
},
{}
};
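/*
 * The mask is exposed as /proc/sys/cachefiles/error_injection.  Writing a
 * non-zero mask there makes the cachefiles_inject_*_error() helpers return
 * errors instead of 0, e.g. (illustrative only):
 *
 *	echo 2 > /proc/sys/cachefiles/error_injection
 *
 * The meaning of the individual mask bits is defined by those helpers in
 * internal.h.
 */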
int __init cachefiles_register_error_injection(void)
{
cachefiles_sysctl = register_sysctl("cachefiles", cachefiles_sysctls);
if (!cachefiles_sysctl)
return -ENOMEM;
return 0;
}
void cachefiles_unregister_error_injection(void)
{
unregister_sysctl_table(cachefiles_sysctl);
}
| linux-master | fs/cachefiles/error_inject.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles extended attribute management
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/slab.h>
#include "internal.h"
#define CACHEFILES_COOKIE_TYPE_DATA 1
struct cachefiles_xattr {
__be64 object_size; /* Actual size of the object */
__be64 zero_point; /* Size after which server has no data not written by us */
__u8 type; /* Type of object */
__u8 content; /* Content presence (enum cachefiles_content) */
__u8 data[]; /* netfs coherency data */
} __packed;
static const char cachefiles_xattr_cache[] =
XATTR_USER_PREFIX "CacheFiles.cache";
struct cachefiles_vol_xattr {
__be32 reserved; /* Reserved, should be 0 */
__u8 data[]; /* netfs volume coherency data */
} __packed;
/*
* set the state xattr on a cache file
*/
int cachefiles_set_object_xattr(struct cachefiles_object *object)
{
struct cachefiles_xattr *buf;
struct dentry *dentry;
struct file *file = object->file;
unsigned int len = object->cookie->aux_len;
int ret;
if (!file)
return -ESTALE;
dentry = file->f_path.dentry;
_enter("%x,#%d", object->debug_id, len);
buf = kmalloc(sizeof(struct cachefiles_xattr) + len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->object_size = cpu_to_be64(object->cookie->object_size);
buf->zero_point = 0;
buf->type = CACHEFILES_COOKIE_TYPE_DATA;
buf->content = object->content_info;
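	/* If local modifications are in flight, record the content state as
	 * dirty so that an interrupted run leaves a conflict marker behind.
	 */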
if (test_bit(FSCACHE_COOKIE_LOCAL_WRITE, &object->cookie->flags))
buf->content = CACHEFILES_CONTENT_DIRTY;
if (len > 0)
memcpy(buf->data, fscache_get_aux(object->cookie), len);
ret = cachefiles_inject_write_error();
if (ret == 0)
ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache,
buf, sizeof(struct cachefiles_xattr) + len, 0);
if (ret < 0) {
trace_cachefiles_vfs_error(object, file_inode(file), ret,
cachefiles_trace_setxattr_error);
trace_cachefiles_coherency(object, file_inode(file)->i_ino,
buf->content,
cachefiles_coherency_set_fail);
if (ret != -ENOMEM)
cachefiles_io_error_obj(
object,
"Failed to set xattr with error %d", ret);
} else {
trace_cachefiles_coherency(object, file_inode(file)->i_ino,
buf->content,
cachefiles_coherency_set_ok);
}
kfree(buf);
_leave(" = %d", ret);
return ret;
}
/*
* check the consistency between the backing cache and the FS-Cache cookie
*/
int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file)
{
struct cachefiles_xattr *buf;
struct dentry *dentry = file->f_path.dentry;
unsigned int len = object->cookie->aux_len, tlen;
const void *p = fscache_get_aux(object->cookie);
enum cachefiles_coherency_trace why;
ssize_t xlen;
int ret = -ESTALE;
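	/* Assume the object is stale; any mismatch below leaves ret at
	 * -ESTALE.
	 */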
tlen = sizeof(struct cachefiles_xattr) + len;
buf = kmalloc(tlen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
xlen = cachefiles_inject_read_error();
if (xlen == 0)
xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, tlen);
if (xlen != tlen) {
if (xlen < 0)
trace_cachefiles_vfs_error(object, file_inode(file), xlen,
cachefiles_trace_getxattr_error);
if (xlen == -EIO)
cachefiles_io_error_obj(
object,
"Failed to read aux with error %zd", xlen);
why = cachefiles_coherency_check_xattr;
} else if (buf->type != CACHEFILES_COOKIE_TYPE_DATA) {
why = cachefiles_coherency_check_type;
} else if (memcmp(buf->data, p, len) != 0) {
why = cachefiles_coherency_check_aux;
} else if (be64_to_cpu(buf->object_size) != object->cookie->object_size) {
why = cachefiles_coherency_check_objsize;
} else if (buf->content == CACHEFILES_CONTENT_DIRTY) {
// TODO: Begin conflict resolution
pr_warn("Dirty object in cache\n");
why = cachefiles_coherency_check_dirty;
} else {
why = cachefiles_coherency_check_ok;
ret = 0;
}
trace_cachefiles_coherency(object, file_inode(file)->i_ino,
buf->content, why);
kfree(buf);
return ret;
}
/*
* remove the object's xattr to mark it stale
*/
int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
struct cachefiles_object *object,
struct dentry *dentry)
{
int ret;
ret = cachefiles_inject_remove_error();
if (ret == 0)
ret = vfs_removexattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache);
if (ret < 0) {
trace_cachefiles_vfs_error(object, d_inode(dentry), ret,
cachefiles_trace_remxattr_error);
if (ret == -ENOENT || ret == -ENODATA)
ret = 0;
else if (ret != -ENOMEM)
cachefiles_io_error(cache,
"Can't remove xattr from %lu"
" (error %d)",
d_backing_inode(dentry)->i_ino, -ret);
}
_leave(" = %d", ret);
return ret;
}
/*
* Stick a marker on the cache object to indicate that it's dirty.
*/
void cachefiles_prepare_to_write(struct fscache_cookie *cookie)
{
const struct cred *saved_cred;
struct cachefiles_object *object = cookie->cache_priv;
struct cachefiles_cache *cache = object->volume->cache;
_enter("c=%08x", object->cookie->debug_id);
if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
cachefiles_begin_secure(cache, &saved_cred);
cachefiles_set_object_xattr(object);
cachefiles_end_secure(cache, saved_cred);
}
}
/*
* Set the state xattr on a volume directory.
*/
bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
{
struct cachefiles_vol_xattr *buf;
unsigned int len = volume->vcookie->coherency_len;
const void *p = volume->vcookie->coherency;
struct dentry *dentry = volume->dentry;
int ret;
_enter("%x,#%d", volume->vcookie->debug_id, len);
len += sizeof(*buf);
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return false;
buf->reserved = cpu_to_be32(0);
memcpy(buf->data, p, volume->vcookie->coherency_len);
ret = cachefiles_inject_write_error();
if (ret == 0)
ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache,
buf, len, 0);
if (ret < 0) {
trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret,
cachefiles_trace_setxattr_error);
trace_cachefiles_vol_coherency(volume, d_inode(dentry)->i_ino,
cachefiles_coherency_vol_set_fail);
if (ret != -ENOMEM)
cachefiles_io_error(
volume->cache, "Failed to set xattr with error %d", ret);
} else {
trace_cachefiles_vol_coherency(volume, d_inode(dentry)->i_ino,
cachefiles_coherency_vol_set_ok);
}
kfree(buf);
_leave(" = %d", ret);
return ret == 0;
}
/*
* Check the consistency between the backing cache and the volume cookie.
*/
int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
{
struct cachefiles_vol_xattr *buf;
struct dentry *dentry = volume->dentry;
unsigned int len = volume->vcookie->coherency_len;
const void *p = volume->vcookie->coherency;
enum cachefiles_coherency_trace why;
ssize_t xlen;
int ret = -ESTALE;
_enter("");
len += sizeof(*buf);
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
xlen = cachefiles_inject_read_error();
if (xlen == 0)
xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, len);
if (xlen != len) {
if (xlen < 0) {
trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen,
cachefiles_trace_getxattr_error);
if (xlen == -EIO)
cachefiles_io_error(
volume->cache,
"Failed to read xattr with error %zd", xlen);
}
why = cachefiles_coherency_vol_check_xattr;
} else if (buf->reserved != cpu_to_be32(0)) {
why = cachefiles_coherency_vol_check_resv;
} else if (memcmp(buf->data, p, len - sizeof(*buf)) != 0) {
why = cachefiles_coherency_vol_check_cmp;
} else {
why = cachefiles_coherency_vol_check_ok;
ret = 0;
}
trace_cachefiles_vol_coherency(volume, d_inode(dentry)->i_ino, why);
kfree(buf);
_leave(" = %d", ret);
return ret;
}
| linux-master | fs/cachefiles/xattr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* kiocb-using read/write
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/sched/mm.h>
#include <trace/events/fscache.h>
#include "internal.h"
struct cachefiles_kiocb {
struct kiocb iocb;
refcount_t ki_refcnt;
loff_t start;
union {
size_t skipped;
size_t len;
};
struct cachefiles_object *object;
netfs_io_terminated_t term_func;
void *term_func_priv;
bool was_async;
unsigned int inval_counter; /* Copy of cookie->inval_counter */
u64 b_writing;
};
static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
{
if (refcount_dec_and_test(&ki->ki_refcnt)) {
cachefiles_put_object(ki->object, cachefiles_obj_put_ioreq);
fput(ki->iocb.ki_filp);
kfree(ki);
}
}
/*
* Handle completion of a read from the cache.
*/
static void cachefiles_read_complete(struct kiocb *iocb, long ret)
{
struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
struct inode *inode = file_inode(ki->iocb.ki_filp);
_enter("%ld", ret);
if (ret < 0)
trace_cachefiles_io_error(ki->object, inode, ret,
cachefiles_trace_read_error);
if (ki->term_func) {
if (ret >= 0) {
if (ki->object->cookie->inval_counter == ki->inval_counter)
ki->skipped += ret;
else
ret = -ESTALE;
}
ki->term_func(ki->term_func_priv, ret, ki->was_async);
}
cachefiles_put_kiocb(ki);
}
/*
* Initiate a read from the cache.
*/
static int cachefiles_read(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
enum netfs_read_from_hole read_hole,
netfs_io_terminated_t term_func,
void *term_func_priv)
{
struct cachefiles_object *object;
struct cachefiles_kiocb *ki;
struct file *file;
unsigned int old_nofs;
ssize_t ret = -ENOBUFS;
size_t len = iov_iter_count(iter), skipped = 0;
if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
goto presubmission_error;
fscache_count_read();
object = cachefiles_cres_object(cres);
file = cachefiles_cres_file(cres);
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start_pos, len,
i_size_read(file_inode(file)));
/* If the caller asked us to seek for data before doing the read, then
* we should do that now. If we find a gap, we fill it with zeros.
*/
if (read_hole != NETFS_READ_HOLE_IGNORE) {
loff_t off = start_pos, off2;
off2 = cachefiles_inject_read_error();
if (off2 == 0)
off2 = vfs_llseek(file, off, SEEK_DATA);
if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO && off2 != -ENXIO) {
skipped = 0;
ret = off2;
goto presubmission_error;
}
if (off2 == -ENXIO || off2 >= start_pos + len) {
/* The region is beyond the EOF or there's no more data
* in the region, so clear the rest of the buffer and
* return success.
*/
ret = -ENODATA;
if (read_hole == NETFS_READ_HOLE_FAIL)
goto presubmission_error;
iov_iter_zero(len, iter);
skipped = len;
ret = 0;
goto presubmission_error;
}
skipped = off2 - off;
iov_iter_zero(skipped, iter);
}
ret = -ENOMEM;
ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
if (!ki)
goto presubmission_error;
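	/* Two refs: one for the I/O completion handler and one dropped when
	 * submission finishes.
	 */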
refcount_set(&ki->ki_refcnt, 2);
ki->iocb.ki_filp = file;
ki->iocb.ki_pos = start_pos + skipped;
ki->iocb.ki_flags = IOCB_DIRECT;
ki->iocb.ki_ioprio = get_current_ioprio();
ki->skipped = skipped;
ki->object = object;
ki->inval_counter = cres->inval_counter;
ki->term_func = term_func;
ki->term_func_priv = term_func_priv;
ki->was_async = true;
if (ki->term_func)
ki->iocb.ki_complete = cachefiles_read_complete;
get_file(ki->iocb.ki_filp);
cachefiles_grab_object(object, cachefiles_obj_get_ioreq);
trace_cachefiles_read(object, file_inode(file), ki->iocb.ki_pos, len - skipped);
old_nofs = memalloc_nofs_save();
ret = cachefiles_inject_read_error();
if (ret == 0)
ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
memalloc_nofs_restore(old_nofs);
switch (ret) {
case -EIOCBQUEUED:
goto in_progress;
case -ERESTARTSYS:
case -ERESTARTNOINTR:
case -ERESTARTNOHAND:
case -ERESTART_RESTARTBLOCK:
		/* There's no easy way to restart the syscall since other AIOs
		 * may already be running.  Just fail this I/O with -EINTR.
		 */
ret = -EINTR;
fallthrough;
default:
ki->was_async = false;
cachefiles_read_complete(&ki->iocb, ret);
if (ret > 0)
ret = 0;
break;
}
in_progress:
cachefiles_put_kiocb(ki);
_leave(" = %zd", ret);
return ret;
presubmission_error:
if (term_func)
term_func(term_func_priv, ret < 0 ? ret : skipped, false);
return ret;
}
/*
* Query the occupancy of the cache in a region, returning where the next chunk
* of data starts and how long it is.
*/
static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
loff_t start, size_t len, size_t granularity,
loff_t *_data_start, size_t *_data_len)
{
struct cachefiles_object *object;
struct file *file;
loff_t off, off2;
*_data_start = -1;
*_data_len = 0;
if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
return -ENOBUFS;
object = cachefiles_cres_object(cres);
file = cachefiles_cres_file(cres);
granularity = max_t(size_t, object->volume->cache->bsize, granularity);
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start, len,
i_size_read(file_inode(file)));
off = cachefiles_inject_read_error();
if (off == 0)
off = vfs_llseek(file, start, SEEK_DATA);
if (off == -ENXIO)
return -ENODATA; /* Beyond EOF */
if (off < 0 && off >= (loff_t)-MAX_ERRNO)
return -ENOBUFS; /* Error. */
if (round_up(off, granularity) >= start + len)
return -ENODATA; /* No data in range */
off2 = cachefiles_inject_read_error();
if (off2 == 0)
off2 = vfs_llseek(file, off, SEEK_HOLE);
if (off2 == -ENXIO)
return -ENODATA; /* Beyond EOF */
if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
return -ENOBUFS; /* Error. */
/* Round away partial blocks */
off = round_up(off, granularity);
off2 = round_down(off2, granularity);
if (off2 <= off)
return -ENODATA;
*_data_start = off;
if (off2 > start + len)
*_data_len = len;
else
*_data_len = off2 - off;
return 0;
}
/*
* Handle completion of a write to the cache.
*/
static void cachefiles_write_complete(struct kiocb *iocb, long ret)
{
struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
struct cachefiles_object *object = ki->object;
struct inode *inode = file_inode(ki->iocb.ki_filp);
_enter("%ld", ret);
kiocb_end_write(iocb);
if (ret < 0)
trace_cachefiles_io_error(object, inode, ret,
cachefiles_trace_write_error);
atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
if (ki->term_func)
ki->term_func(ki->term_func_priv, ret, ki->was_async);
cachefiles_put_kiocb(ki);
}
/*
* Initiate a write to the cache.
*/
int __cachefiles_write(struct cachefiles_object *object,
struct file *file,
loff_t start_pos,
struct iov_iter *iter,
netfs_io_terminated_t term_func,
void *term_func_priv)
{
struct cachefiles_cache *cache;
struct cachefiles_kiocb *ki;
unsigned int old_nofs;
ssize_t ret;
size_t len = iov_iter_count(iter);
fscache_count_write();
cache = object->volume->cache;
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start_pos, len,
i_size_read(file_inode(file)));
ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
if (!ki) {
if (term_func)
term_func(term_func_priv, -ENOMEM, false);
return -ENOMEM;
}
refcount_set(&ki->ki_refcnt, 2);
ki->iocb.ki_filp = file;
ki->iocb.ki_pos = start_pos;
ki->iocb.ki_flags = IOCB_DIRECT | IOCB_WRITE;
ki->iocb.ki_ioprio = get_current_ioprio();
ki->object = object;
ki->start = start_pos;
ki->len = len;
ki->term_func = term_func;
ki->term_func_priv = term_func_priv;
ki->was_async = true;
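	/* Count the cache blocks this write touches so that in-flight I/O is
	 * included in the cache's space accounting.
	 */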
ki->b_writing = (len + (1 << cache->bshift) - 1) >> cache->bshift;
if (ki->term_func)
ki->iocb.ki_complete = cachefiles_write_complete;
atomic_long_add(ki->b_writing, &cache->b_writing);
kiocb_start_write(&ki->iocb);
get_file(ki->iocb.ki_filp);
cachefiles_grab_object(object, cachefiles_obj_get_ioreq);
trace_cachefiles_write(object, file_inode(file), ki->iocb.ki_pos, len);
old_nofs = memalloc_nofs_save();
ret = cachefiles_inject_write_error();
if (ret == 0)
ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
memalloc_nofs_restore(old_nofs);
switch (ret) {
case -EIOCBQUEUED:
goto in_progress;
case -ERESTARTSYS:
case -ERESTARTNOINTR:
case -ERESTARTNOHAND:
case -ERESTART_RESTARTBLOCK:
		/* There's no easy way to restart the syscall since other AIOs
		 * may already be running.  Just fail this I/O with -EINTR.
		 */
ret = -EINTR;
fallthrough;
default:
ki->was_async = false;
cachefiles_write_complete(&ki->iocb, ret);
if (ret > 0)
ret = 0;
break;
}
in_progress:
cachefiles_put_kiocb(ki);
_leave(" = %zd", ret);
return ret;
}
static int cachefiles_write(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
netfs_io_terminated_t term_func,
void *term_func_priv)
{
if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
if (term_func)
term_func(term_func_priv, -ENOBUFS, false);
return -ENOBUFS;
}
return __cachefiles_write(cachefiles_cres_object(cres),
cachefiles_cres_file(cres),
start_pos, iter,
term_func, term_func_priv);
}
static inline enum netfs_io_source
cachefiles_do_prepare_read(struct netfs_cache_resources *cres,
loff_t start, size_t *_len, loff_t i_size,
unsigned long *_flags, ino_t netfs_ino)
{
enum cachefiles_prepare_read_trace why;
struct cachefiles_object *object = NULL;
struct cachefiles_cache *cache;
struct fscache_cookie *cookie = fscache_cres_cookie(cres);
const struct cred *saved_cred;
struct file *file = cachefiles_cres_file(cres);
enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
size_t len = *_len;
loff_t off, to;
ino_t ino = file ? file_inode(file)->i_ino : 0;
int rc;
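	/* Assume the data will have to come from the server; the cache checks
	 * below upgrade this to NETFS_READ_FROM_CACHE where possible.
	 */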
_enter("%zx @%llx/%llx", len, start, i_size);
if (start >= i_size) {
ret = NETFS_FILL_WITH_ZEROES;
why = cachefiles_trace_read_after_eof;
goto out_no_object;
}
if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
__set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
why = cachefiles_trace_read_no_data;
if (!test_bit(NETFS_SREQ_ONDEMAND, _flags))
goto out_no_object;
}
/* The object and the file may be being created in the background. */
if (!file) {
why = cachefiles_trace_read_no_file;
if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
goto out_no_object;
file = cachefiles_cres_file(cres);
if (!file)
goto out_no_object;
ino = file_inode(file)->i_ino;
}
object = cachefiles_cres_object(cres);
cache = object->volume->cache;
cachefiles_begin_secure(cache, &saved_cred);
retry:
off = cachefiles_inject_read_error();
if (off == 0)
off = vfs_llseek(file, start, SEEK_DATA);
if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
if (off == (loff_t)-ENXIO) {
why = cachefiles_trace_read_seek_nxio;
goto download_and_store;
}
trace_cachefiles_io_error(object, file_inode(file), off,
cachefiles_trace_seek_error);
why = cachefiles_trace_read_seek_error;
goto out;
}
if (off >= start + len) {
why = cachefiles_trace_read_found_hole;
goto download_and_store;
}
if (off > start) {
off = round_up(off, cache->bsize);
len = off - start;
*_len = len;
why = cachefiles_trace_read_found_part;
goto download_and_store;
}
to = cachefiles_inject_read_error();
if (to == 0)
to = vfs_llseek(file, start, SEEK_HOLE);
if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
trace_cachefiles_io_error(object, file_inode(file), to,
cachefiles_trace_seek_error);
why = cachefiles_trace_read_seek_error;
goto out;
}
if (to < start + len) {
if (start + len >= i_size)
to = round_up(to, cache->bsize);
else
to = round_down(to, cache->bsize);
len = to - start;
*_len = len;
}
why = cachefiles_trace_read_have_data;
ret = NETFS_READ_FROM_CACHE;
goto out;
download_and_store:
__set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
if (test_bit(NETFS_SREQ_ONDEMAND, _flags)) {
rc = cachefiles_ondemand_read(object, start, len);
if (!rc) {
__clear_bit(NETFS_SREQ_ONDEMAND, _flags);
goto retry;
}
ret = NETFS_INVALID_READ;
}
out:
cachefiles_end_secure(cache, saved_cred);
out_no_object:
trace_cachefiles_prep_read(object, start, len, *_flags, ret, why, ino, netfs_ino);
return ret;
}
/*
* Prepare a read operation, shortening it to a cached/uncached
* boundary as appropriate.
*/
static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
loff_t i_size)
{
return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
subreq->start, &subreq->len, i_size,
&subreq->flags, subreq->rreq->inode->i_ino);
}
/*
* Prepare an on-demand read operation, shortening it to a cached/uncached
* boundary as appropriate.
*/
static enum netfs_io_source
cachefiles_prepare_ondemand_read(struct netfs_cache_resources *cres,
loff_t start, size_t *_len, loff_t i_size,
unsigned long *_flags, ino_t ino)
{
return cachefiles_do_prepare_read(cres, start, _len, i_size, _flags, ino);
}
/*
* Prepare for a write to occur.
*/
int __cachefiles_prepare_write(struct cachefiles_object *object,
struct file *file,
loff_t *_start, size_t *_len,
bool no_space_allocated_yet)
{
struct cachefiles_cache *cache = object->volume->cache;
loff_t start = *_start, pos;
size_t len = *_len, down;
int ret;
/* Round to DIO size */
down = start - round_down(start, PAGE_SIZE);
*_start = start - down;
*_len = round_up(down + len, PAGE_SIZE);
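	/* For example, with 4KiB pages, a 10-byte write at offset 4100 gives
	 * down = 4, *_start = 4096 and *_len = round_up(14, 4096) = 4096.
	 */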
/* We need to work out whether there's sufficient disk space to perform
* the write - but we can skip that check if we have space already
* allocated.
*/
if (no_space_allocated_yet)
goto check_space;
pos = cachefiles_inject_read_error();
if (pos == 0)
pos = vfs_llseek(file, *_start, SEEK_DATA);
if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
if (pos == -ENXIO)
goto check_space; /* Unallocated tail */
trace_cachefiles_io_error(object, file_inode(file), pos,
cachefiles_trace_seek_error);
return pos;
}
if ((u64)pos >= (u64)*_start + *_len)
goto check_space; /* Unallocated region */
/* We have a block that's at least partially filled - if we're low on
* space, we need to see if it's fully allocated. If it's not, we may
* want to cull it.
*/
if (cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
cachefiles_has_space_check) == 0)
return 0; /* Enough space to simply overwrite the whole block */
pos = cachefiles_inject_read_error();
if (pos == 0)
pos = vfs_llseek(file, *_start, SEEK_HOLE);
if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
trace_cachefiles_io_error(object, file_inode(file), pos,
cachefiles_trace_seek_error);
return pos;
}
if ((u64)pos >= (u64)*_start + *_len)
return 0; /* Fully allocated */
/* Partially allocated, but insufficient space: cull. */
fscache_count_no_write_space();
ret = cachefiles_inject_remove_error();
if (ret == 0)
ret = vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
*_start, *_len);
if (ret < 0) {
trace_cachefiles_io_error(object, file_inode(file), ret,
cachefiles_trace_fallocate_error);
cachefiles_io_error_obj(object,
"CacheFiles: fallocate failed (%d)\n", ret);
ret = -EIO;
}
return ret;
check_space:
return cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
cachefiles_has_space_for_write);
}
static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
loff_t *_start, size_t *_len, loff_t i_size,
bool no_space_allocated_yet)
{
struct cachefiles_object *object = cachefiles_cres_object(cres);
struct cachefiles_cache *cache = object->volume->cache;
const struct cred *saved_cred;
int ret;
if (!cachefiles_cres_file(cres)) {
if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
return -ENOBUFS;
if (!cachefiles_cres_file(cres))
return -ENOBUFS;
}
cachefiles_begin_secure(cache, &saved_cred);
ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
_start, _len,
no_space_allocated_yet);
cachefiles_end_secure(cache, saved_cred);
return ret;
}
/*
* Clean up an operation.
*/
static void cachefiles_end_operation(struct netfs_cache_resources *cres)
{
struct file *file = cachefiles_cres_file(cres);
if (file)
fput(file);
fscache_end_cookie_access(fscache_cres_cookie(cres), fscache_access_io_end);
}
static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
.end_operation = cachefiles_end_operation,
.read = cachefiles_read,
.write = cachefiles_write,
.prepare_read = cachefiles_prepare_read,
.prepare_write = cachefiles_prepare_write,
.prepare_ondemand_read = cachefiles_prepare_ondemand_read,
.query_occupancy = cachefiles_query_occupancy,
};
/*
* Open the cache file when beginning a cache operation.
*/
bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
enum fscache_want_state want_state)
{
struct cachefiles_object *object = cachefiles_cres_object(cres);
if (!cachefiles_cres_file(cres)) {
cres->ops = &cachefiles_netfs_cache_ops;
if (object->file) {
spin_lock(&object->lock);
if (!cres->cache_priv2 && object->file)
cres->cache_priv2 = get_file(object->file);
spin_unlock(&object->lock);
}
}
if (!cachefiles_cres_file(cres) && want_state != FSCACHE_WANT_PARAMS) {
pr_err("failed to get cres->file\n");
return false;
}
return true;
}
| linux-master | fs/cachefiles/io.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Volume handling.
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include "internal.h"
#include <trace/events/fscache.h>
/*
* Allocate and set up a volume representation. We make sure all the fanout
* directories are created and pinned.
*/
void cachefiles_acquire_volume(struct fscache_volume *vcookie)
{
struct cachefiles_volume *volume;
struct cachefiles_cache *cache = vcookie->cache->cache_priv;
const struct cred *saved_cred;
struct dentry *vdentry, *fan;
size_t len;
char *name;
bool is_new = false;
int ret, n_accesses, i;
_enter("");
volume = kzalloc(sizeof(struct cachefiles_volume), GFP_KERNEL);
if (!volume)
return;
volume->vcookie = vcookie;
volume->cache = cache;
INIT_LIST_HEAD(&volume->cache_link);
cachefiles_begin_secure(cache, &saved_cred);
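	/* The volume directory is named "I" followed by the volume key; the
	 * same buffer is reused below for the "@xx" fanout names.
	 */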
len = vcookie->key[0];
name = kmalloc(len + 3, GFP_NOFS);
if (!name)
goto error_vol;
name[0] = 'I';
memcpy(name + 1, vcookie->key + 1, len);
name[len + 1] = 0;
retry:
vdentry = cachefiles_get_directory(cache, cache->store, name, &is_new);
if (IS_ERR(vdentry))
goto error_name;
volume->dentry = vdentry;
if (is_new) {
if (!cachefiles_set_volume_xattr(volume))
goto error_dir;
} else {
ret = cachefiles_check_volume_xattr(volume);
if (ret < 0) {
if (ret != -ESTALE)
goto error_dir;
inode_lock_nested(d_inode(cache->store), I_MUTEX_PARENT);
cachefiles_bury_object(cache, NULL, cache->store, vdentry,
FSCACHE_VOLUME_IS_WEIRD);
cachefiles_put_directory(volume->dentry);
cond_resched();
goto retry;
}
}
for (i = 0; i < 256; i++) {
sprintf(name, "@%02x", i);
fan = cachefiles_get_directory(cache, vdentry, name, NULL);
if (IS_ERR(fan))
goto error_fan;
volume->fanout[i] = fan;
}
cachefiles_end_secure(cache, saved_cred);
vcookie->cache_priv = volume;
n_accesses = atomic_inc_return(&vcookie->n_accesses); /* Stop wakeups on dec-to-0 */
trace_fscache_access_volume(vcookie->debug_id, 0,
refcount_read(&vcookie->ref),
n_accesses, fscache_access_cache_pin);
spin_lock(&cache->object_list_lock);
list_add(&volume->cache_link, &volume->cache->volumes);
spin_unlock(&cache->object_list_lock);
kfree(name);
return;
error_fan:
for (i = 0; i < 256; i++)
cachefiles_put_directory(volume->fanout[i]);
error_dir:
cachefiles_put_directory(volume->dentry);
error_name:
kfree(name);
error_vol:
kfree(volume);
cachefiles_end_secure(cache, saved_cred);
}
/*
* Release a volume representation.
*/
static void __cachefiles_free_volume(struct cachefiles_volume *volume)
{
int i;
_enter("");
volume->vcookie->cache_priv = NULL;
for (i = 0; i < 256; i++)
cachefiles_put_directory(volume->fanout[i]);
cachefiles_put_directory(volume->dentry);
kfree(volume);
}
void cachefiles_free_volume(struct fscache_volume *vcookie)
{
struct cachefiles_volume *volume = vcookie->cache_priv;
if (volume) {
spin_lock(&volume->cache->object_list_lock);
list_del_init(&volume->cache_link);
spin_unlock(&volume->cache->object_list_lock);
__cachefiles_free_volume(volume);
}
}
void cachefiles_withdraw_volume(struct cachefiles_volume *volume)
{
fscache_withdraw_volume(volume->vcookie);
cachefiles_set_volume_xattr(volume);
__cachefiles_free_volume(volume);
}
| linux-master | fs/cachefiles/volume.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem caching backend to use cache files on a premounted
* filesystem
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/sysctl.h>
#include <linux/miscdevice.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>
#define CREATE_TRACE_POINTS
#include "internal.h"
unsigned cachefiles_debug;
module_param_named(debug, cachefiles_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cachefiles_debug, "CacheFiles debugging mask");
MODULE_DESCRIPTION("Mounted-filesystem based cache");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
struct kmem_cache *cachefiles_object_jar;
static struct miscdevice cachefiles_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "cachefiles",
.fops = &cachefiles_daemon_fops,
};
/*
* initialise the fs caching module
*/
static int __init cachefiles_init(void)
{
int ret;
ret = cachefiles_register_error_injection();
if (ret < 0)
goto error_einj;
ret = misc_register(&cachefiles_dev);
if (ret < 0)
goto error_dev;
/* create an object jar */
ret = -ENOMEM;
cachefiles_object_jar =
kmem_cache_create("cachefiles_object_jar",
sizeof(struct cachefiles_object),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!cachefiles_object_jar) {
pr_notice("Failed to allocate an object jar\n");
goto error_object_jar;
}
pr_info("Loaded\n");
return 0;
error_object_jar:
misc_deregister(&cachefiles_dev);
error_dev:
cachefiles_unregister_error_injection();
error_einj:
pr_err("failed to register: %d\n", ret);
return ret;
}
fs_initcall(cachefiles_init);
/*
* clean up on module removal
*/
static void __exit cachefiles_exit(void)
{
pr_info("Unloading\n");
kmem_cache_destroy(cachefiles_object_jar);
misc_deregister(&cachefiles_dev);
cachefiles_unregister_error_injection();
}
module_exit(cachefiles_exit);
| linux-master | fs/cachefiles/main.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
*
* Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"
static int cachefiles_daemon_open(struct inode *, struct file *);
static int cachefiles_daemon_release(struct inode *, struct file *);
static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
loff_t *);
static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
size_t, loff_t *);
static __poll_t cachefiles_daemon_poll(struct file *,
struct poll_table_struct *);
static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_brun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *);
static int cachefiles_daemon_cull(struct cachefiles_cache *, char *);
static int cachefiles_daemon_debug(struct cachefiles_cache *, char *);
static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
static void cachefiles_daemon_unbind(struct cachefiles_cache *);
static unsigned long cachefiles_open;
const struct file_operations cachefiles_daemon_fops = {
.owner = THIS_MODULE,
.open = cachefiles_daemon_open,
.release = cachefiles_daemon_release,
.read = cachefiles_daemon_read,
.write = cachefiles_daemon_write,
.poll = cachefiles_daemon_poll,
.llseek = noop_llseek,
};
struct cachefiles_daemon_cmd {
char name[8];
int (*handler)(struct cachefiles_cache *cache, char *args);
};
static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
{ "bind", cachefiles_daemon_bind },
{ "brun", cachefiles_daemon_brun },
{ "bcull", cachefiles_daemon_bcull },
{ "bstop", cachefiles_daemon_bstop },
{ "cull", cachefiles_daemon_cull },
{ "debug", cachefiles_daemon_debug },
{ "dir", cachefiles_daemon_dir },
{ "frun", cachefiles_daemon_frun },
{ "fcull", cachefiles_daemon_fcull },
{ "fstop", cachefiles_daemon_fstop },
{ "inuse", cachefiles_daemon_inuse },
{ "secctx", cachefiles_daemon_secctx },
{ "tag", cachefiles_daemon_tag },
#ifdef CONFIG_CACHEFILES_ONDEMAND
{ "copen", cachefiles_ondemand_copen },
#endif
{ "", NULL }
};
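/*
 * A typical command sequence written by the cachefilesd daemon looks
 * something like this (illustrative only):
 *
 *	dir /var/cache/fscache
 *	tag mycache
 *	brun 10%
 *	bcull 7%
 *	bstop 3%
 *	bind
 */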
/*
* Prepare a cache for caching.
*/
static int cachefiles_daemon_open(struct inode *inode, struct file *file)
{
struct cachefiles_cache *cache;
_enter("");
/* only the superuser may do this */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* the cachefiles device may only be open once at a time */
if (xchg(&cachefiles_open, 1) == 1)
return -EBUSY;
/* allocate a cache record */
cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);
if (!cache) {
cachefiles_open = 0;
return -ENOMEM;
}
mutex_init(&cache->daemon_mutex);
init_waitqueue_head(&cache->daemon_pollwq);
INIT_LIST_HEAD(&cache->volumes);
INIT_LIST_HEAD(&cache->object_list);
spin_lock_init(&cache->object_list_lock);
refcount_set(&cache->unbind_pincount, 1);
xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC);
xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1);
/* set default caching limits
* - limit at 1% free space and/or free files
* - cull below 5% free space and/or free files
* - cease culling above 7% free space and/or free files
*/
cache->frun_percent = 7;
cache->fcull_percent = 5;
cache->fstop_percent = 1;
cache->brun_percent = 7;
cache->bcull_percent = 5;
cache->bstop_percent = 1;
file->private_data = cache;
cache->cachefilesd = file;
return 0;
}
static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
struct xarray *xa = &cache->reqs;
struct cachefiles_req *req;
unsigned long index;
/*
* Make sure the following two operations won't be reordered.
* 1) set CACHEFILES_DEAD bit
* 2) flush requests in the xarray
* Otherwise the request may be enqueued after xarray has been
* flushed, leaving the orphan request never being completed.
*
* CPU 1 CPU 2
* ===== =====
* flush requests in the xarray
* test CACHEFILES_DEAD bit
* enqueue the request
* set CACHEFILES_DEAD bit
*/
smp_mb();
xa_lock(xa);
xa_for_each(xa, index, req) {
req->error = -EIO;
complete(&req->done);
}
xa_unlock(xa);
xa_destroy(&cache->reqs);
xa_destroy(&cache->ondemand_ids);
}
void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache)
{
if (refcount_dec_and_test(&cache->unbind_pincount)) {
cachefiles_daemon_unbind(cache);
cachefiles_open = 0;
kfree(cache);
}
}
void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache)
{
refcount_inc(&cache->unbind_pincount);
}
/*
* Release a cache.
*/
static int cachefiles_daemon_release(struct inode *inode, struct file *file)
{
struct cachefiles_cache *cache = file->private_data;
_enter("");
ASSERT(cache);
set_bit(CACHEFILES_DEAD, &cache->flags);
if (cachefiles_in_ondemand_mode(cache))
cachefiles_flush_reqs(cache);
/* clean up the control file interface */
cache->cachefilesd = NULL;
file->private_data = NULL;
cachefiles_put_unbind_pincount(cache);
_leave("");
return 0;
}
static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache,
char __user *_buffer, size_t buflen)
{
unsigned long long b_released;
unsigned f_released;
char buffer[256];
int n;
/* check how much space the cache has */
cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);
/* summarise */
f_released = atomic_xchg(&cache->f_released, 0);
b_released = atomic_long_xchg(&cache->b_released, 0);
clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);
n = snprintf(buffer, sizeof(buffer),
"cull=%c"
" frun=%llx"
" fcull=%llx"
" fstop=%llx"
" brun=%llx"
" bcull=%llx"
" bstop=%llx"
" freleased=%x"
" breleased=%llx",
test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
(unsigned long long) cache->frun,
(unsigned long long) cache->fcull,
(unsigned long long) cache->fstop,
(unsigned long long) cache->brun,
(unsigned long long) cache->bcull,
(unsigned long long) cache->bstop,
f_released,
b_released);
if (n > buflen)
return -EMSGSIZE;
if (copy_to_user(_buffer, buffer, n) != 0)
return -EFAULT;
return n;
}
/*
* Read the cache state.
*/
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
size_t buflen, loff_t *pos)
{
struct cachefiles_cache *cache = file->private_data;
//_enter(",,%zu,", buflen);
if (!test_bit(CACHEFILES_READY, &cache->flags))
return 0;
if (cachefiles_in_ondemand_mode(cache))
return cachefiles_ondemand_daemon_read(cache, _buffer, buflen);
else
return cachefiles_do_daemon_read(cache, _buffer, buflen);
}
/*
* Take a command from cachefilesd, parse it and act on it.
*/
static ssize_t cachefiles_daemon_write(struct file *file,
const char __user *_data,
size_t datalen,
loff_t *pos)
{
const struct cachefiles_daemon_cmd *cmd;
struct cachefiles_cache *cache = file->private_data;
ssize_t ret;
char *data, *args, *cp;
//_enter(",,%zu,", datalen);
ASSERT(cache);
if (test_bit(CACHEFILES_DEAD, &cache->flags))
return -EIO;
if (datalen > PAGE_SIZE - 1)
return -EOPNOTSUPP;
/* drag the command string into the kernel so we can parse it */
data = memdup_user_nul(_data, datalen);
if (IS_ERR(data))
return PTR_ERR(data);
ret = -EINVAL;
if (memchr(data, '\0', datalen))
goto error;
/* strip any newline */
cp = memchr(data, '\n', datalen);
if (cp) {
if (cp == data)
goto error;
*cp = '\0';
}
/* parse the command */
ret = -EOPNOTSUPP;
for (args = data; *args; args++)
if (isspace(*args))
break;
if (*args) {
if (args == data)
goto error;
*args = '\0';
args = skip_spaces(++args);
}
/* run the appropriate command handler */
for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++)
if (strcmp(cmd->name, data) == 0)
goto found_command;
error:
kfree(data);
//_leave(" = %zd", ret);
return ret;
found_command:
mutex_lock(&cache->daemon_mutex);
ret = -EIO;
if (!test_bit(CACHEFILES_DEAD, &cache->flags))
ret = cmd->handler(cache, args);
mutex_unlock(&cache->daemon_mutex);
if (ret == 0)
ret = datalen;
goto error;
}
/*
* Poll for culling state
* - use EPOLLOUT to indicate culling state
*/
static __poll_t cachefiles_daemon_poll(struct file *file,
struct poll_table_struct *poll)
{
struct cachefiles_cache *cache = file->private_data;
__poll_t mask;
poll_wait(file, &cache->daemon_pollwq, poll);
mask = 0;
if (cachefiles_in_ondemand_mode(cache)) {
if (!xa_empty(&cache->reqs))
mask |= EPOLLIN;
} else {
if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
mask |= EPOLLIN;
}
if (test_bit(CACHEFILES_CULLING, &cache->flags))
mask |= EPOLLOUT;
return mask;
}
/*
* Give a range error for cache space constraints
* - can be tail-called
*/
static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
char *args)
{
pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");
return -EINVAL;
}
/*
* Set the percentage of files at which to stop culling
* - command: "frun <N>%"
*/
static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
{
unsigned long frun;
_enter(",%s", args);
if (!*args)
return -EINVAL;
frun = simple_strtoul(args, &args, 10);
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
if (frun <= cache->fcull_percent || frun >= 100)
return cachefiles_daemon_range_error(cache, args);
cache->frun_percent = frun;
return 0;
}
/*
* Set the percentage of files at which to start culling
* - command: "fcull <N>%"
*/
static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
{
unsigned long fcull;
_enter(",%s", args);
if (!*args)
return -EINVAL;
fcull = simple_strtoul(args, &args, 10);
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent)
return cachefiles_daemon_range_error(cache, args);
cache->fcull_percent = fcull;
return 0;
}
/*
* Set the percentage of files at which to stop allocating
* - command: "fstop <N>%"
*/
static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
{
unsigned long fstop;
_enter(",%s", args);
if (!*args)
return -EINVAL;
fstop = simple_strtoul(args, &args, 10);
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
if (fstop >= cache->fcull_percent)
return cachefiles_daemon_range_error(cache, args);
cache->fstop_percent = fstop;
return 0;
}
/*
* Set the percentage of blocks at which to stop culling
* - command: "brun <N>%"
*/
static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
{
unsigned long brun;
_enter(",%s", args);
if (!*args)
return -EINVAL;
brun = simple_strtoul(args, &args, 10);
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
if (brun <= cache->bcull_percent || brun >= 100)
return cachefiles_daemon_range_error(cache, args);
cache->brun_percent = brun;
return 0;
}
/*
* Set the percentage of blocks at which to start culling
* - command: "bcull <N>%"
*/
static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
{
unsigned long bcull;
_enter(",%s", args);
if (!*args)
return -EINVAL;
bcull = simple_strtoul(args, &args, 10);
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent)
return cachefiles_daemon_range_error(cache, args);
cache->bcull_percent = bcull;
return 0;
}
/*
* Set the percentage of blocks at which to stop allocating
* - command: "bstop <N>%"
*/
static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
{
unsigned long bstop;
_enter(",%s", args);
if (!*args)
return -EINVAL;
bstop = simple_strtoul(args, &args, 10);
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
if (bstop >= cache->bcull_percent)
return cachefiles_daemon_range_error(cache, args);
cache->bstop_percent = bstop;
return 0;
}
/*
* Set the cache directory
* - command: "dir <name>"
*/
static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
{
char *dir;
_enter(",%s", args);
if (!*args) {
pr_err("Empty directory specified\n");
return -EINVAL;
}
if (cache->rootdirname) {
pr_err("Second cache directory specified\n");
return -EEXIST;
}
dir = kstrdup(args, GFP_KERNEL);
if (!dir)
return -ENOMEM;
cache->rootdirname = dir;
return 0;
}
/*
* Set the cache security context
* - command: "secctx <ctx>"
*/
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
{
char *secctx;
_enter(",%s", args);
if (!*args) {
pr_err("Empty security context specified\n");
return -EINVAL;
}
if (cache->secctx) {
pr_err("Second security context specified\n");
return -EINVAL;
}
secctx = kstrdup(args, GFP_KERNEL);
if (!secctx)
return -ENOMEM;
cache->secctx = secctx;
return 0;
}
/*
* Set the cache tag
* - command: "tag <name>"
*/
static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
{
char *tag;
_enter(",%s", args);
if (!*args) {
pr_err("Empty tag specified\n");
return -EINVAL;
}
if (cache->tag)
return -EEXIST;
tag = kstrdup(args, GFP_KERNEL);
if (!tag)
return -ENOMEM;
cache->tag = tag;
return 0;
}
/*
* Request a node in the cache be culled from the current working directory
* - command: "cull <name>"
*/
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
{
struct path path;
const struct cred *saved_cred;
int ret;
_enter(",%s", args);
if (strchr(args, '/'))
goto inval;
if (!test_bit(CACHEFILES_READY, &cache->flags)) {
pr_err("cull applied to unready cache\n");
return -EIO;
}
if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
pr_err("cull applied to dead cache\n");
return -EIO;
}
get_fs_pwd(current->fs, &path);
if (!d_can_lookup(path.dentry))
goto notdir;
cachefiles_begin_secure(cache, &saved_cred);
ret = cachefiles_cull(cache, path.dentry, args);
cachefiles_end_secure(cache, saved_cred);
path_put(&path);
_leave(" = %d", ret);
return ret;
notdir:
path_put(&path);
pr_err("cull command requires dirfd to be a directory\n");
return -ENOTDIR;
inval:
pr_err("cull command requires dirfd and filename\n");
return -EINVAL;
}
/*
* Set debugging mode
* - command: "debug <mask>"
*/
static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
{
unsigned long mask;
_enter(",%s", args);
mask = simple_strtoul(args, &args, 0);
if (args[0] != '\0')
goto inval;
cachefiles_debug = mask;
_leave(" = 0");
return 0;
inval:
pr_err("debug command requires mask\n");
return -EINVAL;
}
/*
* Find out whether an object in the current working directory is in use or not
* - command: "inuse <name>"
*/
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
{
struct path path;
const struct cred *saved_cred;
int ret;
//_enter(",%s", args);
if (strchr(args, '/'))
goto inval;
if (!test_bit(CACHEFILES_READY, &cache->flags)) {
pr_err("inuse applied to unready cache\n");
return -EIO;
}
if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
pr_err("inuse applied to dead cache\n");
return -EIO;
}
get_fs_pwd(current->fs, &path);
if (!d_can_lookup(path.dentry))
goto notdir;
cachefiles_begin_secure(cache, &saved_cred);
ret = cachefiles_check_in_use(cache, path.dentry, args);
cachefiles_end_secure(cache, saved_cred);
path_put(&path);
//_leave(" = %d", ret);
return ret;
notdir:
path_put(&path);
pr_err("inuse command requires dirfd to be a directory\n");
return -ENOTDIR;
inval:
pr_err("inuse command requires dirfd and filename\n");
return -EINVAL;
}
/*
* Bind a directory as a cache
*/
static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
_enter("{%u,%u,%u,%u,%u,%u},%s",
cache->frun_percent,
cache->fcull_percent,
cache->fstop_percent,
cache->brun_percent,
cache->bcull_percent,
cache->bstop_percent,
args);
if (cache->fstop_percent >= cache->fcull_percent ||
cache->fcull_percent >= cache->frun_percent ||
cache->frun_percent >= 100)
return -ERANGE;
if (cache->bstop_percent >= cache->bcull_percent ||
cache->bcull_percent >= cache->brun_percent ||
cache->brun_percent >= 100)
return -ERANGE;
if (!cache->rootdirname) {
pr_err("No cache directory specified\n");
return -EINVAL;
}
/* Don't permit already bound caches to be re-bound */
if (test_bit(CACHEFILES_READY, &cache->flags)) {
pr_err("Cache already bound\n");
return -EBUSY;
}
if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) {
if (!strcmp(args, "ondemand")) {
set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags);
} else if (*args) {
pr_err("Invalid argument to the 'bind' command\n");
return -EINVAL;
}
} else if (*args) {
pr_err("'bind' command doesn't take an argument\n");
return -EINVAL;
}
	/* Make sure we have a copy of the tag string */
if (!cache->tag) {
/*
* The tag string is released by the fops->release()
* function, so we don't release it on error here
*/
cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
if (!cache->tag)
return -ENOMEM;
}
return cachefiles_add_cache(cache);
}
/*
* Unbind a cache.
*/
static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
{
_enter("");
if (test_bit(CACHEFILES_READY, &cache->flags))
cachefiles_withdraw_cache(cache);
cachefiles_put_directory(cache->graveyard);
cachefiles_put_directory(cache->store);
mntput(cache->mnt);
kfree(cache->rootdirname);
kfree(cache->secctx);
kfree(cache->tag);
_leave("");
}
| linux-master | fs/cachefiles/daemon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/fs.h>
#include <linux/namei.h>
#include "internal.h"
/*
* Mark the backing file as being a cache file if it's not already in use. The
* mark tells the culling request command that it's not allowed to cull the
* file or directory. The caller must hold the inode lock.
*/
static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
struct inode *inode)
{
bool can_use = false;
if (!(inode->i_flags & S_KERNEL_FILE)) {
inode->i_flags |= S_KERNEL_FILE;
trace_cachefiles_mark_active(object, inode);
can_use = true;
} else {
trace_cachefiles_mark_failed(object, inode);
}
return can_use;
}
static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
struct inode *inode)
{
bool can_use;
inode_lock(inode);
can_use = __cachefiles_mark_inode_in_use(object, inode);
inode_unlock(inode);
return can_use;
}
/*
* Unmark a backing inode. The caller must hold the inode lock.
*/
static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
struct inode *inode)
{
inode->i_flags &= ~S_KERNEL_FILE;
trace_cachefiles_mark_inactive(object, inode);
}
static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
struct inode *inode)
{
inode_lock(inode);
__cachefiles_unmark_inode_in_use(object, inode);
inode_unlock(inode);
}
/*
* Unmark a backing inode and tell cachefilesd that there's something that can
* be culled.
*/
void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
struct file *file)
{
struct cachefiles_cache *cache = object->volume->cache;
struct inode *inode = file_inode(file);
cachefiles_do_unmark_inode_in_use(object, inode);
if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
atomic_long_add(inode->i_blocks, &cache->b_released);
if (atomic_inc_return(&cache->f_released))
cachefiles_state_changed(cache);
}
}
/*
 * Get a subdirectory of the given directory, creating it if it doesn't
 * already exist.
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
struct dentry *dir,
const char *dirname,
bool *_is_new)
{
struct dentry *subdir;
struct path path;
int ret;
_enter(",,%s", dirname);
/* search the current directory for the element name */
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
retry:
ret = cachefiles_inject_read_error();
if (ret == 0)
subdir = lookup_one_len(dirname, dir, strlen(dirname));
else
subdir = ERR_PTR(ret);
trace_cachefiles_lookup(NULL, dir, subdir);
if (IS_ERR(subdir)) {
trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
PTR_ERR(subdir),
cachefiles_trace_lookup_error);
if (PTR_ERR(subdir) == -ENOMEM)
goto nomem_d_alloc;
goto lookup_error;
}
_debug("subdir -> %pd %s",
subdir, d_backing_inode(subdir) ? "positive" : "negative");
/* we need to create the subdir if it doesn't exist yet */
if (d_is_negative(subdir)) {
ret = cachefiles_has_space(cache, 1, 0,
cachefiles_has_space_for_create);
if (ret < 0)
goto mkdir_error;
_debug("attempt mkdir");
path.mnt = cache->mnt;
path.dentry = dir;
ret = security_path_mkdir(&path, subdir, 0700);
if (ret < 0)
goto mkdir_error;
ret = cachefiles_inject_write_error();
if (ret == 0)
ret = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700);
if (ret < 0) {
trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
cachefiles_trace_mkdir_error);
goto mkdir_error;
}
trace_cachefiles_mkdir(dir, subdir);
if (unlikely(d_unhashed(subdir))) {
cachefiles_put_directory(subdir);
goto retry;
}
ASSERT(d_backing_inode(subdir));
_debug("mkdir -> %pd{ino=%lu}",
subdir, d_backing_inode(subdir)->i_ino);
if (_is_new)
*_is_new = true;
}
/* Tell rmdir() it's not allowed to delete the subdir */
inode_lock(d_inode(subdir));
inode_unlock(d_inode(dir));
if (!__cachefiles_mark_inode_in_use(NULL, d_inode(subdir))) {
pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
subdir, d_inode(subdir)->i_ino);
goto mark_error;
}
inode_unlock(d_inode(subdir));
/* we need to make sure the subdir is a directory */
ASSERT(d_backing_inode(subdir));
if (!d_can_lookup(subdir)) {
pr_err("%s is not a directory\n", dirname);
ret = -EIO;
goto check_error;
}
ret = -EPERM;
if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
!d_backing_inode(subdir)->i_op->lookup ||
!d_backing_inode(subdir)->i_op->mkdir ||
!d_backing_inode(subdir)->i_op->rename ||
!d_backing_inode(subdir)->i_op->rmdir ||
!d_backing_inode(subdir)->i_op->unlink)
goto check_error;
_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
return subdir;
check_error:
cachefiles_put_directory(subdir);
_leave(" = %d [check]", ret);
return ERR_PTR(ret);
mark_error:
inode_unlock(d_inode(subdir));
dput(subdir);
return ERR_PTR(-EBUSY);
mkdir_error:
inode_unlock(d_inode(dir));
dput(subdir);
pr_err("mkdir %s failed with error %d\n", dirname, ret);
return ERR_PTR(ret);
lookup_error:
inode_unlock(d_inode(dir));
ret = PTR_ERR(subdir);
pr_err("Lookup %s failed with error %d\n", dirname, ret);
return ERR_PTR(ret);
nomem_d_alloc:
inode_unlock(d_inode(dir));
_leave(" = -ENOMEM");
return ERR_PTR(-ENOMEM);
}
/*
* Put a subdirectory.
*/
void cachefiles_put_directory(struct dentry *dir)
{
if (dir) {
cachefiles_do_unmark_inode_in_use(NULL, d_inode(dir));
dput(dir);
}
}
/*
* Remove a regular file from the cache.
*/
static int cachefiles_unlink(struct cachefiles_cache *cache,
struct cachefiles_object *object,
struct dentry *dir, struct dentry *dentry,
enum fscache_why_object_killed why)
{
struct path path = {
.mnt = cache->mnt,
.dentry = dir,
};
int ret;
trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
ret = security_path_unlink(&path, dentry);
if (ret < 0) {
cachefiles_io_error(cache, "Unlink security error");
return ret;
}
ret = cachefiles_inject_remove_error();
if (ret == 0) {
ret = vfs_unlink(&nop_mnt_idmap, d_backing_inode(dir), dentry, NULL);
if (ret == -EIO)
cachefiles_io_error(cache, "Unlink failed");
}
if (ret != 0)
trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
cachefiles_trace_unlink_error);
return ret;
}
/*
* Delete an object representation from the cache
* - File backed objects are unlinked
* - Directory backed objects are stuffed into the graveyard for userspace to
* delete
*/
int cachefiles_bury_object(struct cachefiles_cache *cache,
struct cachefiles_object *object,
struct dentry *dir,
struct dentry *rep,
enum fscache_why_object_killed why)
{
struct dentry *grave, *trap;
struct path path, path_to_graveyard;
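/* "%08x%08x" below: 8 hex digits of timestamp + 8 of counter + NUL */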
char nbuffer[8 + 8 + 1];
int ret;
_enter(",'%pd','%pd'", dir, rep);
if (rep->d_parent != dir) {
inode_unlock(d_inode(dir));
_leave(" = -ESTALE");
return -ESTALE;
}
/* non-directories can just be unlinked */
if (!d_is_dir(rep)) {
dget(rep); /* Stop the dentry being negated if it's only pinned
* by a file struct.
*/
ret = cachefiles_unlink(cache, object, dir, rep, why);
dput(rep);
inode_unlock(d_inode(dir));
_leave(" = %d", ret);
return ret;
}
/* directories have to be moved to the graveyard */
_debug("move stale object to graveyard");
inode_unlock(d_inode(dir));
try_again:
/* first step is to make up a grave dentry in the graveyard */
sprintf(nbuffer, "%08x%08x",
(uint32_t) ktime_get_real_seconds(),
(uint32_t) atomic_inc_return(&cache->gravecounter));
/* do the multiway lock magic */
trap = lock_rename(cache->graveyard, dir);
/* do some checks before getting the grave dentry */
if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
/* the entry was probably culled when we dropped the parent dir
* lock */
unlock_rename(cache->graveyard, dir);
_leave(" = 0 [culled?]");
return 0;
}
if (!d_can_lookup(cache->graveyard)) {
unlock_rename(cache->graveyard, dir);
cachefiles_io_error(cache, "Graveyard no longer a directory");
return -EIO;
}
if (trap == rep) {
unlock_rename(cache->graveyard, dir);
cachefiles_io_error(cache, "May not make directory loop");
return -EIO;
}
if (d_mountpoint(rep)) {
unlock_rename(cache->graveyard, dir);
cachefiles_io_error(cache, "Mountpoint in cache");
return -EIO;
}
grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
if (IS_ERR(grave)) {
unlock_rename(cache->graveyard, dir);
trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
PTR_ERR(grave),
cachefiles_trace_lookup_error);
if (PTR_ERR(grave) == -ENOMEM) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
return -EIO;
}
if (d_is_positive(grave)) {
unlock_rename(cache->graveyard, dir);
dput(grave);
grave = NULL;
cond_resched();
goto try_again;
}
if (d_mountpoint(grave)) {
unlock_rename(cache->graveyard, dir);
dput(grave);
cachefiles_io_error(cache, "Mountpoint in graveyard");
return -EIO;
}
/* target should not be an ancestor of source */
if (trap == grave) {
unlock_rename(cache->graveyard, dir);
dput(grave);
cachefiles_io_error(cache, "May not make directory loop");
return -EIO;
}
/* attempt the rename */
path.mnt = cache->mnt;
path.dentry = dir;
path_to_graveyard.mnt = cache->mnt;
path_to_graveyard.dentry = cache->graveyard;
ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
if (ret < 0) {
cachefiles_io_error(cache, "Rename security error %d", ret);
} else {
struct renamedata rd = {
.old_mnt_idmap = &nop_mnt_idmap,
.old_dir = d_inode(dir),
.old_dentry = rep,
.new_mnt_idmap = &nop_mnt_idmap,
.new_dir = d_inode(cache->graveyard),
.new_dentry = grave,
};
trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
ret = cachefiles_inject_read_error();
if (ret == 0)
ret = vfs_rename(&rd);
if (ret != 0)
trace_cachefiles_vfs_error(object, d_inode(dir), ret,
cachefiles_trace_rename_error);
if (ret != 0 && ret != -ENOMEM)
cachefiles_io_error(cache,
"Rename failed with error %d", ret);
}
__cachefiles_unmark_inode_in_use(object, d_inode(rep));
unlock_rename(cache->graveyard, dir);
dput(grave);
_leave(" = 0");
return 0;
}
/*
* Delete a cache file.
*/
int cachefiles_delete_object(struct cachefiles_object *object,
enum fscache_why_object_killed why)
{
struct cachefiles_volume *volume = object->volume;
struct dentry *dentry = object->file->f_path.dentry;
struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
int ret;
_enter(",OBJ%x{%pD}", object->debug_id, object->file);
/* Stop the dentry being negated if it's only pinned by a file struct. */
dget(dentry);
inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
inode_unlock(d_backing_inode(fan));
dput(dentry);
return ret;
}
/*
* Create a temporary file and leave it unattached and un-xattr'd until the
* time comes to discard the object from memory.
*/
struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
struct cachefiles_volume *volume = object->volume;
struct cachefiles_cache *cache = volume->cache;
const struct cred *saved_cred;
struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
struct file *file;
const struct path parentpath = { .mnt = cache->mnt, .dentry = fan };
uint64_t ni_size;
long ret;
cachefiles_begin_secure(cache, &saved_cred);
ret = cachefiles_inject_write_error();
if (ret == 0) {
file = kernel_tmpfile_open(&nop_mnt_idmap, &parentpath,
S_IFREG | 0600,
O_RDWR | O_LARGEFILE | O_DIRECT,
cache->cache_cred);
ret = PTR_ERR_OR_ZERO(file);
}
if (ret) {
trace_cachefiles_vfs_error(object, d_inode(fan), ret,
cachefiles_trace_tmpfile_error);
if (ret == -EIO)
cachefiles_io_error_obj(object, "Failed to create tmpfile");
goto err;
}
trace_cachefiles_tmpfile(object, file_inode(file));
/* This is a newly created file with no other possible user */
if (!cachefiles_mark_inode_in_use(object, file_inode(file)))
WARN_ON(1);
ret = cachefiles_ondemand_init_object(object);
if (ret < 0)
goto err_unuse;
ni_size = object->cookie->object_size;
ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);
if (ni_size > 0) {
trace_cachefiles_trunc(object, file_inode(file), 0, ni_size,
cachefiles_trunc_expand_tmpfile);
ret = cachefiles_inject_write_error();
if (ret == 0)
ret = vfs_truncate(&file->f_path, ni_size);
if (ret < 0) {
trace_cachefiles_vfs_error(
object, file_inode(file), ret,
cachefiles_trace_trunc_error);
goto err_unuse;
}
}
ret = -EINVAL;
if (unlikely(!file->f_op->read_iter) ||
unlikely(!file->f_op->write_iter)) {
fput(file);
pr_notice("Cache does not support read_iter and write_iter\n");
goto err_unuse;
}
out:
cachefiles_end_secure(cache, saved_cred);
return file;
err_unuse:
cachefiles_do_unmark_inode_in_use(object, file_inode(file));
fput(file);
err:
file = ERR_PTR(ret);
goto out;
}
/*
* Create a new file.
*/
static bool cachefiles_create_file(struct cachefiles_object *object)
{
struct file *file;
int ret;
ret = cachefiles_has_space(object->volume->cache, 1, 0,
cachefiles_has_space_for_create);
if (ret < 0)
return false;
file = cachefiles_create_tmpfile(object);
if (IS_ERR(file))
return false;
set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
_debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
object->file = file;
return true;
}
/*
* Open an existing file, checking its attributes and replacing it if it is
* stale.
*/
static bool cachefiles_open_file(struct cachefiles_object *object,
struct dentry *dentry)
{
struct cachefiles_cache *cache = object->volume->cache;
struct file *file;
struct path path;
int ret;
_enter("%pd", dentry);
if (!cachefiles_mark_inode_in_use(object, d_inode(dentry))) {
pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
dentry, d_inode(dentry)->i_ino);
return false;
}
/* We need to open a file interface onto a data file now as we can't do
* it on demand because writeback called from do_exit() sees
* current->fs == NULL - which breaks d_path() called from ext4 open.
*/
path.mnt = cache->mnt;
path.dentry = dentry;
file = kernel_file_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
d_backing_inode(dentry), cache->cache_cred);
if (IS_ERR(file)) {
trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
PTR_ERR(file),
cachefiles_trace_open_error);
goto error;
}
if (unlikely(!file->f_op->read_iter) ||
unlikely(!file->f_op->write_iter)) {
pr_notice("Cache does not support read_iter and write_iter\n");
goto error_fput;
}
_debug("file -> %pd positive", dentry);
ret = cachefiles_ondemand_init_object(object);
if (ret < 0)
goto error_fput;
ret = cachefiles_check_auxdata(object, file);
if (ret < 0)
goto check_failed;
clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);
object->file = file;
/* Always update the atime on an object we've just looked up (this is
* used to keep track of culling, and atimes are only updated by read,
* write and readdir but not lookup or open).
*/
touch_atime(&file->f_path);
dput(dentry);
return true;
check_failed:
fscache_cookie_lookup_negative(object->cookie);
cachefiles_unmark_inode_in_use(object, file);
fput(file);
dput(dentry);
if (ret == -ESTALE)
return cachefiles_create_file(object);
return false;
error_fput:
fput(file);
error:
cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
dput(dentry);
return false;
}
/*
* walk from the parent object to the child object through the backing
* filesystem, creating directories as we go
*/
bool cachefiles_look_up_object(struct cachefiles_object *object)
{
struct cachefiles_volume *volume = object->volume;
struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
int ret;
_enter("OBJ%x,%s,", object->debug_id, object->d_name);
/* Look up path "cache/vol/fanout/file". */
ret = cachefiles_inject_read_error();
if (ret == 0)
dentry = lookup_positive_unlocked(object->d_name, fan,
object->d_name_len);
else
dentry = ERR_PTR(ret);
trace_cachefiles_lookup(object, fan, dentry);
if (IS_ERR(dentry)) {
if (dentry == ERR_PTR(-ENOENT))
goto new_file;
if (dentry == ERR_PTR(-EIO))
cachefiles_io_error_obj(object, "Lookup failed");
return false;
}
if (!d_is_reg(dentry)) {
pr_err("%pd is not a file\n", dentry);
inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
FSCACHE_OBJECT_IS_WEIRD);
dput(dentry);
if (ret < 0)
return false;
goto new_file;
}
if (!cachefiles_open_file(object, dentry))
return false;
_leave(" = t [%lu]", file_inode(object->file)->i_ino);
return true;
new_file:
fscache_cookie_lookup_negative(object->cookie);
return cachefiles_create_file(object);
}
/*
* Attempt to link a temporary file into its rightful place in the cache.
*/
bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
struct cachefiles_object *object)
{
struct cachefiles_volume *volume = object->volume;
struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
bool success = false;
int ret;
_enter(",%pD", object->file);
inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
ret = cachefiles_inject_read_error();
if (ret == 0)
dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
else
dentry = ERR_PTR(ret);
if (IS_ERR(dentry)) {
trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
cachefiles_trace_lookup_error);
_debug("lookup fail %ld", PTR_ERR(dentry));
goto out_unlock;
}
if (!d_is_negative(dentry)) {
if (d_backing_inode(dentry) == file_inode(object->file)) {
success = true;
goto out_dput;
}
ret = cachefiles_unlink(volume->cache, object, fan, dentry,
FSCACHE_OBJECT_IS_STALE);
if (ret < 0)
goto out_dput;
dput(dentry);
ret = cachefiles_inject_read_error();
if (ret == 0)
dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
else
dentry = ERR_PTR(ret);
if (IS_ERR(dentry)) {
trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
cachefiles_trace_lookup_error);
_debug("lookup fail %ld", PTR_ERR(dentry));
goto out_unlock;
}
}
ret = cachefiles_inject_read_error();
if (ret == 0)
ret = vfs_link(object->file->f_path.dentry, &nop_mnt_idmap,
d_inode(fan), dentry, NULL);
if (ret < 0) {
trace_cachefiles_vfs_error(object, d_inode(fan), ret,
cachefiles_trace_link_error);
_debug("link fail %d", ret);
} else {
trace_cachefiles_link(object, file_inode(object->file));
spin_lock(&object->lock);
/* TODO: Do we want to switch the file pointer to the new dentry? */
clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
spin_unlock(&object->lock);
success = true;
}
out_dput:
dput(dentry);
out_unlock:
inode_unlock(d_inode(fan));
_leave(" = %u", success);
return success;
}
/*
* Look up an inode to be checked or culled. Return -EBUSY if the inode is
* marked in use.
*/
static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
struct dentry *dir,
char *filename)
{
struct dentry *victim;
int ret = -ENOENT;
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
victim = lookup_one_len(filename, dir, strlen(filename));
if (IS_ERR(victim))
goto lookup_error;
if (d_is_negative(victim))
goto lookup_put;
if (d_inode(victim)->i_flags & S_KERNEL_FILE)
goto lookup_busy;
return victim;
lookup_busy:
ret = -EBUSY;
lookup_put:
inode_unlock(d_inode(dir));
dput(victim);
return ERR_PTR(ret);
lookup_error:
inode_unlock(d_inode(dir));
ret = PTR_ERR(victim);
if (ret == -ENOENT)
return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */
if (ret == -EIO) {
cachefiles_io_error(cache, "Lookup failed");
} else if (ret != -ENOMEM) {
pr_err("Internal error: %d\n", ret);
ret = -EIO;
}
return ERR_PTR(ret);
}
/*
* Cull an object if it's not in use
* - called only by cache manager daemon
*/
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
char *filename)
{
struct dentry *victim;
struct inode *inode;
int ret;
_enter(",%pd/,%s", dir, filename);
victim = cachefiles_lookup_for_cull(cache, dir, filename);
if (IS_ERR(victim))
return PTR_ERR(victim);
/* check to see if someone is using this object */
inode = d_inode(victim);
inode_lock(inode);
if (inode->i_flags & S_KERNEL_FILE) {
ret = -EBUSY;
} else {
/* Stop the cache from picking it back up */
inode->i_flags |= S_KERNEL_FILE;
ret = 0;
}
inode_unlock(inode);
if (ret < 0)
goto error_unlock;
ret = cachefiles_bury_object(cache, NULL, dir, victim,
FSCACHE_OBJECT_WAS_CULLED);
if (ret < 0)
goto error;
fscache_count_culled();
dput(victim);
_leave(" = 0");
return 0;
error_unlock:
inode_unlock(d_inode(dir));
error:
dput(victim);
if (ret == -ENOENT)
return -ESTALE; /* Probably got retired by the netfs */
if (ret != -ENOMEM) {
pr_err("Internal error: %d\n", ret);
ret = -EIO;
}
_leave(" = %d", ret);
return ret;
}
/*
* Find out if an object is in use or not
* - called only by cache manager daemon
* - returns -EBUSY or 0 to indicate whether an object is in use or not
*/
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
char *filename)
{
struct dentry *victim;
int ret = 0;
victim = cachefiles_lookup_for_cull(cache, dir, filename);
if (IS_ERR(victim))
return PTR_ERR(victim);
inode_unlock(d_inode(dir));
dput(victim);
return ret;
}
| linux-master | fs/cachefiles/namei.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"
static int cachefiles_ondemand_fd_release(struct inode *inode,
struct file *file)
{
struct cachefiles_object *object = file->private_data;
struct cachefiles_cache *cache = object->volume->cache;
int object_id = object->ondemand_id;
struct cachefiles_req *req;
XA_STATE(xas, &cache->reqs, 0);
xa_lock(&cache->reqs);
object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
/*
* Flush all pending READ requests since their completion depends on
* anon_fd.
*/
xas_for_each(&xas, req, ULONG_MAX) {
if (req->msg.object_id == object_id &&
req->msg.opcode == CACHEFILES_OP_READ) {
req->error = -EIO;
complete(&req->done);
xas_store(&xas, NULL);
}
}
xa_unlock(&cache->reqs);
xa_erase(&cache->ondemand_ids, object_id);
trace_cachefiles_ondemand_fd_release(object, object_id);
cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
cachefiles_put_unbind_pincount(cache);
return 0;
}
static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
struct iov_iter *iter)
{
struct cachefiles_object *object = kiocb->ki_filp->private_data;
struct cachefiles_cache *cache = object->volume->cache;
struct file *file = object->file;
size_t len = iter->count;
loff_t pos = kiocb->ki_pos;
const struct cred *saved_cred;
int ret;
if (!file)
return -ENOBUFS;
cachefiles_begin_secure(cache, &saved_cred);
ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
cachefiles_end_secure(cache, saved_cred);
if (ret < 0)
return ret;
trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
if (!ret)
ret = len;
return ret;
}
static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
int whence)
{
struct cachefiles_object *object = filp->private_data;
struct file *file = object->file;
if (!file)
return -ENOBUFS;
return vfs_llseek(file, pos, whence);
}
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct cachefiles_object *object = filp->private_data;
struct cachefiles_cache *cache = object->volume->cache;
struct cachefiles_req *req;
unsigned long id;
if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
return -EINVAL;
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
return -EOPNOTSUPP;
id = arg;
req = xa_erase(&cache->reqs, id);
if (!req)
return -EINVAL;
trace_cachefiles_ondemand_cread(object, id);
complete(&req->done);
return 0;
}
static const struct file_operations cachefiles_ondemand_fd_fops = {
.owner = THIS_MODULE,
.release = cachefiles_ondemand_fd_release,
.write_iter = cachefiles_ondemand_fd_write_iter,
.llseek = cachefiles_ondemand_fd_llseek,
.unlocked_ioctl = cachefiles_ondemand_fd_ioctl,
};
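/*
 * Illustrative sketch of the userspace side (assumptions: "fd" is the
 * anon fd handed out in an OPEN request and "msg_id" came from a READ
 * request read off the cachefiles device):
 *
 *	pwrite(fd, buf, len, off);	// served by ..._fd_write_iter()
 *	ioctl(fd, CACHEFILES_IOC_READ_COMPLETE, msg_id);
 *					// completes the READ via ..._fd_ioctl()
 */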
/*
* OPEN request Completion (copen)
* - command: "copen <id>,<cache_size>"
* <cache_size> indicates the object size if >=0, error code if negative
*/
int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
struct cachefiles_req *req;
struct fscache_cookie *cookie;
char *pid, *psize;
unsigned long id;
long size;
int ret;
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
return -EOPNOTSUPP;
if (!*args) {
pr_err("Empty id specified\n");
return -EINVAL;
}
pid = args;
psize = strchr(args, ',');
if (!psize) {
pr_err("Cache size is not specified\n");
return -EINVAL;
}
*psize = 0;
psize++;
ret = kstrtoul(pid, 0, &id);
if (ret)
return ret;
req = xa_erase(&cache->reqs, id);
if (!req)
return -EINVAL;
/* fail OPEN request if copen format is invalid */
ret = kstrtol(psize, 0, &size);
if (ret) {
req->error = ret;
goto out;
}
/* fail OPEN request if daemon reports an error */
if (size < 0) {
if (!IS_ERR_VALUE(size)) {
req->error = -EINVAL;
ret = -EINVAL;
} else {
req->error = size;
ret = 0;
}
goto out;
}
cookie = req->object->cookie;
cookie->object_size = size;
if (size)
clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
else
set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
trace_cachefiles_ondemand_copen(req->object, id, size);
out:
complete(&req->done);
return ret;
}
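/*
 * For example (illustrative request ID): a daemon that opened the
 * backing file successfully might write "copen 17,1048576" to the
 * cachefiles device to report a 1 MiB object, or "copen 17,-2" (i.e.
 * -ENOENT) to fail request 17 with an error.
 */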
static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
{
struct cachefiles_object *object;
struct cachefiles_cache *cache;
struct cachefiles_open *load;
struct file *file;
u32 object_id;
int ret, fd;
object = cachefiles_grab_object(req->object,
cachefiles_obj_get_ondemand_fd);
cache = object->volume->cache;
ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
XA_LIMIT(1, INT_MAX),
&cache->ondemand_id_next, GFP_KERNEL);
if (ret < 0)
goto err;
fd = get_unused_fd_flags(O_WRONLY);
if (fd < 0) {
ret = fd;
goto err_free_id;
}
file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
object, O_WRONLY);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto err_put_fd;
}
file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
fd_install(fd, file);
load = (void *)req->msg.data;
load->fd = fd;
req->msg.object_id = object_id;
object->ondemand_id = object_id;
cachefiles_get_unbind_pincount(cache);
trace_cachefiles_ondemand_open(object, &req->msg, load);
return 0;
err_put_fd:
put_unused_fd(fd);
err_free_id:
xa_erase(&cache->ondemand_ids, object_id);
err:
cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
return ret;
}
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
char __user *_buffer, size_t buflen)
{
struct cachefiles_req *req;
struct cachefiles_msg *msg;
unsigned long id = 0;
size_t n;
int ret = 0;
XA_STATE(xas, &cache->reqs, cache->req_id_next);
/*
 * Cyclically search for a request that has not yet been processed, so
 * that no request is handed out twice and request distribution stays
 * fair.
 */
xa_lock(&cache->reqs);
req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
if (!req && cache->req_id_next > 0) {
xas_set(&xas, 0);
req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
}
if (!req) {
xa_unlock(&cache->reqs);
return 0;
}
msg = &req->msg;
n = msg->len;
if (n > buflen) {
xa_unlock(&cache->reqs);
return -EMSGSIZE;
}
xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
cache->req_id_next = xas.xa_index + 1;
xa_unlock(&cache->reqs);
id = xas.xa_index;
msg->msg_id = id;
if (msg->opcode == CACHEFILES_OP_OPEN) {
ret = cachefiles_ondemand_get_fd(req);
if (ret)
goto error;
}
if (copy_to_user(_buffer, msg, n) != 0) {
ret = -EFAULT;
goto err_put_fd;
}
/* CLOSE request has no reply */
if (msg->opcode == CACHEFILES_OP_CLOSE) {
xa_erase(&cache->reqs, id);
complete(&req->done);
}
return n;
err_put_fd:
if (msg->opcode == CACHEFILES_OP_OPEN)
close_fd(((struct cachefiles_open *)msg->data)->fd);
error:
xa_erase(&cache->reqs, id);
req->error = ret;
complete(&req->done);
return ret;
}
typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
enum cachefiles_opcode opcode,
size_t data_len,
init_req_fn init_req,
void *private)
{
struct cachefiles_cache *cache = object->volume->cache;
struct cachefiles_req *req;
XA_STATE(xas, &cache->reqs, 0);
int ret;
if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
return 0;
if (test_bit(CACHEFILES_DEAD, &cache->flags))
return -EIO;
req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
if (!req)
return -ENOMEM;
req->object = object;
init_completion(&req->done);
req->msg.opcode = opcode;
req->msg.len = sizeof(struct cachefiles_msg) + data_len;
ret = init_req(req, private);
if (ret)
goto out;
do {
/*
* Stop enqueuing the request when daemon is dying. The
* following two operations need to be atomic as a whole.
* 1) check cache state, and
* 2) enqueue request if cache is alive.
* Otherwise the request may be enqueued after xarray has been
* flushed, leaving the orphan request never being completed.
*
* CPU 1 CPU 2
* ===== =====
* test CACHEFILES_DEAD bit
* set CACHEFILES_DEAD bit
* flush requests in the xarray
* enqueue the request
*/
xas_lock(&xas);
if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
xas_unlock(&xas);
ret = -EIO;
goto out;
}
/* coupled with the barrier in cachefiles_flush_reqs() */
smp_mb();
if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) {
WARN_ON_ONCE(object->ondemand_id == 0);
xas_unlock(&xas);
ret = -EIO;
goto out;
}
xas.xa_index = 0;
xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
if (xas.xa_node == XAS_RESTART)
xas_set_err(&xas, -EBUSY);
xas_store(&xas, req);
xas_clear_mark(&xas, XA_FREE_MARK);
xas_set_mark(&xas, CACHEFILES_REQ_NEW);
xas_unlock(&xas);
} while (xas_nomem(&xas, GFP_KERNEL));
ret = xas_error(&xas);
if (ret)
goto out;
wake_up_all(&cache->daemon_pollwq);
wait_for_completion(&req->done);
ret = req->error;
out:
kfree(req);
return ret;
}
static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
void *private)
{
struct cachefiles_object *object = req->object;
struct fscache_cookie *cookie = object->cookie;
struct fscache_volume *volume = object->volume->vcookie;
struct cachefiles_open *load = (void *)req->msg.data;
size_t volume_key_size, cookie_key_size;
void *volume_key, *cookie_key;
/*
* Volume key is a NUL-terminated string. key[0] stores strlen() of the
* string, followed by the content of the string (excluding '\0').
*/
volume_key_size = volume->key[0] + 1;
volume_key = volume->key + 1;
/* Cookie key is binary data, which is netfs specific. */
cookie_key_size = cookie->key_len;
cookie_key = fscache_get_key(cookie);
if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
return -EINVAL;
}
load->volume_key_size = volume_key_size;
load->cookie_key_size = cookie_key_size;
memcpy(load->data, volume_key, volume_key_size);
memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);
return 0;
}
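/*
 * Resulting OPEN payload layout (sketch, using the field names above):
 *
 *	req->msg.data -> struct cachefiles_open
 *		.volume_key_size	volume->key[0] + 1
 *		.cookie_key_size	cookie->key_len
 *		.fd			filled in later by ..._get_fd()
 *		.data[]			volume key string, then binary cookie key
 */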
static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
void *private)
{
struct cachefiles_object *object = req->object;
int object_id = object->ondemand_id;
/*
 * It's possible that the object ID is still 0 if the cookie lookup
 * phase failed before an OPEN request was ever sent. Also avoid
 * sending a CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which
 * means the anon_fd has already been closed.
 */
if (object_id <= 0)
return -ENOENT;
req->msg.object_id = object_id;
trace_cachefiles_ondemand_close(object, &req->msg);
return 0;
}
struct cachefiles_read_ctx {
loff_t off;
size_t len;
};
static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
void *private)
{
struct cachefiles_object *object = req->object;
struct cachefiles_read *load = (void *)req->msg.data;
struct cachefiles_read_ctx *read_ctx = private;
int object_id = object->ondemand_id;
/* Stop enqueuing requests when the daemon has closed the anon_fd. */
if (object_id <= 0) {
WARN_ON_ONCE(object_id == 0);
pr_info_once("READ: anonymous fd closed prematurely.\n");
return -EIO;
}
req->msg.object_id = object_id;
load->off = read_ctx->off;
load->len = read_ctx->len;
trace_cachefiles_ondemand_read(object, &req->msg, load);
return 0;
}
int cachefiles_ondemand_init_object(struct cachefiles_object *object)
{
struct fscache_cookie *cookie = object->cookie;
struct fscache_volume *volume = object->volume->vcookie;
size_t volume_key_size, cookie_key_size, data_len;
/*
 * CacheFiles will first check the cache file under the root cache
 * directory. If the coherency check fails, it will fall back to
 * creating a new tmpfile as the cache file. Reuse the previously
 * allocated object ID if any.
 */
if (object->ondemand_id > 0)
return 0;
volume_key_size = volume->key[0] + 1;
cookie_key_size = cookie->key_len;
data_len = sizeof(struct cachefiles_open) +
volume_key_size + cookie_key_size;
return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
data_len, cachefiles_ondemand_init_open_req, NULL);
}
void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
cachefiles_ondemand_init_close_req, NULL);
}
int cachefiles_ondemand_read(struct cachefiles_object *object,
loff_t pos, size_t len)
{
struct cachefiles_read_ctx read_ctx = {pos, len};
return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
sizeof(struct cachefiles_read),
cachefiles_ondemand_init_read_req, &read_ctx);
}
| linux-master | fs/cachefiles/ondemand.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Manage high-level VFS aspects of a cache.
*
* Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/namei.h>
#include "internal.h"
/*
* Bring a cache online.
*/
int cachefiles_add_cache(struct cachefiles_cache *cache)
{
struct fscache_cache *cache_cookie;
struct path path;
struct kstatfs stats;
struct dentry *graveyard, *cachedir, *root;
const struct cred *saved_cred;
int ret;
_enter("");
cache_cookie = fscache_acquire_cache(cache->tag);
if (IS_ERR(cache_cookie))
return PTR_ERR(cache_cookie);
/* we want to work under the module's security ID */
ret = cachefiles_get_security_ID(cache);
if (ret < 0)
goto error_getsec;
cachefiles_begin_secure(cache, &saved_cred);
/* look up the directory at the root of the cache */
ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
if (ret < 0)
goto error_open_root;
cache->mnt = path.mnt;
root = path.dentry;
ret = -EINVAL;
if (is_idmapped_mnt(path.mnt)) {
pr_warn("File cache on idmapped mounts not supported\n");
goto error_unsupported;
}
/* Check features of the backing filesystem:
* - Directories must support looking up and directory creation
* - We create tmpfiles to handle invalidation
* - We use xattrs to store metadata
* - We need to be able to query the amount of space available
* - We want to be able to sync the filesystem when stopping the cache
* - We use DIO to/from pages, so the blocksize mustn't be too big.
*/
ret = -EOPNOTSUPP;
if (d_is_negative(root) ||
!d_backing_inode(root)->i_op->lookup ||
!d_backing_inode(root)->i_op->mkdir ||
!d_backing_inode(root)->i_op->tmpfile ||
!(d_backing_inode(root)->i_opflags & IOP_XATTR) ||
!root->d_sb->s_op->statfs ||
!root->d_sb->s_op->sync_fs ||
root->d_sb->s_blocksize > PAGE_SIZE)
goto error_unsupported;
ret = -EROFS;
if (sb_rdonly(root->d_sb))
goto error_unsupported;
/* determine the security of the on-disk cache as this governs the
 * security IDs of the files we create */
ret = cachefiles_determine_cache_security(cache, root, &saved_cred);
if (ret < 0)
goto error_unsupported;
/* get the cache size and blocksize */
ret = vfs_statfs(&path, &stats);
if (ret < 0)
goto error_unsupported;
ret = -ERANGE;
if (stats.f_bsize <= 0)
goto error_unsupported;
ret = -EOPNOTSUPP;
if (stats.f_bsize > PAGE_SIZE)
goto error_unsupported;
cache->bsize = stats.f_bsize;
cache->bshift = ilog2(stats.f_bsize);
_debug("blksize %u (shift %u)",
cache->bsize, cache->bshift);
_debug("size %llu, avail %llu",
(unsigned long long) stats.f_blocks,
(unsigned long long) stats.f_bavail);
/* set up caching limits */
do_div(stats.f_files, 100);
cache->fstop = stats.f_files * cache->fstop_percent;
cache->fcull = stats.f_files * cache->fcull_percent;
cache->frun = stats.f_files * cache->frun_percent;
_debug("limits {%llu,%llu,%llu} files",
(unsigned long long) cache->frun,
(unsigned long long) cache->fcull,
(unsigned long long) cache->fstop);
do_div(stats.f_blocks, 100);
cache->bstop = stats.f_blocks * cache->bstop_percent;
cache->bcull = stats.f_blocks * cache->bcull_percent;
cache->brun = stats.f_blocks * cache->brun_percent;
_debug("limits {%llu,%llu,%llu} blocks",
(unsigned long long) cache->brun,
(unsigned long long) cache->bcull,
(unsigned long long) cache->bstop);
/* get the cache directory and check its type */
cachedir = cachefiles_get_directory(cache, root, "cache", NULL);
if (IS_ERR(cachedir)) {
ret = PTR_ERR(cachedir);
goto error_unsupported;
}
cache->store = cachedir;
/* get the graveyard directory */
graveyard = cachefiles_get_directory(cache, root, "graveyard", NULL);
if (IS_ERR(graveyard)) {
ret = PTR_ERR(graveyard);
goto error_unsupported;
}
cache->graveyard = graveyard;
cache->cache = cache_cookie;
ret = fscache_add_cache(cache_cookie, &cachefiles_cache_ops, cache);
if (ret < 0)
goto error_add_cache;
/* done */
set_bit(CACHEFILES_READY, &cache->flags);
dput(root);
pr_info("File cache on %s registered\n", cache_cookie->name);
/* check how much space the cache has */
cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);
cachefiles_end_secure(cache, saved_cred);
_leave(" = 0 [%px]", cache->cache);
return 0;
error_add_cache:
cachefiles_put_directory(cache->graveyard);
cache->graveyard = NULL;
error_unsupported:
cachefiles_put_directory(cache->store);
cache->store = NULL;
mntput(cache->mnt);
cache->mnt = NULL;
dput(root);
error_open_root:
cachefiles_end_secure(cache, saved_cred);
error_getsec:
fscache_relinquish_cache(cache_cookie);
cache->cache = NULL;
pr_err("Failed to register: %d\n", ret);
return ret;
}
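/*
 * Worked example of the limit calculation above (illustrative numbers):
 * with stats.f_files = 1,000,000 and fstop/fcull/frun percentages of
 * 3/7/10, do_div() leaves 10,000, giving fstop = 30,000, fcull = 70,000
 * and frun = 100,000 files; the block limits are computed the same way
 * from stats.f_blocks.
 */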
/*
* See if we have space for a number of pages and/or a number of files in the
* cache
*/
int cachefiles_has_space(struct cachefiles_cache *cache,
unsigned fnr, unsigned bnr,
enum cachefiles_has_space_for reason)
{
struct kstatfs stats;
u64 b_avail, b_writing;
int ret;
struct path path = {
.mnt = cache->mnt,
.dentry = cache->mnt->mnt_root,
};
//_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
// (unsigned long long) cache->frun,
// (unsigned long long) cache->fcull,
// (unsigned long long) cache->fstop,
// (unsigned long long) cache->brun,
// (unsigned long long) cache->bcull,
// (unsigned long long) cache->bstop,
// fnr, bnr);
/* find out how many blocks the backing filesystem has available */
memset(&stats, 0, sizeof(stats));
ret = vfs_statfs(&path, &stats);
if (ret < 0) {
trace_cachefiles_vfs_error(NULL, d_inode(path.dentry), ret,
cachefiles_trace_statfs_error);
if (ret == -EIO)
cachefiles_io_error(cache, "statfs failed");
_leave(" = %d", ret);
return ret;
}
b_avail = stats.f_bavail;
b_writing = atomic_long_read(&cache->b_writing);
if (b_avail > b_writing)
b_avail -= b_writing;
else
b_avail = 0;
//_debug("avail %llu,%llu",
// (unsigned long long)stats.f_ffree,
// (unsigned long long)b_avail);
/* see if there is sufficient space */
if (stats.f_ffree > fnr)
stats.f_ffree -= fnr;
else
stats.f_ffree = 0;
if (b_avail > bnr)
b_avail -= bnr;
else
b_avail = 0;
ret = -ENOBUFS;
if (stats.f_ffree < cache->fstop ||
b_avail < cache->bstop)
goto stop_and_begin_cull;
ret = 0;
if (stats.f_ffree < cache->fcull ||
b_avail < cache->bcull)
goto begin_cull;
if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
stats.f_ffree >= cache->frun &&
b_avail >= cache->brun &&
test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
) {
_debug("cease culling");
cachefiles_state_changed(cache);
}
//_leave(" = 0");
return 0;
stop_and_begin_cull:
switch (reason) {
case cachefiles_has_space_for_write:
fscache_count_no_write_space();
break;
case cachefiles_has_space_for_create:
fscache_count_no_create_space();
break;
default:
break;
}
begin_cull:
if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
_debug("### CULL CACHE ###");
cachefiles_state_changed(cache);
}
_leave(" = %d", ret);
return ret;
}
/*
* Mark all the objects as being out of service and queue them all for cleanup.
*/
static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
{
struct cachefiles_object *object;
unsigned int count = 0;
_enter("");
spin_lock(&cache->object_list_lock);
while (!list_empty(&cache->object_list)) {
object = list_first_entry(&cache->object_list,
struct cachefiles_object, cache_link);
cachefiles_see_object(object, cachefiles_obj_see_withdrawal);
list_del_init(&object->cache_link);
fscache_withdraw_cookie(object->cookie);
count++;
if ((count & 63) == 0) {
spin_unlock(&cache->object_list_lock);
cond_resched();
spin_lock(&cache->object_list_lock);
}
}
spin_unlock(&cache->object_list_lock);
_leave(" [%u objs]", count);
}
/*
* Withdraw volumes.
*/
static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
{
_enter("");
for (;;) {
struct cachefiles_volume *volume = NULL;
spin_lock(&cache->object_list_lock);
if (!list_empty(&cache->volumes)) {
volume = list_first_entry(&cache->volumes,
struct cachefiles_volume, cache_link);
list_del_init(&volume->cache_link);
}
spin_unlock(&cache->object_list_lock);
if (!volume)
break;
cachefiles_withdraw_volume(volume);
}
_leave("");
}
/*
* Sync a cache to backing disk.
*/
static void cachefiles_sync_cache(struct cachefiles_cache *cache)
{
const struct cred *saved_cred;
int ret;
_enter("%s", cache->cache->name);
/* make sure all pages pinned by operations on behalf of the netfs are
* written to disc */
cachefiles_begin_secure(cache, &saved_cred);
down_read(&cache->mnt->mnt_sb->s_umount);
ret = sync_filesystem(cache->mnt->mnt_sb);
up_read(&cache->mnt->mnt_sb->s_umount);
cachefiles_end_secure(cache, saved_cred);
if (ret == -EIO)
cachefiles_io_error(cache,
"Attempt to sync backing fs superblock returned error %d",
ret);
}
/*
* Withdraw cache objects.
*/
void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
{
struct fscache_cache *fscache = cache->cache;
pr_info("File cache on %s unregistering\n", fscache->name);
fscache_withdraw_cache(fscache);
/* we now have to destroy all the active objects pertaining to this
 * cache - which we do by passing them off to the thread pool to be
 * disposed of */
cachefiles_withdraw_objects(cache);
fscache_wait_for_objects(fscache);
cachefiles_withdraw_volumes(cache);
cachefiles_sync_cache(cache);
cache->cache = NULL;
fscache_relinquish_cache(fscache);
}
| linux-master | fs/cachefiles/cache.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Key to pathname encoder
*
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/slab.h>
#include "internal.h"
static const char cachefiles_charmap[64] =
"0123456789" /* 0 - 9 */
"abcdefghijklmnopqrstuvwxyz" /* 10 - 35 */
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" /* 36 - 61 */
"_-" /* 62 - 63 */
;
static const char cachefiles_filecharmap[256] = {
/* we skip space and tab and control chars */
[33 ... 46] = 1, /* '!' -> '.' */
/* we skip '/' as it's significant to pathwalk */
[48 ... 127] = 1, /* '0' -> '~' */
};
static inline unsigned int how_many_hex_digits(unsigned int x)
{
return x ? round_up(ilog2(x) + 1, 4) / 4 : 0;
}
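/*
 * E.g. how_many_hex_digits(0x1234): ilog2() gives 12, so round_up(13, 4)
 * is 16 and the result is 4 - the number of digits "%x" will print.
 */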
/*
* turn the raw key into something cooked
* - the key may be up to NAME_MAX in length (including the length word)
* - "base64" encode the strange keys, mapping 3 bytes of raw to four of
* cooked
* - need to cut the cooked key into 252 char lengths (189 raw bytes)
*/
bool cachefiles_cook_key(struct cachefiles_object *object)
{
const u8 *key = fscache_get_key(object->cookie), *kend;
unsigned char ch;
unsigned int acc, i, n, nle, nbe, keylen = object->cookie->key_len;
unsigned int b64len, len, print, pad;
char *name, sep;
_enter(",%u,%*phN", keylen, keylen, key);
BUG_ON(keylen > NAME_MAX - 3);
print = 1;
for (i = 0; i < keylen; i++) {
ch = key[i];
print &= cachefiles_filecharmap[ch];
}
/* If the path is usable ASCII, then we render it directly */
if (print) {
len = 1 + keylen;
name = kmalloc(len + 1, GFP_KERNEL);
if (!name)
return false;
name[0] = 'D'; /* Data object type, string encoding */
memcpy(name + 1, key, keylen);
goto success;
}
/* See if it makes sense to encode it as "hex,hex,hex" for each 32-bit
* chunk. We rely on the key having been padded out to a whole number
* of 32-bit words.
*/
n = round_up(keylen, 4);
nbe = nle = 0;
for (i = 0; i < n; i += 4) {
u32 be = be32_to_cpu(*(__be32 *)(key + i));
u32 le = le32_to_cpu(*(__le32 *)(key + i));
nbe += 1 + how_many_hex_digits(be);
nle += 1 + how_many_hex_digits(le);
}
b64len = DIV_ROUND_UP(keylen, 3);
pad = b64len * 3 - keylen;
b64len = 2 + b64len * 4; /* Length if we base64-encode it */
_debug("len=%u nbe=%u nle=%u b64=%u", keylen, nbe, nle, b64len);
if (nbe < b64len || nle < b64len) {
unsigned int nlen = min(nbe, nle) + 1;
name = kmalloc(nlen, GFP_KERNEL);
if (!name)
return false;
sep = (nbe <= nle) ? 'S' : 'T'; /* Encoding indicator */
len = 0;
for (i = 0; i < n; i += 4) {
u32 x;
if (nbe <= nle)
x = be32_to_cpu(*(__be32 *)(key + i));
else
x = le32_to_cpu(*(__le32 *)(key + i));
name[len++] = sep;
if (x != 0)
len += snprintf(name + len, nlen - len, "%x", x);
sep = ',';
}
goto success;
}
/* We need to base64-encode it */
name = kmalloc(b64len + 1, GFP_KERNEL);
if (!name)
return false;
name[0] = 'E';
name[1] = '0' + pad;
len = 2;
kend = key + keylen;
do {
acc = *key++;
if (key < kend) {
acc |= *key++ << 8;
if (key < kend)
acc |= *key++ << 16;
}
name[len++] = cachefiles_charmap[acc & 63];
acc >>= 6;
name[len++] = cachefiles_charmap[acc & 63];
acc >>= 6;
name[len++] = cachefiles_charmap[acc & 63];
acc >>= 6;
name[len++] = cachefiles_charmap[acc & 63];
} while (key < kend);
success:
name[len] = 0;
object->d_name = name;
object->d_name_len = len;
_leave(" = %s", object->d_name);
return true;
}
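/*
 * Worked examples (illustrative keys): a printable 5-byte key "hello"
 * cooks to "Dhello"; the 4-byte binary key 01 02 03 04 costs 8 chars as
 * big-endian hex versus 10 as base64, so it cooks to "S1020304"; keys
 * for which hex is no shorter get the 'E' form: 'E', a pad-count digit,
 * then 4 chars of cachefiles_charmap[] per 3 raw bytes.
 */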
| linux-master | fs/cachefiles/key.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles security management
*
* Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/fs.h>
#include <linux/cred.h>
#include "internal.h"
/*
 * Determine the security context within which we access the cache from
 * inside the kernel.
 */
int cachefiles_get_security_ID(struct cachefiles_cache *cache)
{
struct cred *new;
int ret;
_enter("{%s}", cache->secctx);
new = prepare_kernel_cred(current);
if (!new) {
ret = -ENOMEM;
goto error;
}
if (cache->secctx) {
ret = set_security_override_from_ctx(new, cache->secctx);
if (ret < 0) {
put_cred(new);
pr_err("Security denies permission to nominate security context: error %d\n",
ret);
goto error;
}
}
cache->cache_cred = new;
ret = 0;
error:
_leave(" = %d", ret);
return ret;
}
/*
* see if mkdir and create can be performed in the root directory
*/
static int cachefiles_check_cache_dir(struct cachefiles_cache *cache,
struct dentry *root)
{
int ret;
ret = security_inode_mkdir(d_backing_inode(root), root, 0);
if (ret < 0) {
pr_err("Security denies permission to make dirs: error %d",
ret);
return ret;
}
ret = security_inode_create(d_backing_inode(root), root, 0);
if (ret < 0)
pr_err("Security denies permission to create files: error %d",
ret);
return ret;
}
/*
* check the security details of the on-disk cache
* - must be called with security override in force
* - must return with a security override in force - even in the case of an
* error
*/
int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
struct dentry *root,
const struct cred **_saved_cred)
{
struct cred *new;
int ret;
_enter("");
/* duplicate the cache creds for COW (the override is currently in
* force, so we can use prepare_creds() to do this) */
new = prepare_creds();
if (!new)
return -ENOMEM;
cachefiles_end_secure(cache, *_saved_cred);
/* use the cache root dir's security context as the basis with
 * which to create files */
ret = set_create_files_as(new, d_backing_inode(root));
if (ret < 0) {
abort_creds(new);
cachefiles_begin_secure(cache, _saved_cred);
_leave(" = %d [cfa]", ret);
return ret;
}
put_cred(cache->cache_cred);
cache->cache_cred = new;
cachefiles_begin_secure(cache, _saved_cred);
ret = cachefiles_check_cache_dir(cache, root);
if (ret == -EOPNOTSUPP)
ret = 0;
_leave(" = %d", ret);
return ret;
}
| linux-master | fs/cachefiles/security.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
* Copyright (c) 2001,2002 Richard Russon
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h> /* For bdev_logical_block_size(). */
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/moduleparam.h>
#include <linux/bitmap.h>
#include "sysctl.h"
#include "logfile.h"
#include "quota.h"
#include "usnjrnl.h"
#include "dir.h"
#include "debug.h"
#include "index.h"
#include "inode.h"
#include "aops.h"
#include "layout.h"
#include "malloc.h"
#include "ntfs.h"
/* Number of mounted filesystems which have compression enabled. */
static unsigned long ntfs_nr_compression_users;
/* A global default upcase table and a corresponding reference count. */
static ntfschar *default_upcase;
static unsigned long ntfs_nr_upcase_users;
/* Error constants/strings used in inode.c::ntfs_show_options(). */
typedef enum {
/* One of these must be present, default is ON_ERRORS_CONTINUE. */
ON_ERRORS_PANIC = 0x01,
ON_ERRORS_REMOUNT_RO = 0x02,
ON_ERRORS_CONTINUE = 0x04,
/* Optional, can be combined with any of the above. */
ON_ERRORS_RECOVER = 0x10,
} ON_ERRORS_ACTIONS;
const option_t on_errors_arr[] = {
{ ON_ERRORS_PANIC, "panic" },
{ ON_ERRORS_REMOUNT_RO, "remount-ro", },
{ ON_ERRORS_CONTINUE, "continue", },
{ ON_ERRORS_RECOVER, "recover" },
{ 0, NULL }
};
/**
* simple_getbool - convert input string to a boolean value
* @s: input string to convert
* @setval: where to store the output boolean value
*
* Copied from old ntfs driver (which copied from vfat driver).
*
* "1", "yes", "true", or an empty string are converted to %true.
* "0", "no", and "false" are converted to %false.
*
 * Return: %1 if the string was converted (or was empty) and *setval
 * holds the result; %0 if the string was not valid.
*/
static int simple_getbool(char *s, bool *setval)
{
if (s) {
if (!strcmp(s, "1") || !strcmp(s, "yes") || !strcmp(s, "true"))
*setval = true;
else if (!strcmp(s, "0") || !strcmp(s, "no") ||
!strcmp(s, "false"))
*setval = false;
else
return 0;
} else
*setval = true;
return 1;
}
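/*
 * E.g. simple_getbool("yes", &val) returns 1 with val == true, and
 * simple_getbool("maybe", &val) returns 0 as the string is not valid.
 */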
/**
* parse_options - parse the (re)mount options
* @vol: ntfs volume
* @opt: string containing the (re)mount options
*
* Parse the recognized options in @opt for the ntfs volume described by @vol.
*/
static bool parse_options(ntfs_volume *vol, char *opt)
{
char *p, *v, *ov;
static char *utf8 = "utf8";
int errors = 0, sloppy = 0;
kuid_t uid = INVALID_UID;
kgid_t gid = INVALID_GID;
umode_t fmask = (umode_t)-1, dmask = (umode_t)-1;
int mft_zone_multiplier = -1, on_errors = -1;
int show_sys_files = -1, case_sensitive = -1, disable_sparse = -1;
struct nls_table *nls_map = NULL, *old_nls;
/* I am lazy... (-8 */
#define NTFS_GETOPT_WITH_DEFAULT(option, variable, default_value) \
if (!strcmp(p, option)) { \
if (!v || !*v) \
variable = default_value; \
else { \
variable = simple_strtoul(ov = v, &v, 0); \
if (*v) \
goto needs_val; \
} \
}
#define NTFS_GETOPT(option, variable) \
if (!strcmp(p, option)) { \
if (!v || !*v) \
goto needs_arg; \
variable = simple_strtoul(ov = v, &v, 0); \
if (*v) \
goto needs_val; \
}
#define NTFS_GETOPT_UID(option, variable) \
if (!strcmp(p, option)) { \
uid_t uid_value; \
if (!v || !*v) \
goto needs_arg; \
uid_value = simple_strtoul(ov = v, &v, 0); \
if (*v) \
goto needs_val; \
variable = make_kuid(current_user_ns(), uid_value); \
if (!uid_valid(variable)) \
goto needs_val; \
}
#define NTFS_GETOPT_GID(option, variable) \
if (!strcmp(p, option)) { \
gid_t gid_value; \
if (!v || !*v) \
goto needs_arg; \
gid_value = simple_strtoul(ov = v, &v, 0); \
if (*v) \
goto needs_val; \
variable = make_kgid(current_user_ns(), gid_value); \
if (!gid_valid(variable)) \
goto needs_val; \
}
#define NTFS_GETOPT_OCTAL(option, variable) \
if (!strcmp(p, option)) { \
if (!v || !*v) \
goto needs_arg; \
variable = simple_strtoul(ov = v, &v, 8); \
if (*v) \
goto needs_val; \
}
#define NTFS_GETOPT_BOOL(option, variable) \
if (!strcmp(p, option)) { \
bool val; \
if (!simple_getbool(v, &val)) \
goto needs_bool; \
variable = val; \
}
#define NTFS_GETOPT_OPTIONS_ARRAY(option, variable, opt_array) \
if (!strcmp(p, option)) { \
int _i; \
if (!v || !*v) \
goto needs_arg; \
ov = v; \
if (variable == -1) \
variable = 0; \
for (_i = 0; opt_array[_i].str && *opt_array[_i].str; _i++) \
if (!strcmp(opt_array[_i].str, v)) { \
variable |= opt_array[_i].val; \
break; \
} \
if (!opt_array[_i].str || !*opt_array[_i].str) \
goto needs_val; \
}
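/*
 * Example (illustrative values): an option string such as
 * "uid=1000,gid=1000,fmask=0177,dmask=077,nls=utf8,errors=remount-ro,sloppy"
 * is chopped up by strsep() below and dispatched through the macros above.
 */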
if (!opt || !*opt)
goto no_mount_options;
ntfs_debug("Entering with mount options string: %s", opt);
while ((p = strsep(&opt, ","))) {
if ((v = strchr(p, '=')))
*v++ = 0;
NTFS_GETOPT_UID("uid", uid)
else NTFS_GETOPT_GID("gid", gid)
else NTFS_GETOPT_OCTAL("umask", fmask = dmask)
else NTFS_GETOPT_OCTAL("fmask", fmask)
else NTFS_GETOPT_OCTAL("dmask", dmask)
else NTFS_GETOPT("mft_zone_multiplier", mft_zone_multiplier)
else NTFS_GETOPT_WITH_DEFAULT("sloppy", sloppy, true)
else NTFS_GETOPT_BOOL("show_sys_files", show_sys_files)
else NTFS_GETOPT_BOOL("case_sensitive", case_sensitive)
else NTFS_GETOPT_BOOL("disable_sparse", disable_sparse)
else NTFS_GETOPT_OPTIONS_ARRAY("errors", on_errors,
on_errors_arr)
else if (!strcmp(p, "posix") || !strcmp(p, "show_inodes"))
ntfs_warning(vol->sb, "Ignoring obsolete option %s.",
p);
else if (!strcmp(p, "nls") || !strcmp(p, "iocharset")) {
if (!strcmp(p, "iocharset"))
ntfs_warning(vol->sb, "Option iocharset is "
"deprecated. Please use "
"option nls=<charsetname> in "
"the future.");
if (!v || !*v)
goto needs_arg;
use_utf8:
old_nls = nls_map;
nls_map = load_nls(v);
if (!nls_map) {
if (!old_nls) {
ntfs_error(vol->sb, "NLS character set "
"%s not found.", v);
return false;
}
ntfs_error(vol->sb, "NLS character set %s not "
"found. Using previous one %s.",
v, old_nls->charset);
nls_map = old_nls;
} else /* nls_map */ {
unload_nls(old_nls);
}
} else if (!strcmp(p, "utf8")) {
bool val = false;
ntfs_warning(vol->sb, "Option utf8 is no longer "
"supported, using option nls=utf8. Please "
"use option nls=utf8 in the future and "
"make sure utf8 is compiled either as a "
"module or into the kernel.");
if (!v || !*v)
val = true;
else if (!simple_getbool(v, &val))
goto needs_bool;
if (val) {
v = utf8;
goto use_utf8;
}
} else {
ntfs_error(vol->sb, "Unrecognized mount option %s.", p);
if (errors < INT_MAX)
errors++;
}
#undef NTFS_GETOPT_OPTIONS_ARRAY
#undef NTFS_GETOPT_BOOL
#undef NTFS_GETOPT
#undef NTFS_GETOPT_WITH_DEFAULT
}
no_mount_options:
if (errors && !sloppy)
return false;
if (sloppy)
ntfs_warning(vol->sb, "Sloppy option given. Ignoring "
"unrecognized mount option(s) and continuing.");
/* Keep this first! */
if (on_errors != -1) {
if (!on_errors) {
ntfs_error(vol->sb, "Invalid errors option argument "
"or bug in options parser.");
return false;
}
}
if (nls_map) {
if (vol->nls_map && vol->nls_map != nls_map) {
ntfs_error(vol->sb, "Cannot change NLS character set "
"on remount.");
return false;
} /* else (!vol->nls_map) */
ntfs_debug("Using NLS character set %s.", nls_map->charset);
vol->nls_map = nls_map;
} else /* (!nls_map) */ {
if (!vol->nls_map) {
vol->nls_map = load_nls_default();
if (!vol->nls_map) {
ntfs_error(vol->sb, "Failed to load default "
"NLS character set.");
return false;
}
ntfs_debug("Using default NLS character set (%s).",
vol->nls_map->charset);
}
}
if (mft_zone_multiplier != -1) {
if (vol->mft_zone_multiplier && vol->mft_zone_multiplier !=
mft_zone_multiplier) {
ntfs_error(vol->sb, "Cannot change mft_zone_multiplier "
"on remount.");
return false;
}
if (mft_zone_multiplier < 1 || mft_zone_multiplier > 4) {
ntfs_error(vol->sb, "Invalid mft_zone_multiplier. "
"Using default value, i.e. 1.");
mft_zone_multiplier = 1;
}
vol->mft_zone_multiplier = mft_zone_multiplier;
}
if (!vol->mft_zone_multiplier)
vol->mft_zone_multiplier = 1;
if (on_errors != -1)
vol->on_errors = on_errors;
if (!vol->on_errors || vol->on_errors == ON_ERRORS_RECOVER)
vol->on_errors |= ON_ERRORS_CONTINUE;
if (uid_valid(uid))
vol->uid = uid;
if (gid_valid(gid))
vol->gid = gid;
if (fmask != (umode_t)-1)
vol->fmask = fmask;
if (dmask != (umode_t)-1)
vol->dmask = dmask;
if (show_sys_files != -1) {
if (show_sys_files)
NVolSetShowSystemFiles(vol);
else
NVolClearShowSystemFiles(vol);
}
if (case_sensitive != -1) {
if (case_sensitive)
NVolSetCaseSensitive(vol);
else
NVolClearCaseSensitive(vol);
}
if (disable_sparse != -1) {
if (disable_sparse)
NVolClearSparseEnabled(vol);
else {
if (!NVolSparseEnabled(vol) &&
vol->major_ver && vol->major_ver < 3)
ntfs_warning(vol->sb, "Not enabling sparse "
"support due to NTFS volume "
"version %i.%i (need at least "
"version 3.0).", vol->major_ver,
vol->minor_ver);
else
NVolSetSparseEnabled(vol);
}
}
return true;
needs_arg:
ntfs_error(vol->sb, "The %s option requires an argument.", p);
return false;
needs_bool:
ntfs_error(vol->sb, "The %s option requires a boolean argument.", p);
return false;
needs_val:
ntfs_error(vol->sb, "Invalid %s option argument: %s", p, ov);
return false;
}
#ifdef NTFS_RW
/**
* ntfs_write_volume_flags - write new flags to the volume information flags
* @vol: ntfs volume on which to modify the flags
* @flags: new flags value for the volume information flags
*
* Internal function. You probably want to use ntfs_{set,clear}_volume_flags()
* instead (see below).
*
* Replace the volume information flags on the volume @vol with the value
* supplied in @flags. Note, this overwrites the volume information flags, so
* make sure to combine the flags you want to modify with the old flags and use
* the result when calling ntfs_write_volume_flags().
*
* Return 0 on success and -errno on error.
*/
static int ntfs_write_volume_flags(ntfs_volume *vol, const VOLUME_FLAGS flags)
{
ntfs_inode *ni = NTFS_I(vol->vol_ino);
MFT_RECORD *m;
VOLUME_INFORMATION *vi;
ntfs_attr_search_ctx *ctx;
int err;
ntfs_debug("Entering, old flags = 0x%x, new flags = 0x%x.",
le16_to_cpu(vol->vol_flags), le16_to_cpu(flags));
if (vol->vol_flags == flags)
goto done;
BUG_ON(!ni);
m = map_mft_record(ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(ni, m);
if (!ctx) {
err = -ENOMEM;
goto put_unm_err_out;
}
err = ntfs_attr_lookup(AT_VOLUME_INFORMATION, NULL, 0, 0, 0, NULL, 0,
ctx);
if (err)
goto put_unm_err_out;
vi = (VOLUME_INFORMATION*)((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset));
vol->vol_flags = vi->flags = flags;
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ni);
done:
ntfs_debug("Done.");
return 0;
put_unm_err_out:
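/* ctx is NULL if ntfs_attr_get_search_ctx() failed above. */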
if (ctx)
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ni);
err_out:
ntfs_error(vol->sb, "Failed with error code %i.", -err);
return err;
}
/**
* ntfs_set_volume_flags - set bits in the volume information flags
* @vol: ntfs volume on which to modify the flags
* @flags: flags to set on the volume
*
* Set the bits in @flags in the volume information flags on the volume @vol.
*
* Return 0 on success and -errno on error.
*/
static inline int ntfs_set_volume_flags(ntfs_volume *vol, VOLUME_FLAGS flags)
{
flags &= VOLUME_FLAGS_MASK;
return ntfs_write_volume_flags(vol, vol->vol_flags | flags);
}
/**
* ntfs_clear_volume_flags - clear bits in the volume information flags
* @vol: ntfs volume on which to modify the flags
* @flags: flags to clear on the volume
*
* Clear the bits in @flags in the volume information flags on the volume @vol.
*
* Return 0 on success and -errno on error.
*/
static inline int ntfs_clear_volume_flags(ntfs_volume *vol, VOLUME_FLAGS flags)
{
flags &= VOLUME_FLAGS_MASK;
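/* Clear the requested bits, doing the complement in cpu byte order. */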
flags = vol->vol_flags & cpu_to_le16(~le16_to_cpu(flags));
return ntfs_write_volume_flags(vol, flags);
}
#endif /* NTFS_RW */
/**
* ntfs_remount - change the mount options of a mounted ntfs filesystem
* @sb: superblock of mounted ntfs filesystem
* @flags: remount flags
* @opt: remount options string
*
* Change the mount options of an already mounted ntfs filesystem.
*
* NOTE: The VFS sets the @sb->s_flags remount flags to @flags after
* ntfs_remount() returns successfully (i.e. returns 0). Otherwise,
* @sb->s_flags are not changed.
*/
static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
{
ntfs_volume *vol = NTFS_SB(sb);
ntfs_debug("Entering with remount options string: %s", opt);
sync_filesystem(sb);
#ifndef NTFS_RW
/* For read-only compiled driver, enforce read-only flag. */
*flags |= SB_RDONLY;
#else /* NTFS_RW */
/*
* For the read-write compiled driver, if we are remounting read-write,
* make sure there are no volume errors and that no unsupported volume
* flags are set. Also, empty the logfile journal as it would become
* stale as soon as something is written to the volume and mark the
* volume dirty so that chkdsk is run if the volume is not umounted
* cleanly. Finally, mark the quotas out of date so Windows rescans
* the volume on boot and updates them.
*
* When remounting read-only, mark the volume clean if no volume errors
* have occurred.
*/
if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
static const char *es = ". Cannot remount read-write.";
/* Remounting read-write. */
if (NVolErrors(vol)) {
ntfs_error(sb, "Volume has errors and is read-only%s",
es);
return -EROFS;
}
if (vol->vol_flags & VOLUME_IS_DIRTY) {
ntfs_error(sb, "Volume is dirty and read-only%s", es);
return -EROFS;
}
if (vol->vol_flags & VOLUME_MODIFIED_BY_CHKDSK) {
ntfs_error(sb, "Volume has been modified by chkdsk "
"and is read-only%s", es);
return -EROFS;
}
if (vol->vol_flags & VOLUME_MUST_MOUNT_RO_MASK) {
ntfs_error(sb, "Volume has unsupported flags set "
"(0x%x) and is read-only%s",
(unsigned)le16_to_cpu(vol->vol_flags),
es);
return -EROFS;
}
if (ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY)) {
ntfs_error(sb, "Failed to set dirty bit in volume "
"information flags%s", es);
return -EROFS;
}
#if 0
// TODO: Enable this code once we start modifying anything that
// is different between NTFS 1.2 and 3.x...
/* Set NT4 compatibility flag on newer NTFS version volumes. */
if (vol->major_ver > 1) {
if (ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
ntfs_error(sb, "Failed to set NT4 "
"compatibility flag%s", es);
NVolSetErrors(vol);
return -EROFS;
}
}
#endif
if (!ntfs_empty_logfile(vol->logfile_ino)) {
ntfs_error(sb, "Failed to empty journal $LogFile%s",
es);
NVolSetErrors(vol);
return -EROFS;
}
if (!ntfs_mark_quotas_out_of_date(vol)) {
ntfs_error(sb, "Failed to mark quotas out of date%s",
es);
NVolSetErrors(vol);
return -EROFS;
}
if (!ntfs_stamp_usnjrnl(vol)) {
ntfs_error(sb, "Failed to stamp transaction log "
"($UsnJrnl)%s", es);
NVolSetErrors(vol);
return -EROFS;
}
} else if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
/* Remounting read-only. */
if (!NVolErrors(vol)) {
if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
ntfs_warning(sb, "Failed to clear dirty bit "
"in volume information "
"flags. Run chkdsk.");
}
}
#endif /* NTFS_RW */
// TODO: Deal with *flags.
if (!parse_options(vol, opt))
return -EINVAL;
ntfs_debug("Done.");
return 0;
}
/**
* is_boot_sector_ntfs - check whether a boot sector is a valid NTFS boot sector
* @sb: Super block of the device to which @b belongs.
* @b: Boot sector of device @sb to check.
* @silent: If 'true', all output will be silenced.
*
* is_boot_sector_ntfs() checks whether the boot sector @b is a valid NTFS boot
* sector. Returns 'true' if it is valid and 'false' if not.
*
* @sb is only needed for warning/error output, i.e. it can be NULL when silent
* is 'true'.
*/
static bool is_boot_sector_ntfs(const struct super_block *sb,
const NTFS_BOOT_SECTOR *b, const bool silent)
{
/*
* Check that checksum == sum of u32 values from b to the checksum
* field. If checksum is zero, no checking is done. We still accept the
* volume when the checksum test fails, since some utilities update the
* boot sector ignoring the checksum, which leaves the checksum
* out-of-date. We report a warning if this is the case.
*/
if ((void*)b < (void*)&b->checksum && b->checksum && !silent) {
le32 *u;
u32 i;
for (i = 0, u = (le32*)b; u < (le32*)(&b->checksum); ++u)
i += le32_to_cpup(u);
if (le32_to_cpu(b->checksum) != i)
ntfs_warning(sb, "Invalid boot sector checksum.");
}
/* Check that the OEM identifier is "NTFS " (magicNTFS). */
if (b->oem_id != magicNTFS)
goto not_ntfs;
/* Check bytes per sector value is between 256 and 4096. */
if (le16_to_cpu(b->bpb.bytes_per_sector) < 0x100 ||
le16_to_cpu(b->bpb.bytes_per_sector) > 0x1000)
goto not_ntfs;
/* Check sectors per cluster value is valid. */
switch (b->bpb.sectors_per_cluster) {
case 1: case 2: case 4: case 8: case 16: case 32: case 64: case 128:
break;
default:
goto not_ntfs;
}
/* Check the cluster size is not above the maximum (64kiB). */
if ((u32)le16_to_cpu(b->bpb.bytes_per_sector) *
b->bpb.sectors_per_cluster > NTFS_MAX_CLUSTER_SIZE)
goto not_ntfs;
/* Check reserved/unused fields are really zero. */
if (le16_to_cpu(b->bpb.reserved_sectors) ||
le16_to_cpu(b->bpb.root_entries) ||
le16_to_cpu(b->bpb.sectors) ||
le16_to_cpu(b->bpb.sectors_per_fat) ||
le32_to_cpu(b->bpb.large_sectors) || b->bpb.fats)
goto not_ntfs;
/* Check clusters per file mft record value is valid. */
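/*
 * Negative values (0xe1..0xf7 as u8, i.e. -31..-9 as s8) encode the
 * record size directly as 2^(-value) bytes; positive values must be a
 * power of two number of clusters, at most 64. The same encoding is
 * used for clusters_per_index_record below.
 */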
if ((u8)b->clusters_per_mft_record < 0xe1 ||
(u8)b->clusters_per_mft_record > 0xf7)
switch (b->clusters_per_mft_record) {
case 1: case 2: case 4: case 8: case 16: case 32: case 64:
break;
default:
goto not_ntfs;
}
/* Check clusters per index block value is valid. */
if ((u8)b->clusters_per_index_record < 0xe1 ||
(u8)b->clusters_per_index_record > 0xf7)
switch (b->clusters_per_index_record) {
case 1: case 2: case 4: case 8: case 16: case 32: case 64:
break;
default:
goto not_ntfs;
}
/*
* Check for valid end of sector marker. We will work without it, but
* many BIOSes will refuse to boot from a bootsector if the magic is
* incorrect, so we emit a warning.
*/
if (!silent && b->end_of_sector_marker != cpu_to_le16(0xaa55))
ntfs_warning(sb, "Invalid end of sector marker.");
return true;
not_ntfs:
return false;
}
/**
* read_ntfs_boot_sector - read the NTFS boot sector of a device
* @sb: super block of device to read the boot sector from
* @silent: if true, suppress all output
*
* Reads the boot sector from the device and validates it. If that fails, tries
* to read the backup boot sector, first from the end of the device (where NT4
* and later place it) and then from the middle of the device (where NT3.51 and
* earlier place it).
*
* If a valid boot sector is found but it is not the primary boot sector, we
* repair the primary boot sector silently (unless the device is read-only or
* the primary boot sector is not accessible).
*
* NOTE: To call this function, @sb must have the fields s_dev, the ntfs super
* block (u.ntfs_sb), nr_blocks and the device flags (s_flags) initialized
* to their respective values.
*
* Return the unlocked buffer head containing the boot sector or NULL on error.
*/
static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
const int silent)
{
const char *read_err_str = "Unable to read %s boot sector.";
struct buffer_head *bh_primary, *bh_backup;
sector_t nr_blocks = NTFS_SB(sb)->nr_blocks;
/* Try to read primary boot sector. */
if ((bh_primary = sb_bread(sb, 0))) {
if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
bh_primary->b_data, silent))
return bh_primary;
if (!silent)
ntfs_error(sb, "Primary boot sector is invalid.");
} else if (!silent)
ntfs_error(sb, read_err_str, "primary");
if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
if (bh_primary)
brelse(bh_primary);
if (!silent)
ntfs_error(sb, "Mount option errors=recover not used. "
"Aborting without trying to recover.");
return NULL;
}
/* Try to read NT4+ backup boot sector. */
if ((bh_backup = sb_bread(sb, nr_blocks - 1))) {
if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
bh_backup->b_data, silent))
goto hotfix_primary_boot_sector;
brelse(bh_backup);
} else if (!silent)
ntfs_error(sb, read_err_str, "backup");
/* Try to read NT3.51- backup boot sector. */
if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
bh_backup->b_data, silent))
goto hotfix_primary_boot_sector;
if (!silent)
ntfs_error(sb, "Could not find a valid backup boot "
"sector.");
brelse(bh_backup);
} else if (!silent)
ntfs_error(sb, read_err_str, "backup");
/* We failed. Cleanup and return. */
if (bh_primary)
brelse(bh_primary);
return NULL;
hotfix_primary_boot_sector:
if (bh_primary) {
/*
* If we managed to read sector zero and the volume is not
* read-only, copy the found, valid backup boot sector to the
* primary boot sector. Note we only copy the actual boot
* sector structure, not the actual whole device sector as that
* may be bigger and would potentially damage the $Boot system
* file (FIXME: Would be nice to know if the backup boot sector
* on a large sector device contains the whole boot loader or
* just the first 512 bytes).
*/
if (!sb_rdonly(sb)) {
ntfs_warning(sb, "Hot-fix: Recovering invalid primary "
"boot sector from backup copy.");
memcpy(bh_primary->b_data, bh_backup->b_data,
NTFS_BLOCK_SIZE);
mark_buffer_dirty(bh_primary);
sync_dirty_buffer(bh_primary);
if (buffer_uptodate(bh_primary)) {
brelse(bh_backup);
return bh_primary;
}
ntfs_error(sb, "Hot-fix: Device write error while "
"recovering primary boot sector.");
} else {
ntfs_warning(sb, "Hot-fix: Recovery of primary boot "
"sector failed: Read-only mount.");
}
brelse(bh_primary);
}
ntfs_warning(sb, "Using backup boot sector.");
return bh_backup;
}
/**
* parse_ntfs_boot_sector - parse the boot sector and store the data in @vol
* @vol: volume structure to initialise with data from boot sector
* @b: boot sector to parse
*
* Parse the ntfs boot sector @b and store all important information therein in
* the ntfs super block @vol. Return 'true' on success and 'false' on error.
*/
static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
{
unsigned int sectors_per_cluster_bits, nr_hidden_sects;
int clusters_per_mft_record, clusters_per_index_record;
s64 ll;
vol->sector_size = le16_to_cpu(b->bpb.bytes_per_sector);
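/*
 * bytes_per_sector is expected to be a power of two, so ffs() - 1
 * yields its log2. The same trick computes the other *_bits fields
 * below.
 */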
vol->sector_size_bits = ffs(vol->sector_size) - 1;
ntfs_debug("vol->sector_size = %i (0x%x)", vol->sector_size,
vol->sector_size);
ntfs_debug("vol->sector_size_bits = %i (0x%x)", vol->sector_size_bits,
vol->sector_size_bits);
if (vol->sector_size < vol->sb->s_blocksize) {
ntfs_error(vol->sb, "Sector size (%i) is smaller than the "
"device block size (%lu). This is not "
"supported. Sorry.", vol->sector_size,
vol->sb->s_blocksize);
return false;
}
ntfs_debug("sectors_per_cluster = 0x%x", b->bpb.sectors_per_cluster);
sectors_per_cluster_bits = ffs(b->bpb.sectors_per_cluster) - 1;
ntfs_debug("sectors_per_cluster_bits = 0x%x",
sectors_per_cluster_bits);
nr_hidden_sects = le32_to_cpu(b->bpb.hidden_sectors);
ntfs_debug("number of hidden sectors = 0x%x", nr_hidden_sects);
vol->cluster_size = vol->sector_size << sectors_per_cluster_bits;
vol->cluster_size_mask = vol->cluster_size - 1;
vol->cluster_size_bits = ffs(vol->cluster_size) - 1;
ntfs_debug("vol->cluster_size = %i (0x%x)", vol->cluster_size,
vol->cluster_size);
ntfs_debug("vol->cluster_size_mask = 0x%x", vol->cluster_size_mask);
ntfs_debug("vol->cluster_size_bits = %i", vol->cluster_size_bits);
if (vol->cluster_size < vol->sector_size) {
ntfs_error(vol->sb, "Cluster size (%i) is smaller than the "
"sector size (%i). This is not supported. "
"Sorry.", vol->cluster_size, vol->sector_size);
return false;
}
clusters_per_mft_record = b->clusters_per_mft_record;
ntfs_debug("clusters_per_mft_record = %i (0x%x)",
clusters_per_mft_record, clusters_per_mft_record);
if (clusters_per_mft_record > 0)
vol->mft_record_size = vol->cluster_size <<
(ffs(clusters_per_mft_record) - 1);
else
/*
* When mft_record_size < cluster_size, clusters_per_mft_record
* = -log2(mft_record_size in bytes). mft_record_size normally is
* 1024 bytes, which is encoded as 0xF6 (-10 in decimal).
*/
vol->mft_record_size = 1 << -clusters_per_mft_record;
vol->mft_record_size_mask = vol->mft_record_size - 1;
vol->mft_record_size_bits = ffs(vol->mft_record_size) - 1;
ntfs_debug("vol->mft_record_size = %i (0x%x)", vol->mft_record_size,
vol->mft_record_size);
ntfs_debug("vol->mft_record_size_mask = 0x%x",
vol->mft_record_size_mask);
ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
vol->mft_record_size_bits, vol->mft_record_size_bits);
/*
* We cannot support mft record sizes above the PAGE_SIZE since
* we store $MFT/$DATA, the table of mft records in the page cache.
*/
if (vol->mft_record_size > PAGE_SIZE) {
ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
"PAGE_SIZE on your system (%lu). "
"This is not supported. Sorry.",
vol->mft_record_size, PAGE_SIZE);
return false;
}
/* We cannot support mft record sizes below the sector size. */
if (vol->mft_record_size < vol->sector_size) {
ntfs_error(vol->sb, "Mft record size (%i) is smaller than the "
"sector size (%i). This is not supported. "
"Sorry.", vol->mft_record_size,
vol->sector_size);
return false;
}
clusters_per_index_record = b->clusters_per_index_record;
ntfs_debug("clusters_per_index_record = %i (0x%x)",
clusters_per_index_record, clusters_per_index_record);
if (clusters_per_index_record > 0)
vol->index_record_size = vol->cluster_size <<
(ffs(clusters_per_index_record) - 1);
else
/*
* When index_record_size < cluster_size,
* clusters_per_index_record = -log2(index_record_size) bytes.
* index_record_size normally equals 4096 bytes, which is
* encoded as 0xF4 (-12 in decimal).
*/
vol->index_record_size = 1 << -clusters_per_index_record;
vol->index_record_size_mask = vol->index_record_size - 1;
vol->index_record_size_bits = ffs(vol->index_record_size) - 1;
ntfs_debug("vol->index_record_size = %i (0x%x)",
vol->index_record_size, vol->index_record_size);
ntfs_debug("vol->index_record_size_mask = 0x%x",
vol->index_record_size_mask);
ntfs_debug("vol->index_record_size_bits = %i (0x%x)",
vol->index_record_size_bits,
vol->index_record_size_bits);
/* We cannot support index record sizes below the sector size. */
if (vol->index_record_size < vol->sector_size) {
ntfs_error(vol->sb, "Index record size (%i) is smaller than "
"the sector size (%i). This is not "
"supported. Sorry.", vol->index_record_size,
vol->sector_size);
return false;
}
/*
* Get the size of the volume in clusters and check for 64-bit-ness.
* Windows currently only uses 32 bits to store the cluster count, so
* we do the same as it is much faster on 32-bit CPUs.
*/
ll = sle64_to_cpu(b->number_of_sectors) >> sectors_per_cluster_bits;
if ((u64)ll >= 1ULL << 32) {
ntfs_error(vol->sb, "Cannot handle 64-bit clusters. Sorry.");
return false;
}
vol->nr_clusters = ll;
ntfs_debug("vol->nr_clusters = 0x%llx", (long long)vol->nr_clusters);
/*
* On an architecture where unsigned long is 32-bits, we restrict the
* volume size to 2TiB (2^41). On a 64-bit architecture, the compiler
* will hopefully optimize the whole check away.
*/
if (sizeof(unsigned long) < 8) {
if ((ll << vol->cluster_size_bits) >= (1ULL << 41)) {
ntfs_error(vol->sb, "Volume size (%lluTiB) is too "
"large for this architecture. "
"Maximum supported is 2TiB. Sorry.",
(unsigned long long)ll >> (40 -
vol->cluster_size_bits));
return false;
}
}
ll = sle64_to_cpu(b->mft_lcn);
if (ll >= vol->nr_clusters) {
ntfs_error(vol->sb, "MFT LCN (%lli, 0x%llx) is beyond end of "
"volume. Weird.", (unsigned long long)ll,
(unsigned long long)ll);
return false;
}
vol->mft_lcn = ll;
ntfs_debug("vol->mft_lcn = 0x%llx", (long long)vol->mft_lcn);
ll = sle64_to_cpu(b->mftmirr_lcn);
if (ll >= vol->nr_clusters) {
ntfs_error(vol->sb, "MFTMirr LCN (%lli, 0x%llx) is beyond end "
"of volume. Weird.", (unsigned long long)ll,
(unsigned long long)ll);
return false;
}
vol->mftmirr_lcn = ll;
ntfs_debug("vol->mftmirr_lcn = 0x%llx", (long long)vol->mftmirr_lcn);
#ifdef NTFS_RW
/*
* Work out the size of the mft mirror in number of mft records. If the
* cluster size is less than or equal to the size taken by four mft
* records, the mft mirror stores the first four mft records. If the
* cluster size is bigger than the size taken by four mft records, the
* mft mirror contains as many mft records as will fit into one
* cluster.
*/
if (vol->cluster_size <= (4 << vol->mft_record_size_bits))
vol->mftmirr_size = 4;
else
vol->mftmirr_size = vol->cluster_size >>
vol->mft_record_size_bits;
ntfs_debug("vol->mftmirr_size = %i", vol->mftmirr_size);
#endif /* NTFS_RW */
vol->serial_no = le64_to_cpu(b->volume_serial_number);
ntfs_debug("vol->serial_no = 0x%llx",
(unsigned long long)vol->serial_no);
return true;
}
/**
* ntfs_setup_allocators - initialize the cluster and mft allocators
* @vol: volume structure for which to setup the allocators
*
* Setup the cluster (lcn) and mft allocators to the starting values.
*/
static void ntfs_setup_allocators(ntfs_volume *vol)
{
#ifdef NTFS_RW
LCN mft_zone_size, mft_lcn;
#endif /* NTFS_RW */
ntfs_debug("vol->mft_zone_multiplier = 0x%x",
vol->mft_zone_multiplier);
#ifdef NTFS_RW
/* Determine the size of the MFT zone. */
mft_zone_size = vol->nr_clusters;
switch (vol->mft_zone_multiplier) { /* % of volume size in clusters */
case 4:
mft_zone_size >>= 1; /* 50% */
break;
case 3:
mft_zone_size = (mft_zone_size +
(mft_zone_size >> 1)) >> 2; /* 37.5% */
break;
case 2:
mft_zone_size >>= 2; /* 25% */
break;
/* case 1: */
default:
mft_zone_size >>= 3; /* 12.5% */
break;
}
/* Setup the mft zone. */
vol->mft_zone_start = vol->mft_zone_pos = vol->mft_lcn;
ntfs_debug("vol->mft_zone_pos = 0x%llx",
(unsigned long long)vol->mft_zone_pos);
/*
* Calculate the mft_lcn for an unmodified NTFS volume (see mkntfs
* source) and if the actual mft_lcn is in the expected place or even
* further to the front of the volume, extend the mft_zone to cover the
* beginning of the volume as well. This is in order to protect the
* area reserved for the mft bitmap as well within the mft_zone itself.
* On non-standard volumes we do not protect it as the overhead would
* be higher than the speed increase we would get by doing it.
*/
mft_lcn = (8192 + 2 * vol->cluster_size - 1) / vol->cluster_size;
if (mft_lcn * vol->cluster_size < 16 * 1024)
mft_lcn = (16 * 1024 + vol->cluster_size - 1) /
vol->cluster_size;
if (vol->mft_zone_start <= mft_lcn)
vol->mft_zone_start = 0;
ntfs_debug("vol->mft_zone_start = 0x%llx",
(unsigned long long)vol->mft_zone_start);
/*
* Need to cap the mft zone on non-standard volumes so that it does
* not point outside the boundaries of the volume. We do this by
* halving the zone size until we are inside the volume.
*/
vol->mft_zone_end = vol->mft_lcn + mft_zone_size;
while (vol->mft_zone_end >= vol->nr_clusters) {
mft_zone_size >>= 1;
vol->mft_zone_end = vol->mft_lcn + mft_zone_size;
}
ntfs_debug("vol->mft_zone_end = 0x%llx",
(unsigned long long)vol->mft_zone_end);
/*
* Set the current position within each data zone to the start of the
* respective zone.
*/
vol->data1_zone_pos = vol->mft_zone_end;
ntfs_debug("vol->data1_zone_pos = 0x%llx",
(unsigned long long)vol->data1_zone_pos);
vol->data2_zone_pos = 0;
ntfs_debug("vol->data2_zone_pos = 0x%llx",
(unsigned long long)vol->data2_zone_pos);
/* Set the mft data allocation position to mft record 24. */
vol->mft_data_pos = 24;
ntfs_debug("vol->mft_data_pos = 0x%llx",
(unsigned long long)vol->mft_data_pos);
#endif /* NTFS_RW */
}
#ifdef NTFS_RW
/**
* load_and_init_mft_mirror - load and setup the mft mirror inode for a volume
* @vol: ntfs super block describing device whose mft mirror to load
*
* Return 'true' on success or 'false' on error.
*/
static bool load_and_init_mft_mirror(ntfs_volume *vol)
{
struct inode *tmp_ino;
ntfs_inode *tmp_ni;
ntfs_debug("Entering.");
/* Get mft mirror inode. */
tmp_ino = ntfs_iget(vol->sb, FILE_MFTMirr);
if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
/* Caller will display error message. */
return false;
}
/*
* Re-initialize some specifics about $MFTMirr's inode as
* ntfs_read_inode() will have set up the default ones.
*/
/* Set uid and gid to root. */
tmp_ino->i_uid = GLOBAL_ROOT_UID;
tmp_ino->i_gid = GLOBAL_ROOT_GID;
/* Regular file. No access for anyone. */
tmp_ino->i_mode = S_IFREG;
/* No VFS initiated operations allowed for $MFTMirr. */
tmp_ino->i_op = &ntfs_empty_inode_ops;
tmp_ino->i_fop = &ntfs_empty_file_ops;
/* Put in our special address space operations. */
tmp_ino->i_mapping->a_ops = &ntfs_mst_aops;
tmp_ni = NTFS_I(tmp_ino);
/* The $MFTMirr, like the $MFT is multi sector transfer protected. */
NInoSetMstProtected(tmp_ni);
NInoSetSparseDisabled(tmp_ni);
/*
* Set up our little cheat allowing us to reuse the async read io
* completion handler for directories.
*/
tmp_ni->itype.index.block_size = vol->mft_record_size;
tmp_ni->itype.index.block_size_bits = vol->mft_record_size_bits;
vol->mftmirr_ino = tmp_ino;
ntfs_debug("Done.");
return true;
}
/**
* check_mft_mirror - compare contents of the mft mirror with the mft
* @vol: ntfs super block describing device whose mft mirror to check
*
* Return 'true' on success or 'false' on error.
*
* Note, this function also results in the mft mirror runlist being completely
* mapped into memory. The mft mirror write code requires this and will BUG()
* should it find an unmapped runlist element.
*/
static bool check_mft_mirror(ntfs_volume *vol)
{
struct super_block *sb = vol->sb;
ntfs_inode *mirr_ni;
struct page *mft_page, *mirr_page;
u8 *kmft, *kmirr;
runlist_element *rl, rl2[2];
pgoff_t index;
int mrecs_per_page, i;
ntfs_debug("Entering.");
/* Compare contents of $MFT and $MFTMirr. */
mrecs_per_page = PAGE_SIZE / vol->mft_record_size;
BUG_ON(!mrecs_per_page);
BUG_ON(!vol->mftmirr_size);
mft_page = mirr_page = NULL;
kmft = kmirr = NULL;
index = i = 0;
do {
u32 bytes;
/* Switch pages if necessary. */
if (!(i % mrecs_per_page)) {
if (index) {
ntfs_unmap_page(mft_page);
ntfs_unmap_page(mirr_page);
}
/* Get the $MFT page. */
mft_page = ntfs_map_page(vol->mft_ino->i_mapping,
index);
if (IS_ERR(mft_page)) {
ntfs_error(sb, "Failed to read $MFT.");
return false;
}
kmft = page_address(mft_page);
/* Get the $MFTMirr page. */
mirr_page = ntfs_map_page(vol->mftmirr_ino->i_mapping,
index);
if (IS_ERR(mirr_page)) {
ntfs_error(sb, "Failed to read $MFTMirr.");
goto mft_unmap_out;
}
kmirr = page_address(mirr_page);
++index;
}
/* Do not check the record if it is not in use. */
if (((MFT_RECORD*)kmft)->flags & MFT_RECORD_IN_USE) {
/* Make sure the record is ok. */
if (ntfs_is_baad_recordp((le32*)kmft)) {
ntfs_error(sb, "Incomplete multi sector "
"transfer detected in mft "
"record %i.", i);
mm_unmap_out:
ntfs_unmap_page(mirr_page);
mft_unmap_out:
ntfs_unmap_page(mft_page);
return false;
}
}
/* Do not check the mirror record if it is not in use. */
if (((MFT_RECORD*)kmirr)->flags & MFT_RECORD_IN_USE) {
if (ntfs_is_baad_recordp((le32*)kmirr)) {
ntfs_error(sb, "Incomplete multi sector "
"transfer detected in mft "
"mirror record %i.", i);
goto mm_unmap_out;
}
}
/* Get the amount of data in the current record. */
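/*
 * If the $MFT record's bytes_in_use is bogus (or the record fails the
 * multi sector transfer check), fall back to the mirror's value, and
 * if that is bogus, too, compare the full record size.
 */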
bytes = le32_to_cpu(((MFT_RECORD*)kmft)->bytes_in_use);
if (bytes < sizeof(MFT_RECORD_OLD) ||
bytes > vol->mft_record_size ||
ntfs_is_baad_recordp((le32*)kmft)) {
bytes = le32_to_cpu(((MFT_RECORD*)kmirr)->bytes_in_use);
if (bytes < sizeof(MFT_RECORD_OLD) ||
bytes > vol->mft_record_size ||
ntfs_is_baad_recordp((le32*)kmirr))
bytes = vol->mft_record_size;
}
/* Compare the two records. */
if (memcmp(kmft, kmirr, bytes)) {
ntfs_error(sb, "$MFT and $MFTMirr (record %i) do not "
"match. Run ntfsfix or chkdsk.", i);
goto mm_unmap_out;
}
kmft += vol->mft_record_size;
kmirr += vol->mft_record_size;
} while (++i < vol->mftmirr_size);
/* Release the last pages. */
ntfs_unmap_page(mft_page);
ntfs_unmap_page(mirr_page);
/* Construct the mft mirror runlist by hand. */
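/*
 * Two elements suffice: one run covering the whole of $MFTMirr and a
 * terminating LCN_ENOENT element, matching what runlist decompression
 * would produce for it.
 */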
rl2[0].vcn = 0;
rl2[0].lcn = vol->mftmirr_lcn;
rl2[0].length = (vol->mftmirr_size * vol->mft_record_size +
vol->cluster_size - 1) / vol->cluster_size;
rl2[1].vcn = rl2[0].length;
rl2[1].lcn = LCN_ENOENT;
rl2[1].length = 0;
/*
* Because we have just read all of the mft mirror, we know we have
* mapped the full runlist for it.
*/
mirr_ni = NTFS_I(vol->mftmirr_ino);
down_read(&mirr_ni->runlist.lock);
rl = mirr_ni->runlist.rl;
/* Compare the two runlists. They must be identical. */
i = 0;
do {
if (rl2[i].vcn != rl[i].vcn || rl2[i].lcn != rl[i].lcn ||
rl2[i].length != rl[i].length) {
ntfs_error(sb, "$MFTMirr location mismatch. "
"Run chkdsk.");
up_read(&mirr_ni->runlist.lock);
return false;
}
} while (rl2[i++].length);
up_read(&mirr_ni->runlist.lock);
ntfs_debug("Done.");
return true;
}
/**
* load_and_check_logfile - load and check the logfile inode for a volume
* @vol: ntfs super block describing device whose logfile to load
*
* Return 'true' on success or 'false' on error.
*/
static bool load_and_check_logfile(ntfs_volume *vol,
RESTART_PAGE_HEADER **rp)
{
struct inode *tmp_ino;
ntfs_debug("Entering.");
tmp_ino = ntfs_iget(vol->sb, FILE_LogFile);
if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
/* Caller will display error message. */
return false;
}
if (!ntfs_check_logfile(tmp_ino, rp)) {
iput(tmp_ino);
/* ntfs_check_logfile() will have displayed error output. */
return false;
}
NInoSetSparseDisabled(NTFS_I(tmp_ino));
vol->logfile_ino = tmp_ino;
ntfs_debug("Done.");
return true;
}
#define NTFS_HIBERFIL_HEADER_SIZE 4096
/**
* check_windows_hibernation_status - check if Windows is suspended on a volume
* @vol: ntfs super block of device to check
*
* Check if Windows is hibernated on the ntfs volume @vol. This is done by
* looking for the file hiberfil.sys in the root directory of the volume. If
* the file is not present Windows is definitely not suspended.
*
* If hiberfil.sys exists and is less than 4kiB in size it means Windows is
* definitely suspended (this volume is not the system volume). Caveat: on a
* system with many volumes it is possible that the < 4kiB check is bogus but
* for now this should do fine.
*
* If hiberfil.sys exists and is larger than 4kiB in size, we need to read the
* hiberfil header (which is the first 4kiB). If this begins with "hibr",
* Windows is definitely suspended. If it is completely full of zeroes,
* Windows is definitely not hibernated. Any other case is treated as if
* Windows is suspended. This caters for the above mentioned caveat of a
* system with many volumes where no "hibr" magic would be present and there is
* no zero header.
*
* Return 0 if Windows is not hibernated on the volume, >0 if Windows is
* hibernated on the volume, and -errno on error.
*/
static int check_windows_hibernation_status(ntfs_volume *vol)
{
MFT_REF mref;
struct inode *vi;
struct page *page;
u32 *kaddr, *kend;
ntfs_name *name = NULL;
int ret = 1;
static const ntfschar hiberfil[13] = { cpu_to_le16('h'),
cpu_to_le16('i'), cpu_to_le16('b'),
cpu_to_le16('e'), cpu_to_le16('r'),
cpu_to_le16('f'), cpu_to_le16('i'),
cpu_to_le16('l'), cpu_to_le16('.'),
cpu_to_le16('s'), cpu_to_le16('y'),
cpu_to_le16('s'), 0 };
ntfs_debug("Entering.");
/*
* Find the inode number for the hibernation file by looking up the
* filename hiberfil.sys in the root directory.
*/
inode_lock(vol->root_ino);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->root_ino), hiberfil, 12,
&name);
inode_unlock(vol->root_ino);
if (IS_ERR_MREF(mref)) {
ret = MREF_ERR(mref);
/* If the file does not exist, Windows is not hibernated. */
if (ret == -ENOENT) {
ntfs_debug("hiberfil.sys not present. Windows is not "
"hibernated on the volume.");
return 0;
}
/* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for "
"hiberfil.sys.");
return ret;
}
/* We do not care for the type of match that was found. */
kfree(name);
/* Get the inode. */
vi = ntfs_iget(vol->sb, MREF(mref));
if (IS_ERR(vi) || is_bad_inode(vi)) {
if (!IS_ERR(vi))
iput(vi);
ntfs_error(vol->sb, "Failed to load hiberfil.sys.");
return IS_ERR(vi) ? PTR_ERR(vi) : -EIO;
}
if (unlikely(i_size_read(vi) < NTFS_HIBERFIL_HEADER_SIZE)) {
ntfs_debug("hiberfil.sys is smaller than 4kiB (0x%llx). "
"Windows is hibernated on the volume. This "
"is not the system volume.", i_size_read(vi));
goto iput_out;
}
page = ntfs_map_page(vi->i_mapping, 0);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read from hiberfil.sys.");
ret = PTR_ERR(page);
goto iput_out;
}
kaddr = (u32*)page_address(page);
if (*(le32*)kaddr == cpu_to_le32(0x72626968)/*'hibr'*/) {
ntfs_debug("Magic \"hibr\" found in hiberfil.sys. Windows is "
"hibernated on the volume. This is the "
"system volume.");
goto unm_iput_out;
}
kend = kaddr + NTFS_HIBERFIL_HEADER_SIZE/sizeof(*kaddr);
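/*
 * No "hibr" magic, so the whole 4kiB header must be zero for Windows
 * not to be hibernated on this volume.
 */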
do {
if (unlikely(*kaddr)) {
ntfs_debug("hiberfil.sys is larger than 4kiB "
"(0x%llx), does not contain the "
"\"hibr\" magic, and does not have a "
"zero header. Windows is hibernated "
"on the volume. This is not the "
"system volume.", i_size_read(vi));
goto unm_iput_out;
}
} while (++kaddr < kend);
ntfs_debug("hiberfil.sys contains a zero header. Windows is not "
"hibernated on the volume. This is the system "
"volume.");
ret = 0;
unm_iput_out:
ntfs_unmap_page(page);
iput_out:
iput(vi);
return ret;
}
/**
* load_and_init_quota - load and setup the quota file for a volume if present
* @vol: ntfs super block describing device whose quota file to load
*
* Return 'true' on success or 'false' on error. If $Quota is not present, we
* leave vol->quota_ino as NULL and return success.
*/
static bool load_and_init_quota(ntfs_volume *vol)
{
MFT_REF mref;
struct inode *tmp_ino;
ntfs_name *name = NULL;
static const ntfschar Quota[7] = { cpu_to_le16('$'),
cpu_to_le16('Q'), cpu_to_le16('u'),
cpu_to_le16('o'), cpu_to_le16('t'),
cpu_to_le16('a'), 0 };
static ntfschar Q[3] = { cpu_to_le16('$'),
cpu_to_le16('Q'), 0 };
ntfs_debug("Entering.");
/*
* Find the inode number for the quota file by looking up the filename
* $Quota in the extended system files directory $Extend.
*/
inode_lock(vol->extend_ino);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), Quota, 6,
&name);
inode_unlock(vol->extend_ino);
if (IS_ERR_MREF(mref)) {
/*
* If the file does not exist, quotas are disabled and have
* never been enabled on this volume, just return success.
*/
if (MREF_ERR(mref) == -ENOENT) {
ntfs_debug("$Quota not present. Volume does not have "
"quotas enabled.");
/*
* No need to try to set quotas out of date if they are
* not enabled.
*/
NVolSetQuotaOutOfDate(vol);
return true;
}
/* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for $Quota.");
return false;
}
/* We do not care for the type of match that was found. */
kfree(name);
/* Get the inode. */
tmp_ino = ntfs_iget(vol->sb, MREF(mref));
if (IS_ERR(tmp_ino) || is_bad_inode(tmp_ino)) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(vol->sb, "Failed to load $Quota.");
return false;
}
vol->quota_ino = tmp_ino;
/* Get the $Q index allocation attribute. */
tmp_ino = ntfs_index_iget(vol->quota_ino, Q, 2);
if (IS_ERR(tmp_ino)) {
ntfs_error(vol->sb, "Failed to load $Quota/$Q index.");
return false;
}
vol->quota_q_ino = tmp_ino;
ntfs_debug("Done.");
return true;
}
/**
* load_and_init_usnjrnl - load and setup the transaction log if present
* @vol: ntfs super block describing device whose usnjrnl file to load
*
* Return 'true' on success or 'false' on error.
*
* If $UsnJrnl is not present or in the process of being disabled, we set
* NVolUsnJrnlStamped() and return success.
*
* If the $UsnJrnl $DATA/$J attribute has a size equal to the lowest valid usn,
* i.e. transaction logging has only just been enabled or the journal has been
* stamped and nothing has been logged since, we also set NVolUsnJrnlStamped()
* and return success.
*/
static bool load_and_init_usnjrnl(ntfs_volume *vol)
{
MFT_REF mref;
struct inode *tmp_ino;
ntfs_inode *tmp_ni;
struct page *page;
ntfs_name *name = NULL;
USN_HEADER *uh;
static const ntfschar UsnJrnl[9] = { cpu_to_le16('$'),
cpu_to_le16('U'), cpu_to_le16('s'),
cpu_to_le16('n'), cpu_to_le16('J'),
cpu_to_le16('r'), cpu_to_le16('n'),
cpu_to_le16('l'), 0 };
static ntfschar Max[5] = { cpu_to_le16('$'),
cpu_to_le16('M'), cpu_to_le16('a'),
cpu_to_le16('x'), 0 };
static ntfschar J[3] = { cpu_to_le16('$'),
cpu_to_le16('J'), 0 };
ntfs_debug("Entering.");
/*
* Find the inode number for the transaction log file by looking up the
* filename $UsnJrnl in the extended system files directory $Extend.
*/
inode_lock(vol->extend_ino);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), UsnJrnl, 8,
&name);
inode_unlock(vol->extend_ino);
if (IS_ERR_MREF(mref)) {
/*
* If the file does not exist, transaction logging is disabled,
* just return success.
*/
if (MREF_ERR(mref) == -ENOENT) {
ntfs_debug("$UsnJrnl not present. Volume does not "
"have transaction logging enabled.");
not_enabled:
/*
* No need to try to stamp the transaction log if
* transaction logging is not enabled.
*/
NVolSetUsnJrnlStamped(vol);
return true;
}
/* A real error occurred. */
ntfs_error(vol->sb, "Failed to find inode number for "
"$UsnJrnl.");
return false;
}
/* We do not care for the type of match that was found. */
kfree(name);
/* Get the inode. */
tmp_ino = ntfs_iget(vol->sb, MREF(mref));
if (IS_ERR(tmp_ino) || unlikely(is_bad_inode(tmp_ino))) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(vol->sb, "Failed to load $UsnJrnl.");
return false;
}
vol->usnjrnl_ino = tmp_ino;
/*
* If the transaction log is in the process of being deleted, we can
* ignore it.
*/
if (unlikely(vol->vol_flags & VOLUME_DELETE_USN_UNDERWAY)) {
ntfs_debug("$UsnJrnl in the process of being disabled. "
"Volume does not have transaction logging "
"enabled.");
goto not_enabled;
}
/* Get the $DATA/$Max attribute. */
tmp_ino = ntfs_attr_iget(vol->usnjrnl_ino, AT_DATA, Max, 4);
if (IS_ERR(tmp_ino)) {
ntfs_error(vol->sb, "Failed to load $UsnJrnl/$DATA/$Max "
"attribute.");
return false;
}
vol->usnjrnl_max_ino = tmp_ino;
if (unlikely(i_size_read(tmp_ino) < sizeof(USN_HEADER))) {
ntfs_error(vol->sb, "Found corrupt $UsnJrnl/$DATA/$Max "
"attribute (size is 0x%llx but should be at "
"least 0x%zx bytes).", i_size_read(tmp_ino),
sizeof(USN_HEADER));
return false;
}
/* Get the $DATA/$J attribute. */
tmp_ino = ntfs_attr_iget(vol->usnjrnl_ino, AT_DATA, J, 2);
if (IS_ERR(tmp_ino)) {
ntfs_error(vol->sb, "Failed to load $UsnJrnl/$DATA/$J "
"attribute.");
return false;
}
vol->usnjrnl_j_ino = tmp_ino;
/* Verify $J is non-resident and sparse. */
tmp_ni = NTFS_I(vol->usnjrnl_j_ino);
if (unlikely(!NInoNonResident(tmp_ni) || !NInoSparse(tmp_ni))) {
ntfs_error(vol->sb, "$UsnJrnl/$DATA/$J attribute is resident "
"and/or not sparse.");
return false;
}
/* Read the USN_HEADER from $DATA/$Max. */
page = ntfs_map_page(vol->usnjrnl_max_ino->i_mapping, 0);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read from $UsnJrnl/$DATA/$Max "
"attribute.");
return false;
}
uh = (USN_HEADER*)page_address(page);
/* Sanity check the $Max. */
if (unlikely(sle64_to_cpu(uh->allocation_delta) >
sle64_to_cpu(uh->maximum_size))) {
ntfs_error(vol->sb, "Allocation delta (0x%llx) exceeds "
"maximum size (0x%llx). $UsnJrnl is corrupt.",
(long long)sle64_to_cpu(uh->allocation_delta),
(long long)sle64_to_cpu(uh->maximum_size));
ntfs_unmap_page(page);
return false;
}
/*
* If the transaction log has been stamped and nothing has been written
* to it since, we do not need to stamp it.
*/
if (unlikely(sle64_to_cpu(uh->lowest_valid_usn) >=
i_size_read(vol->usnjrnl_j_ino))) {
if (likely(sle64_to_cpu(uh->lowest_valid_usn) ==
i_size_read(vol->usnjrnl_j_ino))) {
ntfs_unmap_page(page);
ntfs_debug("$UsnJrnl is enabled but nothing has been "
"logged since it was last stamped. "
"Treating this as if the volume does "
"not have transaction logging "
"enabled.");
goto not_enabled;
}
ntfs_error(vol->sb, "$UsnJrnl has lowest valid usn (0x%llx) "
"which is out of bounds (0x%llx). $UsnJrnl "
"is corrupt.",
(long long)sle64_to_cpu(uh->lowest_valid_usn),
i_size_read(vol->usnjrnl_j_ino));
ntfs_unmap_page(page);
return false;
}
ntfs_unmap_page(page);
ntfs_debug("Done.");
return true;
}
/**
* load_and_init_attrdef - load the attribute definitions table for a volume
* @vol: ntfs super block describing device whose attrdef to load
*
* Return 'true' on success or 'false' on error.
*/
static bool load_and_init_attrdef(ntfs_volume *vol)
{
loff_t i_size;
struct super_block *sb = vol->sb;
struct inode *ino;
struct page *page;
pgoff_t index, max_index;
unsigned int size;
ntfs_debug("Entering.");
/* Read attrdef table and setup vol->attrdef and vol->attrdef_size. */
ino = ntfs_iget(sb, FILE_AttrDef);
if (IS_ERR(ino) || is_bad_inode(ino)) {
if (!IS_ERR(ino))
iput(ino);
goto failed;
}
NInoSetSparseDisabled(NTFS_I(ino));
/* The size of FILE_AttrDef must be above 0 and fit inside 31 bits. */
i_size = i_size_read(ino);
if (i_size <= 0 || i_size > 0x7fffffff)
goto iput_failed;
vol->attrdef = (ATTR_DEF*)ntfs_malloc_nofs(i_size);
if (!vol->attrdef)
goto iput_failed;
index = 0;
max_index = i_size >> PAGE_SHIFT;
size = PAGE_SIZE;
while (index < max_index) {
/* Read the attrdef table and copy it into the linear buffer. */
read_partial_attrdef_page:
page = ntfs_map_page(ino->i_mapping, index);
if (IS_ERR(page))
goto free_iput_failed;
memcpy((u8*)vol->attrdef + (index++ << PAGE_SHIFT),
page_address(page), size);
ntfs_unmap_page(page);
}
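/*
 * If i_size is not a multiple of PAGE_SIZE, copy the final, partial
 * page by jumping back into the loop body with the reduced size.
 */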
if (size == PAGE_SIZE) {
size = i_size & ~PAGE_MASK;
if (size)
goto read_partial_attrdef_page;
}
vol->attrdef_size = i_size;
ntfs_debug("Read %llu bytes from $AttrDef.", i_size);
iput(ino);
return true;
free_iput_failed:
ntfs_free(vol->attrdef);
vol->attrdef = NULL;
iput_failed:
iput(ino);
failed:
ntfs_error(sb, "Failed to initialize attribute definition table.");
return false;
}
#endif /* NTFS_RW */
/**
* load_and_init_upcase - load the upcase table for an ntfs volume
* @vol: ntfs super block describing device whose upcase to load
*
* Return 'true' on success or 'false' on error.
*/
static bool load_and_init_upcase(ntfs_volume *vol)
{
loff_t i_size;
struct super_block *sb = vol->sb;
struct inode *ino;
struct page *page;
pgoff_t index, max_index;
unsigned int size;
int i, max;
ntfs_debug("Entering.");
/* Read upcase table and setup vol->upcase and vol->upcase_len. */
ino = ntfs_iget(sb, FILE_UpCase);
if (IS_ERR(ino) || is_bad_inode(ino)) {
if (!IS_ERR(ino))
iput(ino);
goto upcase_failed;
}
/*
* The upcase size must not be above 64k Unicode characters, must not
* be zero and must be a multiple of sizeof(ntfschar).
*/
i_size = i_size_read(ino);
if (!i_size || i_size & (sizeof(ntfschar) - 1) ||
i_size > 64ULL * 1024 * sizeof(ntfschar))
goto iput_upcase_failed;
vol->upcase = (ntfschar*)ntfs_malloc_nofs(i_size);
if (!vol->upcase)
goto iput_upcase_failed;
index = 0;
max_index = i_size >> PAGE_SHIFT;
size = PAGE_SIZE;
while (index < max_index) {
/* Read the upcase table and copy it into the linear buffer. */
read_partial_upcase_page:
page = ntfs_map_page(ino->i_mapping, index);
if (IS_ERR(page))
goto iput_upcase_failed;
memcpy((char*)vol->upcase + (index++ << PAGE_SHIFT),
page_address(page), size);
ntfs_unmap_page(page);
}
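/*
 * If i_size is not a multiple of PAGE_SIZE, jump back to copy the
 * final, partial page with the reduced size.
 */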
if (size == PAGE_SIZE) {
size = i_size & ~PAGE_MASK;
if (size)
goto read_partial_upcase_page;
}
vol->upcase_len = i_size >> UCHAR_T_SIZE_BITS;
ntfs_debug("Read %llu bytes from $UpCase (expected %zu bytes).",
i_size, 64 * 1024 * sizeof(ntfschar));
iput(ino);
mutex_lock(&ntfs_lock);
if (!default_upcase) {
ntfs_debug("Using volume specified $UpCase since default is "
"not present.");
mutex_unlock(&ntfs_lock);
return true;
}
max = default_upcase_len;
if (max > vol->upcase_len)
max = vol->upcase_len;
for (i = 0; i < max; i++)
if (vol->upcase[i] != default_upcase[i])
break;
if (i == max) {
ntfs_free(vol->upcase);
vol->upcase = default_upcase;
vol->upcase_len = max;
ntfs_nr_upcase_users++;
mutex_unlock(&ntfs_lock);
ntfs_debug("Volume specified $UpCase matches default. Using "
"default.");
return true;
}
mutex_unlock(&ntfs_lock);
ntfs_debug("Using volume specified $UpCase since it does not match "
"the default.");
return true;
iput_upcase_failed:
iput(ino);
ntfs_free(vol->upcase);
vol->upcase = NULL;
upcase_failed:
mutex_lock(&ntfs_lock);
if (default_upcase) {
vol->upcase = default_upcase;
vol->upcase_len = default_upcase_len;
ntfs_nr_upcase_users++;
mutex_unlock(&ntfs_lock);
ntfs_error(sb, "Failed to load $UpCase from the volume. Using "
"default.");
return true;
}
mutex_unlock(&ntfs_lock);
ntfs_error(sb, "Failed to initialize upcase table.");
return false;
}
/*
* The lcn and mft bitmap inodes are NTFS-internal inodes with
* their own special locking rules:
*/
static struct lock_class_key
lcnbmp_runlist_lock_key, lcnbmp_mrec_lock_key,
mftbmp_runlist_lock_key, mftbmp_mrec_lock_key;
/**
* load_system_files - open the system files using normal functions
* @vol: ntfs super block describing device whose system files to load
*
* Open the system files with normal access functions and complete setting up
* the ntfs super block @vol.
*
* Return 'true' on success or 'false' on error.
*/
static bool load_system_files(ntfs_volume *vol)
{
struct super_block *sb = vol->sb;
MFT_RECORD *m;
VOLUME_INFORMATION *vi;
ntfs_attr_search_ctx *ctx;
#ifdef NTFS_RW
RESTART_PAGE_HEADER *rp;
int err;
#endif /* NTFS_RW */
ntfs_debug("Entering.");
#ifdef NTFS_RW
/* Get the mft mirror inode and compare the contents of $MFT and $MFTMirr. */
if (!load_and_init_mft_mirror(vol) || !check_mft_mirror(vol)) {
static const char *es1 = "Failed to load $MFTMirr";
static const char *es2 = "$MFTMirr does not match $MFT";
static const char *es3 = ". Run ntfsfix and/or chkdsk.";
/* If a read-write mount, convert it to a read-only mount. */
if (!sb_rdonly(sb)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
!vol->mftmirr_ino ? es1 : es2,
es3);
goto iput_mirr_err_out;
}
sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s",
!vol->mftmirr_ino ? es1 : es2, es3);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s",
!vol->mftmirr_ino ? es1 : es2, es3);
/* This will prevent a read-write remount. */
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
/* Get mft bitmap attribute inode. */
vol->mftbmp_ino = ntfs_attr_iget(vol->mft_ino, AT_BITMAP, NULL, 0);
if (IS_ERR(vol->mftbmp_ino)) {
ntfs_error(sb, "Failed to load $MFT/$BITMAP attribute.");
goto iput_mirr_err_out;
}
lockdep_set_class(&NTFS_I(vol->mftbmp_ino)->runlist.lock,
&mftbmp_runlist_lock_key);
lockdep_set_class(&NTFS_I(vol->mftbmp_ino)->mrec_lock,
&mftbmp_mrec_lock_key);
/* Read upcase table and setup @vol->upcase and @vol->upcase_len. */
if (!load_and_init_upcase(vol))
goto iput_mftbmp_err_out;
#ifdef NTFS_RW
/*
* Read attribute definitions table and setup @vol->attrdef and
* @vol->attrdef_size.
*/
if (!load_and_init_attrdef(vol))
goto iput_upcase_err_out;
#endif /* NTFS_RW */
/*
* Get the cluster allocation bitmap inode and verify its size. No
* locking is needed at this stage as we are the mount in progress
* task and thus run exclusively.
*/
vol->lcnbmp_ino = ntfs_iget(sb, FILE_Bitmap);
if (IS_ERR(vol->lcnbmp_ino) || is_bad_inode(vol->lcnbmp_ino)) {
if (!IS_ERR(vol->lcnbmp_ino))
iput(vol->lcnbmp_ino);
goto bitmap_failed;
}
lockdep_set_class(&NTFS_I(vol->lcnbmp_ino)->runlist.lock,
&lcnbmp_runlist_lock_key);
lockdep_set_class(&NTFS_I(vol->lcnbmp_ino)->mrec_lock,
&lcnbmp_mrec_lock_key);
NInoSetSparseDisabled(NTFS_I(vol->lcnbmp_ino));
if ((vol->nr_clusters + 7) >> 3 > i_size_read(vol->lcnbmp_ino)) {
iput(vol->lcnbmp_ino);
bitmap_failed:
ntfs_error(sb, "Failed to load $Bitmap.");
goto iput_attrdef_err_out;
}
/*
* Get the volume inode and setup our cache of the volume flags and
* version.
*/
vol->vol_ino = ntfs_iget(sb, FILE_Volume);
if (IS_ERR(vol->vol_ino) || is_bad_inode(vol->vol_ino)) {
if (!IS_ERR(vol->vol_ino))
iput(vol->vol_ino);
volume_failed:
ntfs_error(sb, "Failed to load $Volume.");
goto iput_lcnbmp_err_out;
}
m = map_mft_record(NTFS_I(vol->vol_ino));
if (IS_ERR(m)) {
iput_volume_failed:
iput(vol->vol_ino);
goto volume_failed;
}
if (!(ctx = ntfs_attr_get_search_ctx(NTFS_I(vol->vol_ino), m))) {
ntfs_error(sb, "Failed to get attribute search context.");
goto get_ctx_vol_failed;
}
if (ntfs_attr_lookup(AT_VOLUME_INFORMATION, NULL, 0, 0, 0, NULL, 0,
ctx) || ctx->attr->non_resident || ctx->attr->flags) {
err_put_vol:
ntfs_attr_put_search_ctx(ctx);
get_ctx_vol_failed:
unmap_mft_record(NTFS_I(vol->vol_ino));
goto iput_volume_failed;
}
vi = (VOLUME_INFORMATION*)((char*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset));
/* Some bounds checks. */
if ((u8*)vi < (u8*)ctx->attr || (u8*)vi +
le32_to_cpu(ctx->attr->data.resident.value_length) >
(u8*)ctx->attr + le32_to_cpu(ctx->attr->length))
goto err_put_vol;
/* Copy the volume flags and version to the ntfs_volume structure. */
vol->vol_flags = vi->flags;
vol->major_ver = vi->major_ver;
vol->minor_ver = vi->minor_ver;
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(NTFS_I(vol->vol_ino));
pr_info("volume version %i.%i.\n", vol->major_ver,
vol->minor_ver);
if (vol->major_ver < 3 && NVolSparseEnabled(vol)) {
ntfs_warning(vol->sb, "Disabling sparse support due to NTFS "
"volume version %i.%i (need at least version "
"3.0).", vol->major_ver, vol->minor_ver);
NVolClearSparseEnabled(vol);
}
#ifdef NTFS_RW
/* Make sure that no unsupported volume flags are set. */
if (vol->vol_flags & VOLUME_MUST_MOUNT_RO_MASK) {
static const char *es1a = "Volume is dirty";
static const char *es1b = "Volume has been modified by chkdsk";
static const char *es1c = "Volume has unsupported flags set";
static const char *es2a = ". Run chkdsk and mount in Windows.";
static const char *es2b = ". Mount in Windows.";
const char *es1, *es2;
es2 = es2a;
if (vol->vol_flags & VOLUME_IS_DIRTY)
es1 = es1a;
else if (vol->vol_flags & VOLUME_MODIFIED_BY_CHKDSK) {
es1 = es1b;
es2 = es2b;
} else {
es1 = es1c;
ntfs_warning(sb, "Unsupported volume flags 0x%x "
"encountered.",
(unsigned)le16_to_cpu(vol->vol_flags));
}
/* If a read-write mount, convert it to a read-only mount. */
if (!sb_rdonly(sb)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
es1, es2);
goto iput_vol_err_out;
}
sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s", es1, es2);
/*
* Do not set NVolErrors() because ntfs_remount() re-checks the
* flags, which it needs to do in case any flags have changed.
*/
}
/*
* Get the inode for the logfile, check it and determine if the volume
* was shutdown cleanly.
*/
rp = NULL;
if (!load_and_check_logfile(vol, &rp) ||
!ntfs_is_logfile_clean(vol->logfile_ino, rp)) {
static const char *es1a = "Failed to load $LogFile";
static const char *es1b = "$LogFile is not clean";
static const char *es2 = ". Mount in Windows.";
const char *es1;
es1 = !vol->logfile_ino ? es1a : es1b;
/* If a read-write mount, convert it to a read-only mount. */
if (!sb_rdonly(sb)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
es1, es2);
if (vol->logfile_ino) {
BUG_ON(!rp);
ntfs_free(rp);
}
goto iput_logfile_err_out;
}
sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s", es1, es2);
/* This will prevent a read-write remount. */
NVolSetErrors(vol);
}
ntfs_free(rp);
#endif /* NTFS_RW */
/* Get the root directory inode so we can do path lookups. */
vol->root_ino = ntfs_iget(sb, FILE_root);
if (IS_ERR(vol->root_ino) || is_bad_inode(vol->root_ino)) {
if (!IS_ERR(vol->root_ino))
iput(vol->root_ino);
ntfs_error(sb, "Failed to load root directory.");
goto iput_logfile_err_out;
}
#ifdef NTFS_RW
/*
* Check if Windows is suspended to disk on the target volume. If it
* is hibernated, we must not write *anything* to the disk so set
* NVolErrors() without setting the dirty volume flag and mount
* read-only. This will prevent read-write remounting and it will also
* prevent all writes.
*/
err = check_windows_hibernation_status(vol);
if (unlikely(err)) {
static const char *es1a = "Failed to determine if Windows is "
"hibernated";
static const char *es1b = "Windows is hibernated";
static const char *es2 = ". Run chkdsk.";
const char *es1;
es1 = err < 0 ? es1a : es1b;
/* If a read-write mount, convert it to a read-only mount. */
if (!sb_rdonly(sb)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
es1, es2);
goto iput_root_err_out;
}
sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s", es1, es2);
/* This will prevent a read-write remount. */
NVolSetErrors(vol);
}
/* If (still) a read-write mount, mark the volume dirty. */
if (!sb_rdonly(sb) && ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY)) {
static const char *es1 = "Failed to set dirty bit in volume "
"information flags";
static const char *es2 = ". Run chkdsk.";
/* Convert to a read-only mount. */
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors=continue nor "
"on_errors=remount-ro was specified%s",
es1, es2);
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
sb->s_flags |= SB_RDONLY;
/*
* Do not set NVolErrors() because ntfs_remount() might manage
* to set the dirty flag in which case all would be well.
*/
}
#if 0
// TODO: Enable this code once we start modifying anything that is
// different between NTFS 1.2 and 3.x...
/*
* If (still) a read-write mount, set the NT4 compatibility flag on
* newer NTFS version volumes.
*/
if (!(sb->s_flags & SB_RDONLY) && (vol->major_ver > 1) &&
ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
static const char *es1 = "Failed to set NT4 compatibility flag";
static const char *es2 = ". Run chkdsk.";
/* Convert to a read-only mount. */
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors=continue nor "
"on_errors=remount-ro was specified%s",
es1, es2);
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
#endif
/* If (still) a read-write mount, empty the logfile. */
if (!sb_rdonly(sb) && !ntfs_empty_logfile(vol->logfile_ino)) {
static const char *es1 = "Failed to empty $LogFile";
static const char *es2 = ". Mount in Windows.";
/* Convert to a read-only mount. */
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors=continue nor "
"on_errors=remount-ro was specified%s",
es1, es2);
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
/* If on NTFS versions before 3.0, we are done. */
if (unlikely(vol->major_ver < 3))
return true;
/* NTFS 3.0+ specific initialization. */
/* Get the security descriptors inode. */
vol->secure_ino = ntfs_iget(sb, FILE_Secure);
if (IS_ERR(vol->secure_ino) || is_bad_inode(vol->secure_ino)) {
if (!IS_ERR(vol->secure_ino))
iput(vol->secure_ino);
ntfs_error(sb, "Failed to load $Secure.");
goto iput_root_err_out;
}
// TODO: Initialize security.
/* Get the extended system files' directory inode. */
vol->extend_ino = ntfs_iget(sb, FILE_Extend);
if (IS_ERR(vol->extend_ino) || is_bad_inode(vol->extend_ino) ||
!S_ISDIR(vol->extend_ino->i_mode)) {
if (!IS_ERR(vol->extend_ino))
iput(vol->extend_ino);
ntfs_error(sb, "Failed to load $Extend.");
goto iput_sec_err_out;
}
#ifdef NTFS_RW
/* Find the quota file, load it if present, and set it up. */
if (!load_and_init_quota(vol)) {
static const char *es1 = "Failed to load $Quota";
static const char *es2 = ". Run chkdsk.";
/* If a read-write mount, convert it to a read-only mount. */
if (!sb_rdonly(sb)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
es1, es2);
goto iput_quota_err_out;
}
sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s", es1, es2);
/* This will prevent a read-write remount. */
NVolSetErrors(vol);
}
/* If (still) a read-write mount, mark the quotas out of date. */
if (!sb_rdonly(sb) && !ntfs_mark_quotas_out_of_date(vol)) {
static const char *es1 = "Failed to mark quotas out of date";
static const char *es2 = ". Run chkdsk.";
/* Convert to a read-only mount. */
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors=continue nor "
"on_errors=remount-ro was specified%s",
es1, es2);
goto iput_quota_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
/*
* Find the transaction log file ($UsnJrnl), load it if present, check
* it, and set it up.
*/
if (!load_and_init_usnjrnl(vol)) {
static const char *es1 = "Failed to load $UsnJrnl";
static const char *es2 = ". Run chkdsk.";
/* If a read-write mount, convert it to a read-only mount. */
if (!sb_rdonly(sb)) {
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors="
"continue nor on_errors="
"remount-ro was specified%s",
es1, es2);
goto iput_usnjrnl_err_out;
}
sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
"read-write%s", es1, es2);
/* This will prevent a read-write remount. */
NVolSetErrors(vol);
}
/* If (still) a read-write mount, stamp the transaction log. */
if (!sb_rdonly(sb) && !ntfs_stamp_usnjrnl(vol)) {
static const char *es1 = "Failed to stamp transaction log "
"($UsnJrnl)";
static const char *es2 = ". Run chkdsk.";
/* Convert to a read-only mount. */
if (!(vol->on_errors & (ON_ERRORS_REMOUNT_RO |
ON_ERRORS_CONTINUE))) {
ntfs_error(sb, "%s and neither on_errors=continue nor "
"on_errors=remount-ro was specified%s",
es1, es2);
goto iput_usnjrnl_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
return true;
#ifdef NTFS_RW
iput_usnjrnl_err_out:
iput(vol->usnjrnl_j_ino);
iput(vol->usnjrnl_max_ino);
iput(vol->usnjrnl_ino);
iput_quota_err_out:
iput(vol->quota_q_ino);
iput(vol->quota_ino);
iput(vol->extend_ino);
#endif /* NTFS_RW */
iput_sec_err_out:
iput(vol->secure_ino);
iput_root_err_out:
iput(vol->root_ino);
iput_logfile_err_out:
#ifdef NTFS_RW
iput(vol->logfile_ino);
iput_vol_err_out:
#endif /* NTFS_RW */
iput(vol->vol_ino);
iput_lcnbmp_err_out:
iput(vol->lcnbmp_ino);
iput_attrdef_err_out:
vol->attrdef_size = 0;
if (vol->attrdef) {
ntfs_free(vol->attrdef);
vol->attrdef = NULL;
}
#ifdef NTFS_RW
iput_upcase_err_out:
#endif /* NTFS_RW */
vol->upcase_len = 0;
mutex_lock(&ntfs_lock);
if (vol->upcase == default_upcase) {
ntfs_nr_upcase_users--;
vol->upcase = NULL;
}
mutex_unlock(&ntfs_lock);
if (vol->upcase) {
ntfs_free(vol->upcase);
vol->upcase = NULL;
}
iput_mftbmp_err_out:
iput(vol->mftbmp_ino);
iput_mirr_err_out:
#ifdef NTFS_RW
iput(vol->mftmirr_ino);
#endif /* NTFS_RW */
return false;
}
/**
* ntfs_put_super - called by the vfs to unmount a volume
* @sb: vfs superblock of volume to unmount
*
* ntfs_put_super() is called by the VFS (from fs/super.c::do_umount()) when
* the volume is being unmounted (umount system call has been invoked) and it
* releases all inodes and memory belonging to the NTFS specific part of the
* super block.
*/
static void ntfs_put_super(struct super_block *sb)
{
ntfs_volume *vol = NTFS_SB(sb);
ntfs_debug("Entering.");
#ifdef NTFS_RW
/*
* Commit all inodes while they are still open in case some of them
* cause others to be dirtied.
*/
ntfs_commit_inode(vol->vol_ino);
/* NTFS 3.0+ specific. */
if (vol->major_ver >= 3) {
if (vol->usnjrnl_j_ino)
ntfs_commit_inode(vol->usnjrnl_j_ino);
if (vol->usnjrnl_max_ino)
ntfs_commit_inode(vol->usnjrnl_max_ino);
if (vol->usnjrnl_ino)
ntfs_commit_inode(vol->usnjrnl_ino);
if (vol->quota_q_ino)
ntfs_commit_inode(vol->quota_q_ino);
if (vol->quota_ino)
ntfs_commit_inode(vol->quota_ino);
if (vol->extend_ino)
ntfs_commit_inode(vol->extend_ino);
if (vol->secure_ino)
ntfs_commit_inode(vol->secure_ino);
}
ntfs_commit_inode(vol->root_ino);
down_write(&vol->lcnbmp_lock);
ntfs_commit_inode(vol->lcnbmp_ino);
up_write(&vol->lcnbmp_lock);
down_write(&vol->mftbmp_lock);
ntfs_commit_inode(vol->mftbmp_ino);
up_write(&vol->mftbmp_lock);
if (vol->logfile_ino)
ntfs_commit_inode(vol->logfile_ino);
if (vol->mftmirr_ino)
ntfs_commit_inode(vol->mftmirr_ino);
ntfs_commit_inode(vol->mft_ino);
/*
* If a read-write mount and no volume errors have occurred, mark the
* volume clean. Also, re-commit all affected inodes.
*/
if (!sb_rdonly(sb)) {
if (!NVolErrors(vol)) {
if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
ntfs_warning(sb, "Failed to clear dirty bit "
"in volume information "
"flags. Run chkdsk.");
ntfs_commit_inode(vol->vol_ino);
ntfs_commit_inode(vol->root_ino);
if (vol->mftmirr_ino)
ntfs_commit_inode(vol->mftmirr_ino);
ntfs_commit_inode(vol->mft_ino);
} else {
ntfs_warning(sb, "Volume has errors. Leaving volume "
"marked dirty. Run chkdsk.");
}
}
#endif /* NTFS_RW */
iput(vol->vol_ino);
vol->vol_ino = NULL;
/* NTFS 3.0+ specific clean up. */
if (vol->major_ver >= 3) {
#ifdef NTFS_RW
if (vol->usnjrnl_j_ino) {
iput(vol->usnjrnl_j_ino);
vol->usnjrnl_j_ino = NULL;
}
if (vol->usnjrnl_max_ino) {
iput(vol->usnjrnl_max_ino);
vol->usnjrnl_max_ino = NULL;
}
if (vol->usnjrnl_ino) {
iput(vol->usnjrnl_ino);
vol->usnjrnl_ino = NULL;
}
if (vol->quota_q_ino) {
iput(vol->quota_q_ino);
vol->quota_q_ino = NULL;
}
if (vol->quota_ino) {
iput(vol->quota_ino);
vol->quota_ino = NULL;
}
#endif /* NTFS_RW */
if (vol->extend_ino) {
iput(vol->extend_ino);
vol->extend_ino = NULL;
}
if (vol->secure_ino) {
iput(vol->secure_ino);
vol->secure_ino = NULL;
}
}
iput(vol->root_ino);
vol->root_ino = NULL;
down_write(&vol->lcnbmp_lock);
iput(vol->lcnbmp_ino);
vol->lcnbmp_ino = NULL;
up_write(&vol->lcnbmp_lock);
down_write(&vol->mftbmp_lock);
iput(vol->mftbmp_ino);
vol->mftbmp_ino = NULL;
up_write(&vol->mftbmp_lock);
#ifdef NTFS_RW
if (vol->logfile_ino) {
iput(vol->logfile_ino);
vol->logfile_ino = NULL;
}
if (vol->mftmirr_ino) {
/* Re-commit the mft mirror and mft just in case. */
ntfs_commit_inode(vol->mftmirr_ino);
ntfs_commit_inode(vol->mft_ino);
iput(vol->mftmirr_ino);
vol->mftmirr_ino = NULL;
}
/*
* We should have no dirty inodes left, due to
* mft.c::ntfs_mft_writepage() cleaning all the dirty pages as
* the underlying mft records are written out and cleaned.
*/
ntfs_commit_inode(vol->mft_ino);
write_inode_now(vol->mft_ino, 1);
#endif /* NTFS_RW */
iput(vol->mft_ino);
vol->mft_ino = NULL;
/* Throw away the table of attribute definitions. */
vol->attrdef_size = 0;
if (vol->attrdef) {
ntfs_free(vol->attrdef);
vol->attrdef = NULL;
}
vol->upcase_len = 0;
/*
* Destroy the global default upcase table if necessary. Also decrease
* the number of upcase users if we are a user.
*/
mutex_lock(&ntfs_lock);
if (vol->upcase == default_upcase) {
ntfs_nr_upcase_users--;
vol->upcase = NULL;
}
if (!ntfs_nr_upcase_users && default_upcase) {
ntfs_free(default_upcase);
default_upcase = NULL;
}
if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users)
free_compression_buffers();
mutex_unlock(&ntfs_lock);
if (vol->upcase) {
ntfs_free(vol->upcase);
vol->upcase = NULL;
}
unload_nls(vol->nls_map);
sb->s_fs_info = NULL;
kfree(vol);
}
/**
* get_nr_free_clusters - return the number of free clusters on a volume
* @vol: ntfs volume for which to obtain free cluster count
*
* Calculate the number of free clusters on the mounted NTFS volume @vol. We
* actually calculate the number of clusters in use instead because this
* allows us to not care about partial pages as these will be just zero filled
* and hence not be counted as allocated clusters.
*
 * The only particularity is that clusters beyond the end of the logical ntfs
 * volume will be marked as allocated to prevent errors, which means we have to
 * discount those at the end. This is important as the cluster bitmap always
 * has a size in multiples of 8 bytes, i.e. up to 63 clusters could be outside
 * the logical volume and marked in use even though they do not exist.
*
* If any pages cannot be read we assume all clusters in the erroring pages are
* in use. This means we return an underestimate on errors which is better than
* an overestimate.
*/
static s64 get_nr_free_clusters(ntfs_volume *vol)
{
s64 nr_free = vol->nr_clusters;
struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
struct page *page;
pgoff_t index, max_index;
ntfs_debug("Entering.");
/* Serialize accesses to the cluster bitmap. */
down_read(&vol->lcnbmp_lock);
/*
* Convert the number of bits into bytes rounded up, then convert into
* multiples of PAGE_SIZE, rounding up so that if we have one
* full and one partial page max_index = 2.
*/
max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >>
PAGE_SHIFT;
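	/*
	 * Worked example with hypothetical numbers: for a volume of 70000
	 * clusters and PAGE_SIZE = 4096, (70000 + 7) >> 3 = 8750 bitmap
	 * bytes, and (8750 + 4095) >> 12 = 3, so max_index = 3 pages.
	 */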
/* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
max_index, PAGE_SIZE / 4);
for (index = 0; index < max_index; index++) {
unsigned long *kaddr;
/*
* Read the page from page cache, getting it from backing store
* if necessary, and increment the use count.
*/
page = read_mapping_page(mapping, index, NULL);
/* Ignore pages which errored synchronously. */
if (IS_ERR(page)) {
ntfs_debug("read_mapping_page() error. Skipping "
"page (index 0x%lx).", index);
nr_free -= PAGE_SIZE * 8;
continue;
}
kaddr = kmap_atomic(page);
/*
		 * Subtract the number of set bits. If this is the last page
		 * and it is partial, we don't really care: it just means we do
		 * a little extra work, but it won't affect the result as all
		 * out of range bytes are set to zero by ntfs_readpage().
*/
nr_free -= bitmap_weight(kaddr,
PAGE_SIZE * BITS_PER_BYTE);
kunmap_atomic(kaddr);
put_page(page);
}
ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
/*
* Fixup for eventual bits outside logical ntfs volume (see function
* description above).
*/
if (vol->nr_clusters & 63)
nr_free += 64 - (vol->nr_clusters & 63);
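	/*
	 * Worked example with hypothetical numbers: for nr_clusters = 1000,
	 * the bitmap covers 1024 bits (128 bytes), so the 24 trailing
	 * out-of-range bits are set, and 1000 & 63 = 40 yields the
	 * compensating 64 - 40 = 24 clusters added back here.
	 */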
up_read(&vol->lcnbmp_lock);
/* If errors occurred we may well have gone below zero, fix this. */
if (nr_free < 0)
nr_free = 0;
ntfs_debug("Exiting.");
return nr_free;
}
/**
* __get_nr_free_mft_records - return the number of free inodes on a volume
* @vol: ntfs volume for which to obtain free inode count
* @nr_free: number of mft records in filesystem
* @max_index: maximum number of pages containing set bits
*
* Calculate the number of free mft records (inodes) on the mounted NTFS
* volume @vol. We actually calculate the number of mft records in use instead
* because this allows us to not care about partial pages as these will be just
 * zero filled and hence not be counted as allocated mft records.
*
* If any pages cannot be read we assume all mft records in the erroring pages
* are in use. This means we return an underestimate on errors which is better
* than an overestimate.
*
* NOTE: Caller must hold mftbmp_lock rw_semaphore for reading or writing.
*/
static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
s64 nr_free, const pgoff_t max_index)
{
struct address_space *mapping = vol->mftbmp_ino->i_mapping;
struct page *page;
pgoff_t index;
ntfs_debug("Entering.");
/* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
"0x%lx.", max_index, PAGE_SIZE / 4);
for (index = 0; index < max_index; index++) {
unsigned long *kaddr;
/*
* Read the page from page cache, getting it from backing store
* if necessary, and increment the use count.
*/
page = read_mapping_page(mapping, index, NULL);
/* Ignore pages which errored synchronously. */
if (IS_ERR(page)) {
ntfs_debug("read_mapping_page() error. Skipping "
"page (index 0x%lx).", index);
nr_free -= PAGE_SIZE * 8;
continue;
}
kaddr = kmap_atomic(page);
/*
		 * Subtract the number of set bits. If this is the last page
		 * and it is partial, we don't really care: it just means we do
		 * a little extra work, but it won't affect the result as all
		 * out of range bytes are set to zero by ntfs_readpage().
*/
nr_free -= bitmap_weight(kaddr,
PAGE_SIZE * BITS_PER_BYTE);
kunmap_atomic(kaddr);
put_page(page);
}
ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
index - 1);
/* If errors occurred we may well have gone below zero, fix this. */
if (nr_free < 0)
nr_free = 0;
ntfs_debug("Exiting.");
return nr_free;
}
/**
* ntfs_statfs - return information about mounted NTFS volume
* @dentry: dentry from mounted volume
* @sfs: statfs structure in which to return the information
*
 * Return information about the mounted NTFS volume @dentry in the statfs
 * structure pointed to by @sfs (this is initialized with zeros before
 * ntfs_statfs is called). We interpret the values to be correct at the moment
 * in time at which we are called. Most values are otherwise variable, and not
 * just the free values but the totals as well. For example we can increase the
 * total number of file nodes if we run out and we can keep doing this until
 * there is no more space on the volume left at all.
*
* Called from vfs_statfs which is used to handle the statfs, fstatfs, and
* ustat system calls.
*
* Return 0 on success or -errno on error.
*/
static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
{
struct super_block *sb = dentry->d_sb;
s64 size;
ntfs_volume *vol = NTFS_SB(sb);
ntfs_inode *mft_ni = NTFS_I(vol->mft_ino);
pgoff_t max_index;
unsigned long flags;
ntfs_debug("Entering.");
/* Type of filesystem. */
sfs->f_type = NTFS_SB_MAGIC;
/* Optimal transfer block size. */
sfs->f_bsize = PAGE_SIZE;
/*
* Total data blocks in filesystem in units of f_bsize and since
	 * inodes are also stored in data blocks ($MFT is a file) this is just
* the total clusters.
*/
sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >>
PAGE_SHIFT;
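	/*
	 * E.g. (illustrative): with 512 byte clusters and 4 kiB pages this
	 * nets nr_clusters / 8; with 4 kiB clusters the two shifts cancel
	 * out and f_blocks equals nr_clusters.
	 */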
/* Free data blocks in filesystem in units of f_bsize. */
size = get_nr_free_clusters(vol) << vol->cluster_size_bits >>
PAGE_SHIFT;
if (size < 0LL)
size = 0LL;
/* Free blocks avail to non-superuser, same as above on NTFS. */
sfs->f_bavail = sfs->f_bfree = size;
/* Serialize accesses to the inode bitmap. */
down_read(&vol->mftbmp_lock);
read_lock_irqsave(&mft_ni->size_lock, flags);
size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
/*
* Convert the maximum number of set bits into bytes rounded up, then
* convert into multiples of PAGE_SIZE, rounding up so that if we
* have one full and one partial page max_index = 2.
*/
max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
+ 7) >> 3) + PAGE_SIZE - 1) >> PAGE_SHIFT;
read_unlock_irqrestore(&mft_ni->size_lock, flags);
/* Number of inodes in filesystem (at this point in time). */
sfs->f_files = size;
/* Free inodes in fs (based on current total count). */
sfs->f_ffree = __get_nr_free_mft_records(vol, size, max_index);
up_read(&vol->mftbmp_lock);
/*
* File system id. This is extremely *nix flavour dependent and even
* within Linux itself all fs do their own thing. I interpret this to
* mean a unique id associated with the mounted fs and not the id
* associated with the filesystem driver, the latter is already given
* by the filesystem type in sfs->f_type. Thus we use the 64-bit
* volume serial number splitting it into two 32-bit parts. We enter
* the least significant 32-bits in f_fsid[0] and the most significant
* 32-bits in f_fsid[1].
*/
sfs->f_fsid = u64_to_fsid(vol->serial_no);
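	/*
	 * E.g. (illustrative): a volume serial number of 0x0123456789abcdef
	 * yields f_fsid[0] = 0x89abcdef and f_fsid[1] = 0x01234567.
	 */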
/* Maximum length of filenames. */
sfs->f_namelen = NTFS_MAX_NAME_LEN;
return 0;
}
#ifdef NTFS_RW
static int ntfs_write_inode(struct inode *vi, struct writeback_control *wbc)
{
return __ntfs_write_inode(vi, wbc->sync_mode == WB_SYNC_ALL);
}
#endif
/*
* The complete super operations.
*/
static const struct super_operations ntfs_sops = {
.alloc_inode = ntfs_alloc_big_inode, /* VFS: Allocate new inode. */
.free_inode = ntfs_free_big_inode, /* VFS: Deallocate inode. */
#ifdef NTFS_RW
.write_inode = ntfs_write_inode, /* VFS: Write dirty inode to
disk. */
#endif /* NTFS_RW */
.put_super = ntfs_put_super, /* Syscall: umount. */
.statfs = ntfs_statfs, /* Syscall: statfs */
.remount_fs = ntfs_remount, /* Syscall: mount -o remount. */
.evict_inode = ntfs_evict_big_inode, /* VFS: Called when an inode is
removed from memory. */
.show_options = ntfs_show_options, /* Show mount options in
proc. */
};
/**
* ntfs_fill_super - mount an ntfs filesystem
* @sb: super block of ntfs filesystem to mount
* @opt: string containing the mount options
* @silent: silence error output
*
 * ntfs_fill_super() is called by the VFS to mount the device described by @sb
 * as an NTFS filesystem, using the mount options in @opt.
*
* If @silent is true, remain silent even if errors are detected. This is used
* during bootup, when the kernel tries to mount the root filesystem with all
* registered filesystems one after the other until one succeeds. This implies
* that all filesystems except the correct one will quite correctly and
* expectedly return an error, but nobody wants to see error messages when in
* fact this is what is supposed to happen.
*
* NOTE: @sb->s_flags contains the mount options flags.
*/
static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
{
ntfs_volume *vol;
struct buffer_head *bh;
struct inode *tmp_ino;
int blocksize, result;
/*
* We do a pretty difficult piece of bootstrap by reading the
* MFT (and other metadata) from disk into memory. We'll only
* release this metadata during umount, so the locking patterns
* observed during bootstrap do not count. So turn off the
* observation of locking patterns (strictly for this context
* only) while mounting NTFS. [The validator is still active
* otherwise, even for this context: it will for example record
* lock class registrations.]
*/
lockdep_off();
ntfs_debug("Entering.");
#ifndef NTFS_RW
sb->s_flags |= SB_RDONLY;
#endif /* ! NTFS_RW */
/* Allocate a new ntfs_volume and place it in sb->s_fs_info. */
sb->s_fs_info = kmalloc(sizeof(ntfs_volume), GFP_NOFS);
vol = NTFS_SB(sb);
if (!vol) {
if (!silent)
ntfs_error(sb, "Allocation of NTFS volume structure "
"failed. Aborting mount...");
lockdep_on();
return -ENOMEM;
}
/* Initialize ntfs_volume structure. */
*vol = (ntfs_volume) {
.sb = sb,
/*
* Default is group and other don't have any access to files or
* directories while owner has full access. Further, files by
* default are not executable but directories are of course
* browseable.
*/
.fmask = 0177,
.dmask = 0077,
};
init_rwsem(&vol->mftbmp_lock);
init_rwsem(&vol->lcnbmp_lock);
/* By default, enable sparse support. */
NVolSetSparseEnabled(vol);
/* Important to get the mount options dealt with now. */
if (!parse_options(vol, (char*)opt))
goto err_out_now;
/* We support sector sizes up to the PAGE_SIZE. */
if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) {
if (!silent)
ntfs_error(sb, "Device has unsupported sector size "
"(%i). The maximum supported sector "
"size on this architecture is %lu "
"bytes.",
bdev_logical_block_size(sb->s_bdev),
PAGE_SIZE);
goto err_out_now;
}
/*
* Setup the device access block size to NTFS_BLOCK_SIZE or the hard
* sector size, whichever is bigger.
*/
blocksize = sb_min_blocksize(sb, NTFS_BLOCK_SIZE);
if (blocksize < NTFS_BLOCK_SIZE) {
if (!silent)
ntfs_error(sb, "Unable to set device block size.");
goto err_out_now;
}
BUG_ON(blocksize != sb->s_blocksize);
ntfs_debug("Set device block size to %i bytes (block size bits %i).",
blocksize, sb->s_blocksize_bits);
/* Determine the size of the device in units of block_size bytes. */
vol->nr_blocks = sb_bdev_nr_blocks(sb);
if (!vol->nr_blocks) {
if (!silent)
ntfs_error(sb, "Unable to determine device size.");
goto err_out_now;
}
/* Read the boot sector and return unlocked buffer head to it. */
if (!(bh = read_ntfs_boot_sector(sb, silent))) {
if (!silent)
ntfs_error(sb, "Not an NTFS volume.");
goto err_out_now;
}
/*
* Extract the data from the boot sector and setup the ntfs volume
* using it.
*/
result = parse_ntfs_boot_sector(vol, (NTFS_BOOT_SECTOR*)bh->b_data);
brelse(bh);
if (!result) {
if (!silent)
ntfs_error(sb, "Unsupported NTFS filesystem.");
goto err_out_now;
}
/*
* If the boot sector indicates a sector size bigger than the current
* device block size, switch the device block size to the sector size.
* TODO: It may be possible to support this case even when the set
* below fails, we would just be breaking up the i/o for each sector
* into multiple blocks for i/o purposes but otherwise it should just
* work. However it is safer to leave disabled until someone hits this
* error message and then we can get them to try it without the setting
* so we know for sure that it works.
*/
if (vol->sector_size > blocksize) {
blocksize = sb_set_blocksize(sb, vol->sector_size);
if (blocksize != vol->sector_size) {
if (!silent)
ntfs_error(sb, "Unable to set device block "
"size to sector size (%i).",
vol->sector_size);
goto err_out_now;
}
BUG_ON(blocksize != sb->s_blocksize);
vol->nr_blocks = sb_bdev_nr_blocks(sb);
ntfs_debug("Changed device block size to %i bytes (block size "
"bits %i) to match volume sector size.",
blocksize, sb->s_blocksize_bits);
}
/* Initialize the cluster and mft allocators. */
ntfs_setup_allocators(vol);
/* Setup remaining fields in the super block. */
sb->s_magic = NTFS_SB_MAGIC;
/*
* Ntfs allows 63 bits for the file size, i.e. correct would be:
* sb->s_maxbytes = ~0ULL >> 1;
* But the kernel uses a long as the page cache page index which on
* 32-bit architectures is only 32-bits. MAX_LFS_FILESIZE is kernel
* defined to the maximum the page cache page index can cope with
* without overflowing the index or to 2^63 - 1, whichever is smaller.
*/
sb->s_maxbytes = MAX_LFS_FILESIZE;
/* Ntfs measures time in 100ns intervals. */
sb->s_time_gran = 100;
/*
* Now load the metadata required for the page cache and our address
* space operations to function. We do this by setting up a specialised
* read_inode method and then just calling the normal iget() to obtain
* the inode for $MFT which is sufficient to allow our normal inode
* operations and associated address space operations to function.
*/
sb->s_op = &ntfs_sops;
tmp_ino = new_inode(sb);
if (!tmp_ino) {
if (!silent)
ntfs_error(sb, "Failed to load essential metadata.");
goto err_out_now;
}
tmp_ino->i_ino = FILE_MFT;
insert_inode_hash(tmp_ino);
if (ntfs_read_inode_mount(tmp_ino) < 0) {
if (!silent)
ntfs_error(sb, "Failed to load essential metadata.");
goto iput_tmp_ino_err_out_now;
}
mutex_lock(&ntfs_lock);
/*
* The current mount is a compression user if the cluster size is
	 * less than or equal to 4kiB.
*/
if (vol->cluster_size <= 4096 && !ntfs_nr_compression_users++) {
result = allocate_compression_buffers();
if (result) {
ntfs_error(NULL, "Failed to allocate buffers "
"for compression engine.");
ntfs_nr_compression_users--;
mutex_unlock(&ntfs_lock);
goto iput_tmp_ino_err_out_now;
}
}
/*
* Generate the global default upcase table if necessary. Also
* temporarily increment the number of upcase users to avoid race
* conditions with concurrent (u)mounts.
*/
if (!default_upcase)
default_upcase = generate_default_upcase();
ntfs_nr_upcase_users++;
mutex_unlock(&ntfs_lock);
/*
	 * From now on, ignore the @silent parameter. If we fail below this line,
* it will be due to a corrupt fs or a system error, so we report it.
*/
/*
* Open the system files with normal access functions and complete
* setting up the ntfs super block.
*/
if (!load_system_files(vol)) {
ntfs_error(sb, "Failed to load system files.");
goto unl_upcase_iput_tmp_ino_err_out_now;
}
/* We grab a reference, simulating an ntfs_iget(). */
ihold(vol->root_ino);
if ((sb->s_root = d_make_root(vol->root_ino))) {
ntfs_debug("Exiting, status successful.");
/* Release the default upcase if it has no users. */
mutex_lock(&ntfs_lock);
if (!--ntfs_nr_upcase_users && default_upcase) {
ntfs_free(default_upcase);
default_upcase = NULL;
}
mutex_unlock(&ntfs_lock);
sb->s_export_op = &ntfs_export_ops;
lockdep_on();
return 0;
}
ntfs_error(sb, "Failed to allocate root directory.");
/* Clean up after the successful load_system_files() call from above. */
// TODO: Use ntfs_put_super() instead of repeating all this code...
// FIXME: Should mark the volume clean as the error is most likely
// -ENOMEM.
iput(vol->vol_ino);
vol->vol_ino = NULL;
/* NTFS 3.0+ specific clean up. */
if (vol->major_ver >= 3) {
#ifdef NTFS_RW
if (vol->usnjrnl_j_ino) {
iput(vol->usnjrnl_j_ino);
vol->usnjrnl_j_ino = NULL;
}
if (vol->usnjrnl_max_ino) {
iput(vol->usnjrnl_max_ino);
vol->usnjrnl_max_ino = NULL;
}
if (vol->usnjrnl_ino) {
iput(vol->usnjrnl_ino);
vol->usnjrnl_ino = NULL;
}
if (vol->quota_q_ino) {
iput(vol->quota_q_ino);
vol->quota_q_ino = NULL;
}
if (vol->quota_ino) {
iput(vol->quota_ino);
vol->quota_ino = NULL;
}
#endif /* NTFS_RW */
if (vol->extend_ino) {
iput(vol->extend_ino);
vol->extend_ino = NULL;
}
if (vol->secure_ino) {
iput(vol->secure_ino);
vol->secure_ino = NULL;
}
}
iput(vol->root_ino);
vol->root_ino = NULL;
iput(vol->lcnbmp_ino);
vol->lcnbmp_ino = NULL;
iput(vol->mftbmp_ino);
vol->mftbmp_ino = NULL;
#ifdef NTFS_RW
if (vol->logfile_ino) {
iput(vol->logfile_ino);
vol->logfile_ino = NULL;
}
if (vol->mftmirr_ino) {
iput(vol->mftmirr_ino);
vol->mftmirr_ino = NULL;
}
#endif /* NTFS_RW */
/* Throw away the table of attribute definitions. */
vol->attrdef_size = 0;
if (vol->attrdef) {
ntfs_free(vol->attrdef);
vol->attrdef = NULL;
}
vol->upcase_len = 0;
mutex_lock(&ntfs_lock);
if (vol->upcase == default_upcase) {
ntfs_nr_upcase_users--;
vol->upcase = NULL;
}
mutex_unlock(&ntfs_lock);
if (vol->upcase) {
ntfs_free(vol->upcase);
vol->upcase = NULL;
}
if (vol->nls_map) {
unload_nls(vol->nls_map);
vol->nls_map = NULL;
}
/* Error exit code path. */
unl_upcase_iput_tmp_ino_err_out_now:
/*
* Decrease the number of upcase users and destroy the global default
* upcase table if necessary.
*/
mutex_lock(&ntfs_lock);
if (!--ntfs_nr_upcase_users && default_upcase) {
ntfs_free(default_upcase);
default_upcase = NULL;
}
if (vol->cluster_size <= 4096 && !--ntfs_nr_compression_users)
free_compression_buffers();
mutex_unlock(&ntfs_lock);
iput_tmp_ino_err_out_now:
iput(tmp_ino);
if (vol->mft_ino && vol->mft_ino != tmp_ino)
iput(vol->mft_ino);
vol->mft_ino = NULL;
/* Errors at this stage are irrelevant. */
err_out_now:
sb->s_fs_info = NULL;
kfree(vol);
ntfs_debug("Failed, returning -EINVAL.");
lockdep_on();
return -EINVAL;
}
/*
* This is a slab cache to optimize allocations and deallocations of Unicode
* strings of the maximum length allowed by NTFS, which is NTFS_MAX_NAME_LEN
* (255) Unicode characters + a terminating NULL Unicode character.
*/
struct kmem_cache *ntfs_name_cache;
/* Slab caches for efficient allocation/deallocation of inodes. */
struct kmem_cache *ntfs_inode_cache;
struct kmem_cache *ntfs_big_inode_cache;
/* Init once constructor for the inode slab cache. */
static void ntfs_big_inode_init_once(void *foo)
{
ntfs_inode *ni = (ntfs_inode *)foo;
inode_init_once(VFS_I(ni));
}
/*
* Slab caches to optimize allocations and deallocations of attribute search
* contexts and index contexts, respectively.
*/
struct kmem_cache *ntfs_attr_ctx_cache;
struct kmem_cache *ntfs_index_ctx_cache;
/* Driver wide mutex. */
DEFINE_MUTEX(ntfs_lock);
static struct dentry *ntfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
}
static struct file_system_type ntfs_fs_type = {
.owner = THIS_MODULE,
.name = "ntfs",
.mount = ntfs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ntfs");
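/*
 * With the driver registered, a volume is mounted in the usual way, e.g.
 * (illustrative):
 *	mount -t ntfs -o ro,umask=0222 /dev/sda1 /mnt/windows
 */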
/* Stable names for the slab caches. */
static const char ntfs_index_ctx_cache_name[] = "ntfs_index_ctx_cache";
static const char ntfs_attr_ctx_cache_name[] = "ntfs_attr_ctx_cache";
static const char ntfs_name_cache_name[] = "ntfs_name_cache";
static const char ntfs_inode_cache_name[] = "ntfs_inode_cache";
static const char ntfs_big_inode_cache_name[] = "ntfs_big_inode_cache";
static int __init init_ntfs_fs(void)
{
int err = 0;
/* This may be ugly but it results in pretty output so who cares. (-8 */
pr_info("driver " NTFS_VERSION " [Flags: R/"
#ifdef NTFS_RW
"W"
#else
"O"
#endif
#ifdef DEBUG
" DEBUG"
#endif
#ifdef MODULE
" MODULE"
#endif
"].\n");
ntfs_debug("Debug messages are enabled.");
ntfs_index_ctx_cache = kmem_cache_create(ntfs_index_ctx_cache_name,
sizeof(ntfs_index_context), 0 /* offset */,
SLAB_HWCACHE_ALIGN, NULL /* ctor */);
if (!ntfs_index_ctx_cache) {
pr_crit("Failed to create %s!\n", ntfs_index_ctx_cache_name);
goto ictx_err_out;
}
ntfs_attr_ctx_cache = kmem_cache_create(ntfs_attr_ctx_cache_name,
sizeof(ntfs_attr_search_ctx), 0 /* offset */,
SLAB_HWCACHE_ALIGN, NULL /* ctor */);
if (!ntfs_attr_ctx_cache) {
pr_crit("NTFS: Failed to create %s!\n",
ntfs_attr_ctx_cache_name);
goto actx_err_out;
}
ntfs_name_cache = kmem_cache_create(ntfs_name_cache_name,
(NTFS_MAX_NAME_LEN+1) * sizeof(ntfschar), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ntfs_name_cache) {
pr_crit("Failed to create %s!\n", ntfs_name_cache_name);
goto name_err_out;
}
ntfs_inode_cache = kmem_cache_create(ntfs_inode_cache_name,
sizeof(ntfs_inode), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
if (!ntfs_inode_cache) {
pr_crit("Failed to create %s!\n", ntfs_inode_cache_name);
goto inode_err_out;
}
ntfs_big_inode_cache = kmem_cache_create(ntfs_big_inode_cache_name,
sizeof(big_ntfs_inode), 0,
SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
SLAB_ACCOUNT, ntfs_big_inode_init_once);
if (!ntfs_big_inode_cache) {
pr_crit("Failed to create %s!\n", ntfs_big_inode_cache_name);
goto big_inode_err_out;
}
/* Register the ntfs sysctls. */
err = ntfs_sysctl(1);
if (err) {
pr_crit("Failed to register NTFS sysctls!\n");
goto sysctl_err_out;
}
err = register_filesystem(&ntfs_fs_type);
if (!err) {
ntfs_debug("NTFS driver registered successfully.");
return 0; /* Success! */
}
pr_crit("Failed to register NTFS filesystem driver!\n");
/* Unregister the ntfs sysctls. */
ntfs_sysctl(0);
sysctl_err_out:
kmem_cache_destroy(ntfs_big_inode_cache);
big_inode_err_out:
kmem_cache_destroy(ntfs_inode_cache);
inode_err_out:
kmem_cache_destroy(ntfs_name_cache);
name_err_out:
kmem_cache_destroy(ntfs_attr_ctx_cache);
actx_err_out:
kmem_cache_destroy(ntfs_index_ctx_cache);
ictx_err_out:
if (!err) {
pr_crit("Aborting NTFS filesystem driver registration...\n");
err = -ENOMEM;
}
return err;
}
static void __exit exit_ntfs_fs(void)
{
ntfs_debug("Unregistering NTFS driver.");
unregister_filesystem(&ntfs_fs_type);
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(ntfs_big_inode_cache);
kmem_cache_destroy(ntfs_inode_cache);
kmem_cache_destroy(ntfs_name_cache);
kmem_cache_destroy(ntfs_attr_ctx_cache);
kmem_cache_destroy(ntfs_index_ctx_cache);
/* Unregister the ntfs sysctls. */
ntfs_sysctl(0);
}
MODULE_AUTHOR("Anton Altaparmakov <[email protected]>");
MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.");
MODULE_VERSION(NTFS_VERSION);
MODULE_LICENSE("GPL");
#ifdef DEBUG
module_param(debug_msgs, bint, 0);
MODULE_PARM_DESC(debug_msgs, "Enable debug messages.");
#endif
module_init(init_ntfs_fs)
module_exit(exit_ntfs_fs)
| linux-master | fs/ntfs/super.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* collate.c - NTFS kernel collation handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2004 Anton Altaparmakov
*/
#include "collate.h"
#include "debug.h"
#include "ntfs.h"
static int ntfs_collate_binary(ntfs_volume *vol,
const void *data1, const int data1_len,
const void *data2, const int data2_len)
{
int rc;
ntfs_debug("Entering.");
rc = memcmp(data1, data2, min(data1_len, data2_len));
if (!rc && (data1_len != data2_len)) {
if (data1_len < data2_len)
rc = -1;
else
rc = 1;
}
ntfs_debug("Done, returning %i", rc);
return rc;
}
static int ntfs_collate_ntofs_ulong(ntfs_volume *vol,
const void *data1, const int data1_len,
const void *data2, const int data2_len)
{
int rc;
u32 d1, d2;
ntfs_debug("Entering.");
// FIXME: We don't really want to bug here.
BUG_ON(data1_len != data2_len);
BUG_ON(data1_len != 4);
d1 = le32_to_cpup(data1);
d2 = le32_to_cpup(data2);
if (d1 < d2)
rc = -1;
else {
if (d1 == d2)
rc = 0;
else
rc = 1;
}
ntfs_debug("Done, returning %i", rc);
return rc;
}
typedef int (*ntfs_collate_func_t)(ntfs_volume *, const void *, const int,
const void *, const int);
static ntfs_collate_func_t ntfs_do_collate0x0[3] = {
ntfs_collate_binary,
NULL/*ntfs_collate_file_name*/,
NULL/*ntfs_collate_unicode_string*/,
};
static ntfs_collate_func_t ntfs_do_collate0x1[4] = {
ntfs_collate_ntofs_ulong,
NULL/*ntfs_collate_ntofs_sid*/,
NULL/*ntfs_collate_ntofs_security_hash*/,
NULL/*ntfs_collate_ntofs_ulongs*/,
};
/**
* ntfs_collate - collate two data items using a specified collation rule
* @vol: ntfs volume to which the data items belong
* @cr: collation rule to use when comparing the items
* @data1: first data item to collate
* @data1_len: length in bytes of @data1
* @data2: second data item to collate
* @data2_len: length in bytes of @data2
*
* Collate the two data items @data1 and @data2 using the collation rule @cr
 * and return -1, 0, or 1 if @data1 is found, respectively, to collate before,
* to match, or to collate after @data2.
*
* For speed we use the collation rule @cr as an index into two tables of
* function pointers to call the appropriate collation function.
*/
int ntfs_collate(ntfs_volume *vol, COLLATION_RULE cr,
const void *data1, const int data1_len,
		const void *data2, const int data2_len)
{
int i;
ntfs_debug("Entering.");
/*
* FIXME: At the moment we only support COLLATION_BINARY and
* COLLATION_NTOFS_ULONG, so we BUG() for everything else for now.
*/
BUG_ON(cr != COLLATION_BINARY && cr != COLLATION_NTOFS_ULONG);
i = le32_to_cpu(cr);
BUG_ON(i < 0);
if (i <= 0x02)
return ntfs_do_collate0x0[i](vol, data1, data1_len,
data2, data2_len);
BUG_ON(i < 0x10);
i -= 0x10;
if (likely(i <= 3))
return ntfs_do_collate0x1[i](vol, data1, data1_len,
data2, data2_len);
BUG();
return 0;
}
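/*
 * Dispatch example (illustrative): COLLATION_NTOFS_ULONG is le32 0x10, so
 * i = 0x10 - 0x10 = 0 selects ntfs_do_collate0x1[0], i.e.
 * ntfs_collate_ntofs_ulong(), which for the keys 5 and 9 returns -1.
 */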
| linux-master | fs/ntfs/collate.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* compress.c - NTFS kernel compressed attributes handling.
* Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2004 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*/
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "attrib.h"
#include "inode.h"
#include "debug.h"
#include "ntfs.h"
/**
* ntfs_compression_constants - enum of constants used in the compression code
*/
typedef enum {
/* Token types and access mask. */
NTFS_SYMBOL_TOKEN = 0,
NTFS_PHRASE_TOKEN = 1,
NTFS_TOKEN_MASK = 1,
/* Compression sub-block constants. */
NTFS_SB_SIZE_MASK = 0x0fff,
NTFS_SB_SIZE = 0x1000,
NTFS_SB_IS_COMPRESSED = 0x8000,
/*
* The maximum compression block size is by definition 16 * the cluster
* size, with the maximum supported cluster size being 4kiB. Thus the
* maximum compression buffer size is 64kiB, so we use this when
* initializing the compression buffer.
*/
NTFS_MAX_CB_SIZE = 64 * 1024,
} ntfs_compression_constants;
/*
* ntfs_compression_buffer - one buffer for the decompression engine
*/
static u8 *ntfs_compression_buffer;
/*
* ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
*/
static DEFINE_SPINLOCK(ntfs_cb_lock);
/**
* allocate_compression_buffers - allocate the decompression buffers
*
* Caller has to hold the ntfs_lock mutex.
*
* Return 0 on success or -ENOMEM if the allocations failed.
*/
int allocate_compression_buffers(void)
{
BUG_ON(ntfs_compression_buffer);
ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
if (!ntfs_compression_buffer)
return -ENOMEM;
return 0;
}
/**
* free_compression_buffers - free the decompression buffers
*
* Caller has to hold the ntfs_lock mutex.
*/
void free_compression_buffers(void)
{
BUG_ON(!ntfs_compression_buffer);
vfree(ntfs_compression_buffer);
ntfs_compression_buffer = NULL;
}
/**
* zero_partial_compressed_page - zero out of bounds compressed page region
*/
static void zero_partial_compressed_page(struct page *page,
const s64 initialized_size)
{
u8 *kp = page_address(page);
unsigned int kp_ofs;
ntfs_debug("Zeroing page region outside initialized size.");
if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
clear_page(kp);
return;
}
kp_ofs = initialized_size & ~PAGE_MASK;
memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
return;
}
/**
* handle_bounds_compressed_page - test for&handle out of bounds compressed page
*/
static inline void handle_bounds_compressed_page(struct page *page,
const loff_t i_size, const s64 initialized_size)
{
if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
(initialized_size < i_size))
zero_partial_compressed_page(page, initialized_size);
return;
}
/**
* ntfs_decompress - decompress a compression block into an array of pages
* @dest_pages: destination array of pages
* @completed_pages: scratch space to track completed pages
* @dest_index: current index into @dest_pages (IN/OUT)
* @dest_ofs: current offset within @dest_pages[@dest_index] (IN/OUT)
* @dest_max_index: maximum index into @dest_pages (IN)
* @dest_max_ofs: maximum offset within @dest_pages[@dest_max_index] (IN)
* @xpage: the target page (-1 if none) (IN)
* @xpage_done: set to 1 if xpage was completed successfully (IN/OUT)
* @cb_start: compression block to decompress (IN)
* @cb_size: size of compression block @cb_start in bytes (IN)
* @i_size: file size when we started the read (IN)
* @initialized_size: initialized file size when we started the read (IN)
*
* The caller must have disabled preemption. ntfs_decompress() reenables it when
* the critical section is finished.
*
* This decompresses the compression block @cb_start into the array of
* destination pages @dest_pages starting at index @dest_index into @dest_pages
 * and at offset @dest_ofs into the page @dest_pages[@dest_index].
*
* When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1.
 * If @xpage is -1 or @xpage has not been completed, @xpage_done is not modified.
*
* @cb_start is a pointer to the compression block which needs decompressing
* and @cb_size is the size of @cb_start in bytes (8-64kiB).
*
 * Return 0 on success or -EOVERFLOW on error in the compressed stream.
* @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
* completed during the decompression of the compression block (@cb_start).
*
* Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
 * unpredictably! You have been warned!
*
* Note to hackers: This function may not sleep until it has finished accessing
* the compression block @cb_start as it is a per-CPU buffer.
*/
static int ntfs_decompress(struct page *dest_pages[], int completed_pages[],
int *dest_index, int *dest_ofs, const int dest_max_index,
const int dest_max_ofs, const int xpage, char *xpage_done,
u8 *const cb_start, const u32 cb_size, const loff_t i_size,
const s64 initialized_size)
{
/*
* Pointers into the compressed data, i.e. the compression block (cb),
* and the therein contained sub-blocks (sb).
*/
u8 *cb_end = cb_start + cb_size; /* End of cb. */
u8 *cb = cb_start; /* Current position in cb. */
u8 *cb_sb_start; /* Beginning of the current sb in the cb. */
u8 *cb_sb_end; /* End of current sb / beginning of next sb. */
/* Variables for uncompressed data / destination. */
struct page *dp; /* Current destination page being worked on. */
u8 *dp_addr; /* Current pointer into dp. */
u8 *dp_sb_start; /* Start of current sub-block in dp. */
u8 *dp_sb_end; /* End of current sb in dp (dp_sb_start +
NTFS_SB_SIZE). */
u16 do_sb_start; /* @dest_ofs when starting this sub-block. */
u16 do_sb_end; /* @dest_ofs of end of this sb (do_sb_start +
NTFS_SB_SIZE). */
/* Variables for tag and token parsing. */
u8 tag; /* Current tag. */
int token; /* Loop counter for the eight tokens in tag. */
int nr_completed_pages = 0;
/* Default error code. */
int err = -EOVERFLOW;
ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
cb - cb_start);
/*
* Have we reached the end of the compression block or the end of the
* decompressed data? The latter can happen for example if the current
* position in the compression block is one byte before its end so the
* first two checks do not detect it.
*/
if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
(*dest_index == dest_max_index &&
*dest_ofs == dest_max_ofs)) {
int i;
ntfs_debug("Completed. Returning success (0).");
err = 0;
return_error:
/* We can sleep from now on, so we drop lock. */
spin_unlock(&ntfs_cb_lock);
/* Second stage: finalize completed pages. */
if (nr_completed_pages > 0) {
for (i = 0; i < nr_completed_pages; i++) {
int di = completed_pages[i];
dp = dest_pages[di];
/*
* If we are outside the initialized size, zero
* the out of bounds page range.
*/
handle_bounds_compressed_page(dp, i_size,
initialized_size);
flush_dcache_page(dp);
kunmap(dp);
SetPageUptodate(dp);
unlock_page(dp);
if (di == xpage)
*xpage_done = 1;
else
put_page(dp);
dest_pages[di] = NULL;
}
}
return err;
}
/* Setup offsets for the current sub-block destination. */
do_sb_start = *dest_ofs;
do_sb_end = do_sb_start + NTFS_SB_SIZE;
/* Check that we are still within allowed boundaries. */
if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
goto return_overflow;
/* Does the minimum size of a compressed sb overflow valid range? */
if (cb + 6 > cb_end)
goto return_overflow;
/* Setup the current sub-block source pointers and validate range. */
cb_sb_start = cb;
cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
+ 3;
if (cb_sb_end > cb_end)
goto return_overflow;
/* Get the current destination page. */
dp = dest_pages[*dest_index];
if (!dp) {
/* No page present. Skip decompression of this sub-block. */
cb = cb_sb_end;
/* Advance destination position to next sub-block. */
*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
if (!*dest_ofs && (++*dest_index > dest_max_index))
goto return_overflow;
goto do_next_sb;
}
/* We have a valid destination page. Setup the destination pointers. */
dp_addr = (u8*)page_address(dp) + do_sb_start;
/* Now, we are ready to process the current sub-block (sb). */
if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
ntfs_debug("Found uncompressed sub-block.");
/* This sb is not compressed, just copy it into destination. */
/* Advance source position to first data byte. */
cb += 2;
/* An uncompressed sb must be full size. */
if (cb_sb_end - cb != NTFS_SB_SIZE)
goto return_overflow;
/* Copy the block and advance the source position. */
memcpy(dp_addr, cb, NTFS_SB_SIZE);
cb += NTFS_SB_SIZE;
/* Advance destination position to next sub-block. */
*dest_ofs += NTFS_SB_SIZE;
if (!(*dest_ofs &= ~PAGE_MASK)) {
finalize_page:
/*
* First stage: add current page index to array of
* completed pages.
*/
completed_pages[nr_completed_pages++] = *dest_index;
if (++*dest_index > dest_max_index)
goto return_overflow;
}
goto do_next_sb;
}
ntfs_debug("Found compressed sub-block.");
/* This sb is compressed, decompress it into destination. */
/* Setup destination pointers. */
dp_sb_start = dp_addr;
dp_sb_end = dp_sb_start + NTFS_SB_SIZE;
/* Forward to the first tag in the sub-block. */
cb += 2;
do_next_tag:
if (cb == cb_sb_end) {
/* Check if the decompressed sub-block was not full-length. */
if (dp_addr < dp_sb_end) {
int nr_bytes = do_sb_end - *dest_ofs;
ntfs_debug("Filling incomplete sub-block with "
"zeroes.");
/* Zero remainder and update destination position. */
memset(dp_addr, 0, nr_bytes);
*dest_ofs += nr_bytes;
}
/* We have finished the current sub-block. */
if (!(*dest_ofs &= ~PAGE_MASK))
goto finalize_page;
goto do_next_sb;
}
/* Check we are still in range. */
if (cb > cb_sb_end || dp_addr > dp_sb_end)
goto return_overflow;
/* Get the next tag and advance to first token. */
tag = *cb++;
/* Parse the eight tokens described by the tag. */
for (token = 0; token < 8; token++, tag >>= 1) {
u16 lg, pt, length, max_non_overlap;
register u16 i;
u8 *dp_back_addr;
/* Check if we are done / still in range. */
if (cb >= cb_sb_end || dp_addr > dp_sb_end)
break;
/* Determine token type and parse appropriately.*/
if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
/*
* We have a symbol token, copy the symbol across, and
* advance the source and destination positions.
*/
*dp_addr++ = *cb++;
++*dest_ofs;
/* Continue with the next token. */
continue;
}
/*
* We have a phrase token. Make sure it is not the first tag in
* the sb as this is illegal and would confuse the code below.
*/
if (dp_addr == dp_sb_start)
goto return_overflow;
/*
* Determine the number of bytes to go back (p) and the number
* of bytes to copy (l). We use an optimized algorithm in which
* we first calculate log2(current destination position in sb),
* which allows determination of l and p in O(1) rather than
* O(n). We just need an arch-optimized log2() function now.
*/
lg = 0;
for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
lg++;
/* Get the phrase token into i. */
pt = le16_to_cpup((le16*)cb);
/*
* Calculate starting position of the byte sequence in
* the destination using the fact that p = (pt >> (12 - lg)) + 1
* and make sure we don't go too far back.
*/
dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
if (dp_back_addr < dp_sb_start)
goto return_overflow;
/* Now calculate the length of the byte sequence. */
length = (pt & (0xfff >> lg)) + 3;
/* Advance destination position and verify it is in range. */
*dest_ofs += length;
if (*dest_ofs > do_sb_end)
goto return_overflow;
/* The number of non-overlapping bytes. */
max_non_overlap = dp_addr - dp_back_addr;
if (length <= max_non_overlap) {
/* The byte sequence doesn't overlap, just copy it. */
memcpy(dp_addr, dp_back_addr, length);
/* Advance destination pointer. */
dp_addr += length;
} else {
/*
* The byte sequence does overlap, copy non-overlapping
* part and then do a slow byte by byte copy for the
* overlapping part. Also, advance the destination
* pointer.
*/
memcpy(dp_addr, dp_back_addr, max_non_overlap);
dp_addr += max_non_overlap;
dp_back_addr += max_non_overlap;
length -= max_non_overlap;
while (length--)
*dp_addr++ = *dp_back_addr++;
}
/* Advance source position and continue with the next token. */
cb += 2;
}
/* No tokens left in the current tag. Continue with the next tag. */
goto do_next_tag;
return_overflow:
ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
goto return_error;
}
/**
* ntfs_read_compressed_block - read a compressed block into the page cache
* @page: locked page in the compression block(s) we need to read
*
* When we are called the page has already been verified to be locked and the
* attribute is known to be non-resident, not encrypted, but compressed.
*
* 1. Determine which compression block(s) @page is in.
* 2. Get hold of all pages corresponding to this/these compression block(s).
* 3. Read the (first) compression block.
* 4. Decompress it into the corresponding pages.
* 5. Throw the compressed data away and proceed to 3. for the next compression
* block or return success if no more compression blocks left.
*
* Warning: We have to be careful what we do about existing pages. They might
* have been written to so that we would lose data if we were to just overwrite
* them with the out-of-date uncompressed data.
*
* FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
* the end of the file I think. We need to detect this case and zero the out
* of bounds remainder of the page in question and mark it as handled. At the
* moment we would just return -EIO on such a page. This bug will only become
* apparent if pages are above 8kiB and the NTFS volume only uses 512 byte
* clusters so is probably not going to be seen by anyone. Still this should
* be fixed. (AIA)
*
* FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
* handling sparse and compressed cbs. (AIA)
*
* FIXME: At the moment we don't do any zeroing out in the case that
* initialized_size is less than data_size. This should be safe because of the
* nature of the compression algorithm used. Just in case we check and output
* an error message in read inode if the two sizes are not equal for a
* compressed file. (AIA)
*/
int ntfs_read_compressed_block(struct page *page)
{
loff_t i_size;
s64 initialized_size;
struct address_space *mapping = page->mapping;
ntfs_inode *ni = NTFS_I(mapping->host);
ntfs_volume *vol = ni->vol;
struct super_block *sb = vol->sb;
runlist_element *rl;
unsigned long flags, block_size = sb->s_blocksize;
unsigned char block_size_bits = sb->s_blocksize_bits;
u8 *cb, *cb_pos, *cb_end;
struct buffer_head **bhs;
unsigned long offset, index = page->index;
u32 cb_size = ni->itype.compressed.block_size;
u64 cb_size_mask = cb_size - 1UL;
VCN vcn;
LCN lcn;
/* The first wanted vcn (minimum alignment is PAGE_SIZE). */
VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
vol->cluster_size_bits;
/*
* The first vcn after the last wanted vcn (minimum alignment is again
	 * PAGE_SIZE).
*/
VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
& ~cb_size_mask) >> vol->cluster_size_bits;
/* Number of compression blocks (cbs) in the wanted vcn range. */
unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
>> ni->itype.compressed.block_size_bits;
/*
* Number of pages required to store the uncompressed data from all
* compression blocks (cbs) overlapping @page. Due to alignment
* guarantees of start_vcn and end_vcn, no need to round up here.
*/
unsigned int nr_pages = (end_vcn - start_vcn) <<
vol->cluster_size_bits >> PAGE_SHIFT;
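	/*
	 * Illustrative numbers: with 4 kiB clusters and pages and the
	 * default 64 kiB compression block, reading page index 5 gives
	 * start_vcn = 0, end_vcn = 16, hence nr_cbs = 1 and nr_pages = 16.
	 */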
unsigned int xpage, max_page, cur_page, cur_ofs, i;
unsigned int cb_clusters, cb_max_ofs;
int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
struct page **pages;
int *completed_pages;
unsigned char xpage_done = 0;
ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
"%i.", index, cb_size, nr_pages);
/*
* Bad things happen if we get here for anything that is not an
* unnamed $DATA attribute.
*/
BUG_ON(ni->type != AT_DATA);
BUG_ON(ni->name_len);
pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
completed_pages = kmalloc_array(nr_pages + 1, sizeof(int), GFP_NOFS);
/* Allocate memory to store the buffer heads we need. */
bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
bhs = kmalloc(bhs_size, GFP_NOFS);
if (unlikely(!pages || !bhs || !completed_pages)) {
kfree(bhs);
kfree(pages);
kfree(completed_pages);
unlock_page(page);
ntfs_error(vol->sb, "Failed to allocate internal buffers.");
return -ENOMEM;
}
/*
* We have already been given one page, this is the one we must do.
* Once again, the alignment guarantees keep it simple.
*/
offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
xpage = index - offset;
pages[xpage] = page;
/*
* The remaining pages need to be allocated and inserted into the page
* cache, alignment guarantees keep all the below much simpler. (-8
*/
read_lock_irqsave(&ni->size_lock, flags);
i_size = i_size_read(VFS_I(ni));
initialized_size = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
offset;
/* Is the page fully outside i_size? (truncate in progress) */
if (xpage >= max_page) {
kfree(bhs);
kfree(pages);
kfree(completed_pages);
zero_user(page, 0, PAGE_SIZE);
ntfs_debug("Compressed read outside i_size - truncated?");
SetPageUptodate(page);
unlock_page(page);
return 0;
}
if (nr_pages < max_page)
max_page = nr_pages;
for (i = 0; i < max_page; i++, offset++) {
if (i != xpage)
pages[i] = grab_cache_page_nowait(mapping, offset);
page = pages[i];
if (page) {
/*
* We only (re)read the page if it isn't already read
* in and/or dirty or we would be losing data or at
* least wasting our time.
*/
if (!PageDirty(page) && (!PageUptodate(page) ||
PageError(page))) {
ClearPageError(page);
kmap(page);
continue;
}
unlock_page(page);
put_page(page);
pages[i] = NULL;
}
}
/*
* We have the runlist, and all the destination pages we need to fill.
* Now read the first compression block.
*/
cur_page = 0;
cur_ofs = 0;
cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
nr_cbs--;
nr_bhs = 0;
/* Read all cb buffer heads one cluster at a time. */
rl = NULL;
for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
vcn++) {
bool is_retry = false;
if (!rl) {
lock_retry_remap:
down_read(&ni->runlist.lock);
rl = ni->runlist.rl;
}
if (likely(rl != NULL)) {
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
} else
lcn = LCN_RL_NOT_MAPPED;
ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
(unsigned long long)vcn,
(unsigned long long)lcn);
if (lcn < 0) {
/*
* When we reach the first sparse cluster we have
* finished with the cb.
*/
if (lcn == LCN_HOLE)
break;
if (is_retry || lcn != LCN_RL_NOT_MAPPED)
goto rl_err;
is_retry = true;
/*
* Attempt to map runlist, dropping lock for the
* duration.
*/
up_read(&ni->runlist.lock);
if (!ntfs_map_runlist(ni, vcn))
goto lock_retry_remap;
goto map_rl_err;
}
block = lcn << vol->cluster_size_bits >> block_size_bits;
/* Read the lcn from device in chunks of block_size bytes. */
max_block = block + (vol->cluster_size >> block_size_bits);
do {
ntfs_debug("block = 0x%x.", block);
if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
goto getblk_err;
nr_bhs++;
} while (++block < max_block);
}
/* Release the lock if we took it. */
if (rl)
up_read(&ni->runlist.lock);
/* Setup and initiate io on all buffer heads. */
for (i = 0; i < nr_bhs; i++) {
struct buffer_head *tbh = bhs[i];
if (!trylock_buffer(tbh))
continue;
if (unlikely(buffer_uptodate(tbh))) {
unlock_buffer(tbh);
continue;
}
get_bh(tbh);
tbh->b_end_io = end_buffer_read_sync;
submit_bh(REQ_OP_READ, tbh);
}
/* Wait for io completion on all buffer heads. */
for (i = 0; i < nr_bhs; i++) {
struct buffer_head *tbh = bhs[i];
if (buffer_uptodate(tbh))
continue;
wait_on_buffer(tbh);
/*
* We need an optimization barrier here, otherwise we start
* hitting the below fixup code when accessing a loopback
* mounted ntfs partition. This indicates either there is a
* race condition in the loop driver or, more likely, gcc
* overoptimises the code without the barrier and it doesn't
* do the Right Thing(TM).
*/
barrier();
if (unlikely(!buffer_uptodate(tbh))) {
ntfs_warning(vol->sb, "Buffer is unlocked but not "
"uptodate! Unplugging the disk queue "
"and rescheduling.");
get_bh(tbh);
io_schedule();
put_bh(tbh);
if (unlikely(!buffer_uptodate(tbh)))
goto read_err;
ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
}
}
/*
* Get the compression buffer. We must not sleep any more
* until we are finished with it.
*/
spin_lock(&ntfs_cb_lock);
cb = ntfs_compression_buffer;
BUG_ON(!cb);
cb_pos = cb;
cb_end = cb + cb_size;
/* Copy the buffer heads into the contiguous buffer. */
for (i = 0; i < nr_bhs; i++) {
memcpy(cb_pos, bhs[i]->b_data, block_size);
cb_pos += block_size;
}
/* Just a precaution. */
if (cb_pos + 2 <= cb + cb_size)
*(u16*)cb_pos = 0;
/* Reset cb_pos back to the beginning. */
cb_pos = cb;
/* We now have both source (if present) and destination. */
ntfs_debug("Successfully read the compression block.");
/* The last page and maximum offset within it for the current cb. */
cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
cb_max_ofs = cb_max_page & ~PAGE_MASK;
cb_max_page >>= PAGE_SHIFT;
/* Catch end of file inside a compression block. */
if (cb_max_page > max_page)
cb_max_page = max_page;
if (vcn == start_vcn - cb_clusters) {
/* Sparse cb, zero out page range overlapping the cb. */
ntfs_debug("Found sparse compression block.");
/* We can sleep from now on, so we drop lock. */
spin_unlock(&ntfs_cb_lock);
if (cb_max_ofs)
cb_max_page--;
for (; cur_page < cb_max_page; cur_page++) {
page = pages[cur_page];
if (page) {
if (likely(!cur_ofs))
clear_page(page_address(page));
else
memset(page_address(page) + cur_ofs, 0,
PAGE_SIZE -
cur_ofs);
flush_dcache_page(page);
kunmap(page);
SetPageUptodate(page);
unlock_page(page);
if (cur_page == xpage)
xpage_done = 1;
else
put_page(page);
pages[cur_page] = NULL;
}
cb_pos += PAGE_SIZE - cur_ofs;
cur_ofs = 0;
if (cb_pos >= cb_end)
break;
}
/* If we have a partial final page, deal with it now. */
if (cb_max_ofs && cb_pos < cb_end) {
page = pages[cur_page];
if (page)
memset(page_address(page) + cur_ofs, 0,
cb_max_ofs - cur_ofs);
/*
* No need to update cb_pos at this stage:
* cb_pos += cb_max_ofs - cur_ofs;
*/
cur_ofs = cb_max_ofs;
}
} else if (vcn == start_vcn) {
/* We can't sleep so we need two stages. */
unsigned int cur2_page = cur_page;
unsigned int cur_ofs2 = cur_ofs;
u8 *cb_pos2 = cb_pos;
ntfs_debug("Found uncompressed compression block.");
/* Uncompressed cb, copy it to the destination pages. */
/*
* TODO: As a big optimization, we could detect this case
* before we read all the pages and use block_read_full_folio()
* on all full pages instead (we still have to treat partial
* pages especially but at least we are getting rid of the
		 * synchronous io for the majority of pages.)
* Or if we choose not to do the read-ahead/-behind stuff, we
* could just return block_read_full_folio(pages[xpage]) as long
* as PAGE_SIZE <= cb_size.
*/
if (cb_max_ofs)
cb_max_page--;
/* First stage: copy data into destination pages. */
for (; cur_page < cb_max_page; cur_page++) {
page = pages[cur_page];
if (page)
memcpy(page_address(page) + cur_ofs, cb_pos,
PAGE_SIZE - cur_ofs);
cb_pos += PAGE_SIZE - cur_ofs;
cur_ofs = 0;
if (cb_pos >= cb_end)
break;
}
/* If we have a partial final page, deal with it now. */
if (cb_max_ofs && cb_pos < cb_end) {
page = pages[cur_page];
if (page)
memcpy(page_address(page) + cur_ofs, cb_pos,
cb_max_ofs - cur_ofs);
cb_pos += cb_max_ofs - cur_ofs;
cur_ofs = cb_max_ofs;
}
/* We can sleep from now on, so drop lock. */
spin_unlock(&ntfs_cb_lock);
/* Second stage: finalize pages. */
for (; cur2_page < cb_max_page; cur2_page++) {
page = pages[cur2_page];
if (page) {
/*
* If we are outside the initialized size, zero
* the out of bounds page range.
*/
handle_bounds_compressed_page(page, i_size,
initialized_size);
flush_dcache_page(page);
kunmap(page);
SetPageUptodate(page);
unlock_page(page);
if (cur2_page == xpage)
xpage_done = 1;
else
put_page(page);
pages[cur2_page] = NULL;
}
cb_pos2 += PAGE_SIZE - cur_ofs2;
cur_ofs2 = 0;
if (cb_pos2 >= cb_end)
break;
}
} else {
/* Compressed cb, decompress it into the destination page(s). */
unsigned int prev_cur_page = cur_page;
ntfs_debug("Found compressed compression block.");
err = ntfs_decompress(pages, completed_pages, &cur_page,
&cur_ofs, cb_max_page, cb_max_ofs, xpage,
&xpage_done, cb_pos, cb_size - (cb_pos - cb),
i_size, initialized_size);
/*
* We can sleep from now on, lock already dropped by
* ntfs_decompress().
*/
if (err) {
ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
"0x%lx with error code %i. Skipping "
"this compression block.",
ni->mft_no, -err);
/* Release the unfinished pages. */
for (; prev_cur_page < cur_page; prev_cur_page++) {
page = pages[prev_cur_page];
if (page) {
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
if (prev_cur_page != xpage)
put_page(page);
pages[prev_cur_page] = NULL;
}
}
}
}
/* Release the buffer heads. */
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
/* Do we have more work to do? */
if (nr_cbs)
goto do_next_cb;
/* We no longer need the list of buffer heads. */
kfree(bhs);
/* Clean up if we have any pages left. Should never happen. */
for (cur_page = 0; cur_page < max_page; cur_page++) {
page = pages[cur_page];
if (page) {
ntfs_error(vol->sb, "Still have pages left! "
"Terminating them with extreme "
"prejudice. Inode 0x%lx, page index "
"0x%lx.", ni->mft_no, page->index);
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
if (cur_page != xpage)
put_page(page);
pages[cur_page] = NULL;
}
}
/* We no longer need the list of pages. */
kfree(pages);
kfree(completed_pages);
/* If we have completed the requested page, we return success. */
if (likely(xpage_done))
return 0;
ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
return err < 0 ? err : -EIO;
read_err:
ntfs_error(vol->sb, "IO error while reading compressed data.");
/* Release the buffer heads. */
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
goto err_out;
map_rl_err:
ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
"compression block.");
goto err_out;
rl_err:
up_read(&ni->runlist.lock);
ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
"compression block.");
goto err_out;
getblk_err:
up_read(&ni->runlist.lock);
ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");
err_out:
kfree(bhs);
for (i = cur_page; i < max_page; i++) {
page = pages[i];
if (page) {
flush_dcache_page(page);
kunmap(page);
unlock_page(page);
if (i != xpage)
put_page(page);
}
}
kfree(pages);
kfree(completed_pages);
return -EIO;
}
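/*
 * Editor's illustrative sketch (not part of the original driver): the
 * two-stage pattern used above for uncompressed compression blocks.
 * While ntfs_cb_lock is held we may not sleep, so stage one only does
 * plain memcpy()s out of the shared buffer; stage two runs after
 * spin_unlock() and finalizes the pages, where sleeping is allowed
 * again. Minimal restatement with hypothetical names; assumes lowmem
 * pages (page_address() valid) that are already locked.
 */
#if 0 /* example only, not compiled */
static void example_two_stage_copy(struct page **pages, int nr_pages,
		const u8 *src, spinlock_t *lock)
{
	int i;

	spin_lock(lock);
	/* Stage 1: atomic context, memory copies only. */
	for (i = 0; i < nr_pages; i++)
		if (pages[i])
			memcpy(page_address(pages[i]), src + i * PAGE_SIZE,
					PAGE_SIZE);
	spin_unlock(lock);
	/* Stage 2: lock dropped, finalize the pages. */
	for (i = 0; i < nr_pages; i++) {
		if (!pages[i])
			continue;
		flush_dcache_page(pages[i]);
		SetPageUptodate(pages[i]);
		unlock_page(pages[i]);
	}
}
#endif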
| linux-master | fs/ntfs/compress.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sysctl.c - Code for sysctl handling in NTFS Linux kernel driver. Part of
* the Linux-NTFS project. Adapted from the old NTFS driver,
* Copyright (C) 1997 Martin von Löwis, Régis Duchesne
*
* Copyright (c) 2002-2005 Anton Altaparmakov
*/
#ifdef DEBUG
#include <linux/module.h>
#ifdef CONFIG_SYSCTL
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include "sysctl.h"
#include "debug.h"
/* Definition of the ntfs sysctl. */
static struct ctl_table ntfs_sysctls[] = {
{
.procname = "ntfs-debug",
.data = &debug_msgs, /* Data pointer and size. */
.maxlen = sizeof(debug_msgs),
.mode = 0644, /* Mode, proc handler. */
.proc_handler = proc_dointvec
},
{}
};
/* Storage for the sysctls header. */
static struct ctl_table_header *sysctls_root_table;
/**
* ntfs_sysctl - add or remove the debug sysctl
* @add: add (1) or remove (0) the sysctl
*
* Add or remove the debug sysctl. Return 0 on success or -errno on error.
*/
int ntfs_sysctl(int add)
{
if (add) {
BUG_ON(sysctls_root_table);
sysctls_root_table = register_sysctl("fs", ntfs_sysctls);
if (!sysctls_root_table)
return -ENOMEM;
} else {
BUG_ON(!sysctls_root_table);
unregister_sysctl_table(sysctls_root_table);
sysctls_root_table = NULL;
}
return 0;
}
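/*
 * Editor's illustrative sketch (not part of this file): the expected
 * call pattern for ntfs_sysctl() from the driver's module init/exit
 * paths -- register on load, unregister on unload. Error handling is
 * reduced to the minimum and the function names are hypothetical.
 */
#if 0 /* example only, not compiled */
static int __init example_init(void)
{
	int err = ntfs_sysctl(1);	/* add /proc/sys/fs/ntfs-debug */

	if (err)
		return err;
	/* ... rest of module initialization ... */
	return 0;
}

static void __exit example_exit(void)
{
	ntfs_sysctl(0);			/* remove the sysctl again */
}
#endif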
#endif /* CONFIG_SYSCTL */
#endif /* DEBUG */
| linux-master | fs/ntfs/sysctl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
* Copyright (c) 2002 Richard Russon
*/
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include "attrib.h"
#include "aops.h"
#include "bitmap.h"
#include "debug.h"
#include "dir.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
#define MAX_BHS (PAGE_SIZE / NTFS_BLOCK_SIZE)
/**
* map_mft_record_page - map the page in which a specific mft record resides
* @ni: ntfs inode whose mft record page to map
*
* This maps the page in which the mft record of the ntfs inode @ni is situated
* and returns a pointer to the mft record within the mapped page.
*
* Return value needs to be checked with IS_ERR() and if that is true PTR_ERR()
* contains the negative error code returned.
*/
static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
{
loff_t i_size;
ntfs_volume *vol = ni->vol;
struct inode *mft_vi = vol->mft_ino;
struct page *page;
unsigned long index, end_index;
unsigned ofs;
BUG_ON(ni->page);
/*
* The index into the page cache and the offset within the page cache
* page of the wanted mft record. FIXME: We need to check for
* overflowing the unsigned long, but I don't think we would ever get
* here if the volume was that big...
*/
index = (u64)ni->mft_no << vol->mft_record_size_bits >>
PAGE_SHIFT;
ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
i_size = i_size_read(mft_vi);
/* The maximum valid index into the page cache for $MFT's data. */
end_index = i_size >> PAGE_SHIFT;
/* If the wanted index is out of bounds the mft record doesn't exist. */
if (unlikely(index >= end_index)) {
if (index > end_index || (i_size & ~PAGE_MASK) < ofs +
vol->mft_record_size) {
page = ERR_PTR(-ENOENT);
ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, "
"which is beyond the end of the mft. "
"This is probably a bug in the ntfs "
"driver.", ni->mft_no);
goto err_out;
}
}
/* Read, map, and pin the page. */
page = ntfs_map_page(mft_vi->i_mapping, index);
if (!IS_ERR(page)) {
/* Catch multi sector transfer fixup errors. */
if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
ofs)))) {
ni->page = page;
ni->page_ofs = ofs;
return page_address(page) + ofs;
}
ntfs_error(vol->sb, "Mft record 0x%lx is corrupt. "
"Run chkdsk.", ni->mft_no);
ntfs_unmap_page(page);
page = ERR_PTR(-EIO);
NVolSetErrors(vol);
}
err_out:
ni->page = NULL;
ni->page_ofs = 0;
return (void*)page;
}
/**
* map_mft_record - map, pin and lock an mft record
* @ni: ntfs inode whose MFT record to map
*
 * First, take the mrec_lock mutex. We might now sleep while waiting for the
 * mutex if it was already locked by someone else.
*
* The page of the record is mapped using map_mft_record_page() before being
* returned to the caller.
*
* This in turn uses ntfs_map_page() to get the page containing the wanted mft
* record (it in turn calls read_cache_page() which reads it in from disk if
* necessary, increments the use count on the page so that it cannot disappear
* under us and returns a reference to the page cache page).
*
* If read_cache_page() invokes ntfs_readpage() to load the page from disk, it
* sets PG_locked and clears PG_uptodate on the page. Once I/O has completed
* and the post-read mst fixups on each mft record in the page have been
* performed, the page gets PG_uptodate set and PG_locked cleared (this is done
* in our asynchronous I/O completion handler end_buffer_read_mft_async()).
* ntfs_map_page() waits for PG_locked to become clear and checks if
* PG_uptodate is set and returns an error code if not. This provides
* sufficient protection against races when reading/using the page.
*
* However there is the write mapping to think about. Doing the above described
* checking here will be fine, because when initiating the write we will set
* PG_locked and clear PG_uptodate making sure nobody is touching the page
* contents. Doing the locking this way means that the commit to disk code in
* the page cache code paths is automatically sufficiently locked with us as
 * we will not touch a page that has been locked or is not uptodate. The only
 * remaining locking problem is the page cache code locking the page while we
 * are accessing it.
*
* So that code will end up having to own the mrec_lock of all mft
* records/inodes present in the page before I/O can proceed. In that case we
* wouldn't need to bother with PG_locked and PG_uptodate as nobody will be
* accessing anything without owning the mrec_lock mutex. But we do need to
* use them because of the read_cache_page() invocation and the code becomes so
* much simpler this way that it is well worth it.
*
* The mft record is now ours and we return a pointer to it. You need to check
* the returned pointer with IS_ERR() and if that is true, PTR_ERR() will return
* the error code.
*
* NOTE: Caller is responsible for setting the mft record dirty before calling
* unmap_mft_record(). This is obviously only necessary if the caller really
* modified the mft record...
* Q: Do we want to recycle one of the VFS inode state bits instead?
* A: No, the inode ones mean we want to change the mft record, not we want to
* write it out.
*/
MFT_RECORD *map_mft_record(ntfs_inode *ni)
{
MFT_RECORD *m;
ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no);
/* Make sure the ntfs inode doesn't go away. */
atomic_inc(&ni->count);
/* Serialize access to this mft record. */
mutex_lock(&ni->mrec_lock);
m = map_mft_record_page(ni);
if (!IS_ERR(m))
return m;
mutex_unlock(&ni->mrec_lock);
atomic_dec(&ni->count);
ntfs_error(ni->vol->sb, "Failed with error code %lu.", -PTR_ERR(m));
return m;
}
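/*
 * Editor's illustrative sketch (not part of the original driver): the
 * canonical caller pattern for map_mft_record()/unmap_mft_record() as
 * described above -- map, modify, mark the record dirty BEFORE
 * unmapping, then unmap. The modification step is hypothetical.
 */
#if 0 /* example only, not compiled */
static int example_touch_mft_record(ntfs_inode *ni)
{
	MFT_RECORD *m = map_mft_record(ni);

	if (IS_ERR(m))
		return PTR_ERR(m);
	/* ... modify the mft record @m here ... */
	mark_mft_record_dirty(ni);	/* must precede the unmap */
	unmap_mft_record(ni);
	return 0;
}
#endif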
/**
* unmap_mft_record_page - unmap the page in which a specific mft record resides
* @ni: ntfs inode whose mft record page to unmap
*
* This unmaps the page in which the mft record of the ntfs inode @ni is
* situated and returns. This is a NOOP if highmem is not configured.
*
* The unmap happens via ntfs_unmap_page() which in turn decrements the use
* count on the page thus releasing it from the pinned state.
*
* We do not actually unmap the page from memory of course, as that will be
* done by the page cache code itself when memory pressure increases or
* whatever.
*/
static inline void unmap_mft_record_page(ntfs_inode *ni)
{
BUG_ON(!ni->page);
// TODO: If dirty, blah...
ntfs_unmap_page(ni->page);
ni->page = NULL;
ni->page_ofs = 0;
return;
}
/**
* unmap_mft_record - release a mapped mft record
* @ni: ntfs inode whose MFT record to unmap
*
* We release the page mapping and the mrec_lock mutex which unmaps the mft
* record and releases it for others to get hold of. We also release the ntfs
* inode by decrementing the ntfs inode reference count.
*
* NOTE: If caller has modified the mft record, it is imperative to set the mft
* record dirty BEFORE calling unmap_mft_record().
*/
void unmap_mft_record(ntfs_inode *ni)
{
struct page *page = ni->page;
BUG_ON(!page);
ntfs_debug("Entering for mft_no 0x%lx.", ni->mft_no);
unmap_mft_record_page(ni);
mutex_unlock(&ni->mrec_lock);
atomic_dec(&ni->count);
/*
* If pure ntfs_inode, i.e. no vfs inode attached, we leave it to
* ntfs_clear_extent_inode() in the extent inode case, and to the
* caller in the non-extent, yet pure ntfs inode case, to do the actual
* tear down of all structures and freeing of all allocated memory.
*/
return;
}
/**
* map_extent_mft_record - load an extent inode and attach it to its base
* @base_ni: base ntfs inode
* @mref: mft reference of the extent inode to load
* @ntfs_ino: on successful return, pointer to the ntfs_inode structure
*
* Load the extent mft record @mref and attach it to its base inode @base_ni.
* Return the mapped extent mft record if IS_ERR(result) is false. Otherwise
* PTR_ERR(result) gives the negative error code.
*
* On successful return, @ntfs_ino contains a pointer to the ntfs_inode
* structure of the mapped extent inode.
*/
MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
ntfs_inode **ntfs_ino)
{
MFT_RECORD *m;
ntfs_inode *ni = NULL;
ntfs_inode **extent_nis = NULL;
int i;
unsigned long mft_no = MREF(mref);
u16 seq_no = MSEQNO(mref);
bool destroy_ni = false;
ntfs_debug("Mapping extent mft record 0x%lx (base mft record 0x%lx).",
mft_no, base_ni->mft_no);
/* Make sure the base ntfs inode doesn't go away. */
atomic_inc(&base_ni->count);
/*
* Check if this extent inode has already been added to the base inode,
* in which case just return it. If not found, add it to the base
* inode before returning it.
*/
mutex_lock(&base_ni->extent_lock);
if (base_ni->nr_extents > 0) {
extent_nis = base_ni->ext.extent_ntfs_inos;
for (i = 0; i < base_ni->nr_extents; i++) {
if (mft_no != extent_nis[i]->mft_no)
continue;
ni = extent_nis[i];
/* Make sure the ntfs inode doesn't go away. */
atomic_inc(&ni->count);
break;
}
}
if (likely(ni != NULL)) {
mutex_unlock(&base_ni->extent_lock);
atomic_dec(&base_ni->count);
/* We found the record; just have to map and return it. */
m = map_mft_record(ni);
/* map_mft_record() has incremented this on success. */
atomic_dec(&ni->count);
if (!IS_ERR(m)) {
/* Verify the sequence number. */
if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
ntfs_debug("Done 1.");
*ntfs_ino = ni;
return m;
}
unmap_mft_record(ni);
ntfs_error(base_ni->vol->sb, "Found stale extent mft "
"reference! Corrupt filesystem. "
"Run chkdsk.");
return ERR_PTR(-EIO);
}
map_err_out:
ntfs_error(base_ni->vol->sb, "Failed to map extent "
"mft record, error code %ld.", -PTR_ERR(m));
return m;
}
/* Record wasn't there. Get a new ntfs inode and initialize it. */
ni = ntfs_new_extent_inode(base_ni->vol->sb, mft_no);
if (unlikely(!ni)) {
mutex_unlock(&base_ni->extent_lock);
atomic_dec(&base_ni->count);
return ERR_PTR(-ENOMEM);
}
ni->vol = base_ni->vol;
ni->seq_no = seq_no;
ni->nr_extents = -1;
ni->ext.base_ntfs_ino = base_ni;
/* Now map the record. */
m = map_mft_record(ni);
if (IS_ERR(m)) {
mutex_unlock(&base_ni->extent_lock);
atomic_dec(&base_ni->count);
ntfs_clear_extent_inode(ni);
goto map_err_out;
}
/* Verify the sequence number if it is present. */
if (seq_no && (le16_to_cpu(m->sequence_number) != seq_no)) {
ntfs_error(base_ni->vol->sb, "Found stale extent mft "
"reference! Corrupt filesystem. Run chkdsk.");
destroy_ni = true;
m = ERR_PTR(-EIO);
goto unm_err_out;
}
/* Attach extent inode to base inode, reallocating memory if needed. */
if (!(base_ni->nr_extents & 3)) {
ntfs_inode **tmp;
int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode *);
tmp = kmalloc(new_size, GFP_NOFS);
if (unlikely(!tmp)) {
ntfs_error(base_ni->vol->sb, "Failed to allocate "
"internal buffer.");
destroy_ni = true;
m = ERR_PTR(-ENOMEM);
goto unm_err_out;
}
if (base_ni->nr_extents) {
BUG_ON(!base_ni->ext.extent_ntfs_inos);
memcpy(tmp, base_ni->ext.extent_ntfs_inos, new_size -
4 * sizeof(ntfs_inode *));
kfree(base_ni->ext.extent_ntfs_inos);
}
base_ni->ext.extent_ntfs_inos = tmp;
}
base_ni->ext.extent_ntfs_inos[base_ni->nr_extents++] = ni;
mutex_unlock(&base_ni->extent_lock);
atomic_dec(&base_ni->count);
ntfs_debug("Done 2.");
*ntfs_ino = ni;
return m;
unm_err_out:
unmap_mft_record(ni);
mutex_unlock(&base_ni->extent_lock);
atomic_dec(&base_ni->count);
/*
* If the extent inode was not attached to the base inode we need to
* release it or we will leak memory.
*/
if (destroy_ni)
ntfs_clear_extent_inode(ni);
return m;
}
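/*
 * Editor's illustrative sketch (not part of the original driver): using
 * map_extent_mft_record() as described above. On success both the
 * mapped record and the extent ntfs inode are returned; the record must
 * be released through the extent inode, not the base inode.
 */
#if 0 /* example only, not compiled */
static int example_map_extent(ntfs_inode *base_ni, MFT_REF mref)
{
	ntfs_inode *eni;
	MFT_RECORD *m = map_extent_mft_record(base_ni, mref, &eni);

	if (IS_ERR(m))
		return PTR_ERR(m);
	/* ... work with the extent mft record @m here ... */
	unmap_mft_record(eni);		/* note: @eni, not @base_ni */
	return 0;
}
#endif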
#ifdef NTFS_RW
/**
* __mark_mft_record_dirty - set the mft record and the page containing it dirty
* @ni: ntfs inode describing the mapped mft record
*
* Internal function. Users should call mark_mft_record_dirty() instead.
*
* Set the mapped (extent) mft record of the (base or extent) ntfs inode @ni,
* as well as the page containing the mft record, dirty. Also, mark the base
* vfs inode dirty. This ensures that any changes to the mft record are
* written out to disk.
*
* NOTE: We only set I_DIRTY_DATASYNC (and not I_DIRTY_PAGES)
* on the base vfs inode, because even though file data may have been modified,
* it is dirty in the inode meta data rather than the data page cache of the
* inode, and thus there are no data pages that need writing out. Therefore, a
* full mark_inode_dirty() is overkill. A mark_inode_dirty_sync(), on the
* other hand, is not sufficient, because ->write_inode needs to be called even
* in case of fdatasync. This needs to happen or the file data would not
* necessarily hit the device synchronously, even though the vfs inode has the
* O_SYNC flag set. Also, I_DIRTY_DATASYNC simply "feels" better than just
* I_DIRTY_SYNC, since the file data has not actually hit the block device yet,
* which is not what I_DIRTY_SYNC on its own would suggest.
*/
void __mark_mft_record_dirty(ntfs_inode *ni)
{
ntfs_inode *base_ni;
ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
BUG_ON(NInoAttr(ni));
mark_ntfs_record_dirty(ni->page, ni->page_ofs);
/* Determine the base vfs inode and mark it dirty, too. */
mutex_lock(&ni->extent_lock);
if (likely(ni->nr_extents >= 0))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
mutex_unlock(&ni->extent_lock);
__mark_inode_dirty(VFS_I(base_ni), I_DIRTY_DATASYNC);
}
static const char *ntfs_please_email = "Please email "
"[email protected] and say that you saw "
"this message. Thank you.";
/**
* ntfs_sync_mft_mirror_umount - synchronise an mft record to the mft mirror
* @vol: ntfs volume on which the mft record to synchronize resides
* @mft_no: mft record number of mft record to synchronize
* @m: mapped, mst protected (extent) mft record to synchronize
*
* Write the mapped, mst protected (extent) mft record @m with mft record
* number @mft_no to the mft mirror ($MFTMirr) of the ntfs volume @vol,
* bypassing the page cache and the $MFTMirr inode itself.
*
* This function is only for use at umount time when the mft mirror inode has
 * already been disposed of. We BUG() if we are called while the mft mirror
* inode is still attached to the volume.
*
* On success return 0. On error return -errno.
*
* NOTE: This function is not implemented yet as I am not convinced it can
* actually be triggered considering the sequence of commits we do in super.c::
* ntfs_put_super(). But just in case we provide this place holder as the
* alternative would be either to BUG() or to get a NULL pointer dereference
* and Oops.
*/
static int ntfs_sync_mft_mirror_umount(ntfs_volume *vol,
const unsigned long mft_no, MFT_RECORD *m)
{
BUG_ON(vol->mftmirr_ino);
ntfs_error(vol->sb, "Umount time mft mirror syncing is not "
"implemented yet. %s", ntfs_please_email);
return -EOPNOTSUPP;
}
/**
* ntfs_sync_mft_mirror - synchronize an mft record to the mft mirror
* @vol: ntfs volume on which the mft record to synchronize resides
* @mft_no: mft record number of mft record to synchronize
* @m: mapped, mst protected (extent) mft record to synchronize
* @sync: if true, wait for i/o completion
*
* Write the mapped, mst protected (extent) mft record @m with mft record
* number @mft_no to the mft mirror ($MFTMirr) of the ntfs volume @vol.
*
* On success return 0. On error return -errno and set the volume errors flag
* in the ntfs volume @vol.
*
* NOTE: We always perform synchronous i/o and ignore the @sync parameter.
*
* TODO: If @sync is false, want to do truly asynchronous i/o, i.e. just
* schedule i/o via ->writepage or do it via kntfsd or whatever.
*/
int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
MFT_RECORD *m, int sync)
{
struct page *page;
unsigned int blocksize = vol->sb->s_blocksize;
int max_bhs = vol->mft_record_size / blocksize;
struct buffer_head *bhs[MAX_BHS];
struct buffer_head *bh, *head;
u8 *kmirr;
runlist_element *rl;
unsigned int block_start, block_end, m_start, m_end, page_ofs;
int i_bhs, nr_bhs, err = 0;
unsigned char blocksize_bits = vol->sb->s_blocksize_bits;
ntfs_debug("Entering for inode 0x%lx.", mft_no);
BUG_ON(!max_bhs);
if (WARN_ON(max_bhs > MAX_BHS))
return -EINVAL;
if (unlikely(!vol->mftmirr_ino)) {
/* This could happen during umount... */
err = ntfs_sync_mft_mirror_umount(vol, mft_no, m);
if (likely(!err))
return err;
goto err_out;
}
/* Get the page containing the mirror copy of the mft record @m. */
page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >>
(PAGE_SHIFT - vol->mft_record_size_bits));
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to map mft mirror page.");
err = PTR_ERR(page);
goto err_out;
}
lock_page(page);
BUG_ON(!PageUptodate(page));
ClearPageUptodate(page);
/* Offset of the mft mirror record inside the page. */
page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
/* The address in the page of the mirror copy of the mft record @m. */
kmirr = page_address(page) + page_ofs;
/* Copy the mst protected mft record to the mirror. */
memcpy(kmirr, m, vol->mft_record_size);
/* Create uptodate buffers if not present. */
if (unlikely(!page_has_buffers(page))) {
struct buffer_head *tail;
bh = head = alloc_page_buffers(page, blocksize, true);
do {
set_buffer_uptodate(bh);
tail = bh;
bh = bh->b_this_page;
} while (bh);
tail->b_this_page = head;
attach_page_private(page, head);
}
bh = head = page_buffers(page);
BUG_ON(!bh);
rl = NULL;
nr_bhs = 0;
block_start = 0;
m_start = kmirr - (u8*)page_address(page);
m_end = m_start + vol->mft_record_size;
do {
block_end = block_start + blocksize;
/* If the buffer is outside the mft record, skip it. */
if (block_end <= m_start)
continue;
if (unlikely(block_start >= m_end))
break;
/* Need to map the buffer if it is not mapped already. */
if (unlikely(!buffer_mapped(bh))) {
VCN vcn;
LCN lcn;
unsigned int vcn_ofs;
bh->b_bdev = vol->sb->s_bdev;
/* Obtain the vcn and offset of the current block. */
vcn = ((VCN)mft_no << vol->mft_record_size_bits) +
(block_start - m_start);
vcn_ofs = vcn & vol->cluster_size_mask;
vcn >>= vol->cluster_size_bits;
if (!rl) {
down_read(&NTFS_I(vol->mftmirr_ino)->
runlist.lock);
rl = NTFS_I(vol->mftmirr_ino)->runlist.rl;
/*
* $MFTMirr always has the whole of its runlist
* in memory.
*/
BUG_ON(!rl);
}
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
/* For $MFTMirr, only lcn >= 0 is a successful remap. */
if (likely(lcn >= 0)) {
/* Setup buffer head to correct block. */
bh->b_blocknr = ((lcn <<
vol->cluster_size_bits) +
vcn_ofs) >> blocksize_bits;
set_buffer_mapped(bh);
} else {
bh->b_blocknr = -1;
ntfs_error(vol->sb, "Cannot write mft mirror "
"record 0x%lx because its "
"location on disk could not "
"be determined (error code "
"%lli).", mft_no,
(long long)lcn);
err = -EIO;
}
}
BUG_ON(!buffer_uptodate(bh));
BUG_ON(!nr_bhs && (m_start != block_start));
BUG_ON(nr_bhs >= max_bhs);
bhs[nr_bhs++] = bh;
BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
} while (block_start = block_end, (bh = bh->b_this_page) != head);
if (unlikely(rl))
up_read(&NTFS_I(vol->mftmirr_ino)->runlist.lock);
if (likely(!err)) {
/* Lock buffers and start synchronous write i/o on them. */
for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
struct buffer_head *tbh = bhs[i_bhs];
if (!trylock_buffer(tbh))
BUG();
BUG_ON(!buffer_uptodate(tbh));
clear_buffer_dirty(tbh);
get_bh(tbh);
tbh->b_end_io = end_buffer_write_sync;
submit_bh(REQ_OP_WRITE, tbh);
}
/* Wait on i/o completion of buffers. */
for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
struct buffer_head *tbh = bhs[i_bhs];
wait_on_buffer(tbh);
if (unlikely(!buffer_uptodate(tbh))) {
err = -EIO;
/*
* Set the buffer uptodate so the page and
* buffer states do not become out of sync.
*/
set_buffer_uptodate(tbh);
}
}
} else /* if (unlikely(err)) */ {
/* Clean the buffers. */
for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++)
clear_buffer_dirty(bhs[i_bhs]);
}
/* Current state: all buffers are clean, unlocked, and uptodate. */
/* Remove the mst protection fixups again. */
post_write_mst_fixup((NTFS_RECORD*)kmirr);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
ntfs_unmap_page(page);
if (likely(!err)) {
ntfs_debug("Done.");
} else {
ntfs_error(vol->sb, "I/O error while writing mft mirror "
"record 0x%lx!", mft_no);
err_out:
ntfs_error(vol->sb, "Failed to synchronize $MFTMirr (error "
"code %i). Volume will be left marked dirty "
"on umount. Run ntfsfix on the partition "
"after umounting to correct this.", -err);
NVolSetErrors(vol);
}
return err;
}
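/*
 * Editor's illustrative sketch (not part of the original driver): the
 * byte-offset -> disk-block arithmetic used by the buffer-mapping loops
 * in this file. A byte offset into the attribute is split into a
 * virtual cluster number plus an in-cluster offset, the vcn is remapped
 * to a logical cluster number via the runlist, and the result is
 * converted to a block number in units of the volume block size. The
 * helper name is hypothetical.
 */
#if 0 /* example only, not compiled */
static s64 example_attr_ofs_to_block(ntfs_volume *vol,
		runlist_element *rl, s64 byte_ofs)
{
	VCN vcn = byte_ofs >> vol->cluster_size_bits;
	unsigned int vcn_ofs = byte_ofs & vol->cluster_size_mask;
	LCN lcn;

	/* Seek to the runlist element containing the target vcn. */
	while (rl->length && rl[1].vcn <= vcn)
		rl++;
	lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
	if (lcn < 0)
		return -1;	/* hole or error: cannot be mapped */
	return ((lcn << vol->cluster_size_bits) + vcn_ofs) >>
			vol->sb->s_blocksize_bits;
}
#endif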
/**
* write_mft_record_nolock - write out a mapped (extent) mft record
* @ni: ntfs inode describing the mapped (extent) mft record
* @m: mapped (extent) mft record to write
* @sync: if true, wait for i/o completion
*
* Write the mapped (extent) mft record @m described by the (regular or extent)
* ntfs inode @ni to backing store. If the mft record @m has a counterpart in
* the mft mirror, that is also updated.
*
* We only write the mft record if the ntfs inode @ni is dirty and the first
* buffer belonging to its mft record is dirty, too. We ignore the dirty state
* of subsequent buffers because we could have raced with
* fs/ntfs/aops.c::mark_ntfs_record_dirty().
*
* On success, clean the mft record and return 0. On error, leave the mft
* record dirty and return -errno.
*
* NOTE: We always perform synchronous i/o and ignore the @sync parameter.
* However, if the mft record has a counterpart in the mft mirror and @sync is
* true, we write the mft record, wait for i/o completion, and only then write
* the mft mirror copy. This ensures that if the system crashes either the mft
* or the mft mirror will contain a self-consistent mft record @m. If @sync is
* false on the other hand, we start i/o on both and then wait for completion
* on them. This provides a speedup but no longer guarantees that you will end
 * up with a self-consistent mft record in the case of a crash; but if you
 * asked for asynchronous writing you probably do not care about that anyway.
*
* TODO: If @sync is false, want to do truly asynchronous i/o, i.e. just
* schedule i/o via ->writepage or do it via kntfsd or whatever.
*/
int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
{
ntfs_volume *vol = ni->vol;
struct page *page = ni->page;
unsigned int blocksize = vol->sb->s_blocksize;
unsigned char blocksize_bits = vol->sb->s_blocksize_bits;
int max_bhs = vol->mft_record_size / blocksize;
struct buffer_head *bhs[MAX_BHS];
struct buffer_head *bh, *head;
runlist_element *rl;
unsigned int block_start, block_end, m_start, m_end;
int i_bhs, nr_bhs, err = 0;
ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
BUG_ON(NInoAttr(ni));
BUG_ON(!max_bhs);
BUG_ON(!PageLocked(page));
if (WARN_ON(max_bhs > MAX_BHS)) {
err = -EINVAL;
goto err_out;
}
/*
* If the ntfs_inode is clean no need to do anything. If it is dirty,
* mark it as clean now so that it can be redirtied later on if needed.
* There is no danger of races since the caller is holding the locks
* for the mft record @m and the page it is in.
*/
if (!NInoTestClearDirty(ni))
goto done;
bh = head = page_buffers(page);
BUG_ON(!bh);
rl = NULL;
nr_bhs = 0;
block_start = 0;
m_start = ni->page_ofs;
m_end = m_start + vol->mft_record_size;
do {
block_end = block_start + blocksize;
/* If the buffer is outside the mft record, skip it. */
if (block_end <= m_start)
continue;
if (unlikely(block_start >= m_end))
break;
/*
* If this block is not the first one in the record, we ignore
* the buffer's dirty state because we could have raced with a
* parallel mark_ntfs_record_dirty().
*/
if (block_start == m_start) {
/* This block is the first one in the record. */
if (!buffer_dirty(bh)) {
BUG_ON(nr_bhs);
/* Clean records are not written out. */
break;
}
}
/* Need to map the buffer if it is not mapped already. */
if (unlikely(!buffer_mapped(bh))) {
VCN vcn;
LCN lcn;
unsigned int vcn_ofs;
bh->b_bdev = vol->sb->s_bdev;
/* Obtain the vcn and offset of the current block. */
vcn = ((VCN)ni->mft_no << vol->mft_record_size_bits) +
(block_start - m_start);
vcn_ofs = vcn & vol->cluster_size_mask;
vcn >>= vol->cluster_size_bits;
if (!rl) {
down_read(&NTFS_I(vol->mft_ino)->runlist.lock);
rl = NTFS_I(vol->mft_ino)->runlist.rl;
BUG_ON(!rl);
}
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
/* For $MFT, only lcn >= 0 is a successful remap. */
if (likely(lcn >= 0)) {
/* Setup buffer head to correct block. */
bh->b_blocknr = ((lcn <<
vol->cluster_size_bits) +
vcn_ofs) >> blocksize_bits;
set_buffer_mapped(bh);
} else {
bh->b_blocknr = -1;
ntfs_error(vol->sb, "Cannot write mft record "
"0x%lx because its location "
"on disk could not be "
"determined (error code %lli).",
ni->mft_no, (long long)lcn);
err = -EIO;
}
}
BUG_ON(!buffer_uptodate(bh));
BUG_ON(!nr_bhs && (m_start != block_start));
BUG_ON(nr_bhs >= max_bhs);
bhs[nr_bhs++] = bh;
BUG_ON((nr_bhs >= max_bhs) && (m_end != block_end));
} while (block_start = block_end, (bh = bh->b_this_page) != head);
if (unlikely(rl))
up_read(&NTFS_I(vol->mft_ino)->runlist.lock);
if (!nr_bhs)
goto done;
if (unlikely(err))
goto cleanup_out;
/* Apply the mst protection fixups. */
err = pre_write_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size);
if (err) {
ntfs_error(vol->sb, "Failed to apply mst fixups!");
goto cleanup_out;
}
flush_dcache_mft_record_page(ni);
/* Lock buffers and start synchronous write i/o on them. */
for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
struct buffer_head *tbh = bhs[i_bhs];
if (!trylock_buffer(tbh))
BUG();
BUG_ON(!buffer_uptodate(tbh));
clear_buffer_dirty(tbh);
get_bh(tbh);
tbh->b_end_io = end_buffer_write_sync;
submit_bh(REQ_OP_WRITE, tbh);
}
/* Synchronize the mft mirror now if not @sync. */
if (!sync && ni->mft_no < vol->mftmirr_size)
ntfs_sync_mft_mirror(vol, ni->mft_no, m, sync);
/* Wait on i/o completion of buffers. */
for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
struct buffer_head *tbh = bhs[i_bhs];
wait_on_buffer(tbh);
if (unlikely(!buffer_uptodate(tbh))) {
err = -EIO;
/*
* Set the buffer uptodate so the page and buffer
* states do not become out of sync.
*/
if (PageUptodate(page))
set_buffer_uptodate(tbh);
}
}
/* If @sync, now synchronize the mft mirror. */
if (sync && ni->mft_no < vol->mftmirr_size)
ntfs_sync_mft_mirror(vol, ni->mft_no, m, sync);
/* Remove the mst protection fixups again. */
post_write_mst_fixup((NTFS_RECORD*)m);
flush_dcache_mft_record_page(ni);
if (unlikely(err)) {
/* I/O error during writing. This is really bad! */
ntfs_error(vol->sb, "I/O error while writing mft record "
"0x%lx! Marking base inode as bad. You "
"should unmount the volume and run chkdsk.",
ni->mft_no);
goto err_out;
}
done:
ntfs_debug("Done.");
return 0;
cleanup_out:
/* Clean the buffers. */
for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++)
clear_buffer_dirty(bhs[i_bhs]);
err_out:
/*
* Current state: all buffers are clean, unlocked, and uptodate.
* The caller should mark the base inode as bad so that no more i/o
* happens. ->clear_inode() will still be invoked so all extent inodes
* and other allocated memory will be freed.
*/
if (err == -ENOMEM) {
ntfs_error(vol->sb, "Not enough memory to write mft record. "
"Redirtying so the write is retried later.");
mark_mft_record_dirty(ni);
err = 0;
} else
NVolSetErrors(vol);
return err;
}
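/*
 * Editor's illustrative sketch (not part of the original driver): the
 * mst fixup ordering that write_mft_record_nolock() follows -- apply
 * the protection fixups, do the i/o, then remove the fixups again so
 * the in-memory record stays usable. The i/o helper is hypothetical
 * and stands in for the buffer head handling above.
 */
#if 0 /* example only, not compiled */
static int example_write_mst_record(ntfs_volume *vol, NTFS_RECORD *rec)
{
	int err = pre_write_mst_fixup(rec, vol->mft_record_size);

	if (err)
		return err;	/* record is corrupt, do not write it */
	err = example_submit_and_wait(rec);	/* hypothetical i/o step */
	post_write_mst_fixup(rec);	/* always undo the fixups */
	return err;
}
#endif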
/**
* ntfs_may_write_mft_record - check if an mft record may be written out
* @vol: [IN] ntfs volume on which the mft record to check resides
* @mft_no: [IN] mft record number of the mft record to check
* @m: [IN] mapped mft record to check
* @locked_ni: [OUT] caller has to unlock this ntfs inode if one is returned
*
* Check if the mapped (base or extent) mft record @m with mft record number
* @mft_no belonging to the ntfs volume @vol may be written out. If necessary
* and possible the ntfs inode of the mft record is locked and the base vfs
* inode is pinned. The locked ntfs inode is then returned in @locked_ni. The
* caller is responsible for unlocking the ntfs inode and unpinning the base
* vfs inode.
*
* Return 'true' if the mft record may be written out and 'false' if not.
*
* The caller has locked the page and cleared the uptodate flag on it which
* means that we can safely write out any dirty mft records that do not have
* their inodes in icache as determined by ilookup5() as anyone
* opening/creating such an inode would block when attempting to map the mft
* record in read_cache_page() until we are finished with the write out.
*
* Here is a description of the tests we perform:
*
* If the inode is found in icache we know the mft record must be a base mft
* record. If it is dirty, we do not write it and return 'false' as the vfs
* inode write paths will result in the access times being updated which would
* cause the base mft record to be redirtied and written out again. (We know
* the access time update will modify the base mft record because Windows
* chkdsk complains if the standard information attribute is not in the base
* mft record.)
*
* If the inode is in icache and not dirty, we attempt to lock the mft record
* and if we find the lock was already taken, it is not safe to write the mft
* record and we return 'false'.
*
* If we manage to obtain the lock we have exclusive access to the mft record,
* which also allows us safe writeout of the mft record. We then set
* @locked_ni to the locked ntfs inode and return 'true'.
*
* Note we cannot just lock the mft record and sleep while waiting for the lock
* because this would deadlock due to lock reversal (normally the mft record is
* locked before the page is locked but we already have the page locked here
* when we try to lock the mft record).
*
* If the inode is not in icache we need to perform further checks.
*
* If the mft record is not a FILE record or it is a base mft record, we can
* safely write it and return 'true'.
*
* We now know the mft record is an extent mft record. We check if the inode
* corresponding to its base mft record is in icache and obtain a reference to
* it if it is. If it is not, we can safely write it and return 'true'.
*
* We now have the base inode for the extent mft record. We check if it has an
* ntfs inode for the extent mft record attached and if not it is safe to write
* the extent mft record and we return 'true'.
*
* The ntfs inode for the extent mft record is attached to the base inode so we
* attempt to lock the extent mft record and if we find the lock was already
* taken, it is not safe to write the extent mft record and we return 'false'.
*
* If we manage to obtain the lock we have exclusive access to the extent mft
* record, which also allows us safe writeout of the extent mft record. We
* set the ntfs inode of the extent mft record clean and then set @locked_ni to
* the now locked ntfs inode and return 'true'.
*
* Note, the reason for actually writing dirty mft records here and not just
* relying on the vfs inode dirty code paths is that we can have mft records
* modified without them ever having actual inodes in memory. Also we can have
* dirty mft records with clean ntfs inodes in memory. None of the described
* cases would result in the dirty mft records being written out if we only
* relied on the vfs inode dirty code paths. And these cases can really occur
* during allocation of new mft records and in particular when the
* initialized_size of the $MFT/$DATA attribute is extended and the new space
* is initialized using ntfs_mft_record_format(). The clean inode can then
* appear if the mft record is reused for a new inode before it got written
* out.
*/
bool ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no,
const MFT_RECORD *m, ntfs_inode **locked_ni)
{
struct super_block *sb = vol->sb;
struct inode *mft_vi = vol->mft_ino;
struct inode *vi;
ntfs_inode *ni, *eni, **extent_nis;
int i;
ntfs_attr na;
ntfs_debug("Entering for inode 0x%lx.", mft_no);
/*
* Normally we do not return a locked inode so set @locked_ni to NULL.
*/
BUG_ON(!locked_ni);
*locked_ni = NULL;
/*
* Check if the inode corresponding to this mft record is in the VFS
* inode cache and obtain a reference to it if it is.
*/
ntfs_debug("Looking for inode 0x%lx in icache.", mft_no);
na.mft_no = mft_no;
na.name = NULL;
na.name_len = 0;
na.type = AT_UNUSED;
/*
* Optimize inode 0, i.e. $MFT itself, since we have it in memory and
* we get here for it rather often.
*/
if (!mft_no) {
/* Balance the below iput(). */
vi = igrab(mft_vi);
BUG_ON(vi != mft_vi);
} else {
/*
		 * Have to use ilookup5_nowait() since ilookup5() waits for
		 * the inode lock, which would deadlock ntfs when a concurrent
		 * inode write via the inode dirty code paths races with the
		 * page dirty code path while writing $MFT.
*/
vi = ilookup5_nowait(sb, mft_no, ntfs_test_inode, &na);
}
if (vi) {
ntfs_debug("Base inode 0x%lx is in icache.", mft_no);
/* The inode is in icache. */
ni = NTFS_I(vi);
/* Take a reference to the ntfs inode. */
atomic_inc(&ni->count);
/* If the inode is dirty, do not write this record. */
if (NInoDirty(ni)) {
ntfs_debug("Inode 0x%lx is dirty, do not write it.",
mft_no);
atomic_dec(&ni->count);
iput(vi);
return false;
}
ntfs_debug("Inode 0x%lx is not dirty.", mft_no);
/* The inode is not dirty, try to take the mft record lock. */
if (unlikely(!mutex_trylock(&ni->mrec_lock))) {
ntfs_debug("Mft record 0x%lx is already locked, do "
"not write it.", mft_no);
atomic_dec(&ni->count);
iput(vi);
return false;
}
ntfs_debug("Managed to lock mft record 0x%lx, write it.",
mft_no);
/*
* The write has to occur while we hold the mft record lock so
* return the locked ntfs inode.
*/
*locked_ni = ni;
return true;
}
ntfs_debug("Inode 0x%lx is not in icache.", mft_no);
/* The inode is not in icache. */
/* Write the record if it is not a mft record (type "FILE"). */
if (!ntfs_is_mft_record(m->magic)) {
ntfs_debug("Mft record 0x%lx is not a FILE record, write it.",
mft_no);
return true;
}
/* Write the mft record if it is a base inode. */
if (!m->base_mft_record) {
ntfs_debug("Mft record 0x%lx is a base record, write it.",
mft_no);
return true;
}
/*
* This is an extent mft record. Check if the inode corresponding to
* its base mft record is in icache and obtain a reference to it if it
* is.
*/
na.mft_no = MREF_LE(m->base_mft_record);
ntfs_debug("Mft record 0x%lx is an extent record. Looking for base "
"inode 0x%lx in icache.", mft_no, na.mft_no);
if (!na.mft_no) {
/* Balance the below iput(). */
vi = igrab(mft_vi);
BUG_ON(vi != mft_vi);
} else
vi = ilookup5_nowait(sb, na.mft_no, ntfs_test_inode,
&na);
if (!vi) {
/*
* The base inode is not in icache, write this extent mft
* record.
*/
ntfs_debug("Base inode 0x%lx is not in icache, write the "
"extent record.", na.mft_no);
return true;
}
ntfs_debug("Base inode 0x%lx is in icache.", na.mft_no);
/*
* The base inode is in icache. Check if it has the extent inode
* corresponding to this extent mft record attached.
*/
ni = NTFS_I(vi);
mutex_lock(&ni->extent_lock);
if (ni->nr_extents <= 0) {
/*
* The base inode has no attached extent inodes, write this
* extent mft record.
*/
mutex_unlock(&ni->extent_lock);
iput(vi);
ntfs_debug("Base inode 0x%lx has no attached extent inodes, "
"write the extent record.", na.mft_no);
return true;
}
/* Iterate over the attached extent inodes. */
extent_nis = ni->ext.extent_ntfs_inos;
for (eni = NULL, i = 0; i < ni->nr_extents; ++i) {
if (mft_no == extent_nis[i]->mft_no) {
/*
* Found the extent inode corresponding to this extent
* mft record.
*/
eni = extent_nis[i];
break;
}
}
/*
* If the extent inode was not attached to the base inode, write this
* extent mft record.
*/
if (!eni) {
mutex_unlock(&ni->extent_lock);
iput(vi);
ntfs_debug("Extent inode 0x%lx is not attached to its base "
"inode 0x%lx, write the extent record.",
mft_no, na.mft_no);
return true;
}
ntfs_debug("Extent inode 0x%lx is attached to its base inode 0x%lx.",
mft_no, na.mft_no);
/* Take a reference to the extent ntfs inode. */
atomic_inc(&eni->count);
mutex_unlock(&ni->extent_lock);
/*
	 * Found the extent inode corresponding to this extent mft record.
* Try to take the mft record lock.
*/
if (unlikely(!mutex_trylock(&eni->mrec_lock))) {
atomic_dec(&eni->count);
iput(vi);
ntfs_debug("Extent mft record 0x%lx is already locked, do "
"not write it.", mft_no);
return false;
}
ntfs_debug("Managed to lock extent mft record 0x%lx, write it.",
mft_no);
if (NInoTestClearDirty(eni))
ntfs_debug("Extent inode 0x%lx is dirty, marking it clean.",
mft_no);
/*
* The write has to occur while we hold the mft record lock so return
* the locked extent ntfs inode.
*/
*locked_ni = eni;
return true;
}
static const char *es = " Leaving inconsistent metadata. Unmount and run "
"chkdsk.";
/**
 * ntfs_mft_bitmap_find_and_alloc_free_rec_nolock - find and allocate a free mft record
* @vol: volume on which to search for a free mft record
* @base_ni: open base inode if allocating an extent mft record or NULL
*
* Search for a free mft record in the mft bitmap attribute on the ntfs volume
* @vol.
*
* If @base_ni is NULL start the search at the default allocator position.
*
* If @base_ni is not NULL start the search at the mft record after the base
* mft record @base_ni.
*
 * Return the number of the free mft record on success and -errno on error. An
 * error code of -ENOSPC means that there are no free mft records in the
 * currently initialized mft bitmap.
*
* Locking: Caller must hold vol->mftbmp_lock for writing.
*/
static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
ntfs_inode *base_ni)
{
s64 pass_end, ll, data_pos, pass_start, ofs, bit;
unsigned long flags;
struct address_space *mftbmp_mapping;
u8 *buf, *byte;
struct page *page;
unsigned int page_ofs, size;
u8 pass, b;
ntfs_debug("Searching for free mft record in the currently "
"initialized mft bitmap.");
mftbmp_mapping = vol->mftbmp_ino->i_mapping;
/*
* Set the end of the pass making sure we do not overflow the mft
* bitmap.
*/
read_lock_irqsave(&NTFS_I(vol->mft_ino)->size_lock, flags);
pass_end = NTFS_I(vol->mft_ino)->allocated_size >>
vol->mft_record_size_bits;
read_unlock_irqrestore(&NTFS_I(vol->mft_ino)->size_lock, flags);
read_lock_irqsave(&NTFS_I(vol->mftbmp_ino)->size_lock, flags);
ll = NTFS_I(vol->mftbmp_ino)->initialized_size << 3;
read_unlock_irqrestore(&NTFS_I(vol->mftbmp_ino)->size_lock, flags);
if (pass_end > ll)
pass_end = ll;
pass = 1;
if (!base_ni)
data_pos = vol->mft_data_pos;
else
data_pos = base_ni->mft_no + 1;
if (data_pos < 24)
data_pos = 24;
if (data_pos >= pass_end) {
data_pos = 24;
pass = 2;
/* This happens on a freshly formatted volume. */
if (data_pos >= pass_end)
return -ENOSPC;
}
pass_start = data_pos;
ntfs_debug("Starting bitmap search: pass %u, pass_start 0x%llx, "
"pass_end 0x%llx, data_pos 0x%llx.", pass,
(long long)pass_start, (long long)pass_end,
(long long)data_pos);
/* Loop until a free mft record is found. */
for (; pass <= 2;) {
/* Cap size to pass_end. */
ofs = data_pos >> 3;
page_ofs = ofs & ~PAGE_MASK;
size = PAGE_SIZE - page_ofs;
ll = ((pass_end + 7) >> 3) - ofs;
if (size > ll)
size = ll;
size <<= 3;
/*
* If we are still within the active pass, search the next page
* for a zero bit.
*/
if (size) {
page = ntfs_map_page(mftbmp_mapping,
ofs >> PAGE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read mft "
"bitmap, aborting.");
return PTR_ERR(page);
}
buf = (u8*)page_address(page) + page_ofs;
bit = data_pos & 7;
data_pos &= ~7ull;
ntfs_debug("Before inner for loop: size 0x%x, "
"data_pos 0x%llx, bit 0x%llx", size,
(long long)data_pos, (long long)bit);
for (; bit < size && data_pos + bit < pass_end;
bit &= ~7ull, bit += 8) {
byte = buf + (bit >> 3);
if (*byte == 0xff)
continue;
b = ffz((unsigned long)*byte);
if (b < 8 && b >= (bit & 7)) {
ll = data_pos + (bit & ~7ull) + b;
if (unlikely(ll > (1ll << 32))) {
ntfs_unmap_page(page);
return -ENOSPC;
}
*byte |= 1 << b;
flush_dcache_page(page);
set_page_dirty(page);
ntfs_unmap_page(page);
ntfs_debug("Done. (Found and "
"allocated mft record "
"0x%llx.)",
(long long)ll);
return ll;
}
}
ntfs_debug("After inner for loop: size 0x%x, "
"data_pos 0x%llx, bit 0x%llx", size,
(long long)data_pos, (long long)bit);
data_pos += size;
ntfs_unmap_page(page);
/*
* If the end of the pass has not been reached yet,
* continue searching the mft bitmap for a zero bit.
*/
if (data_pos < pass_end)
continue;
}
/* Do the next pass. */
if (++pass == 2) {
/*
* Starting the second pass, in which we scan the first
* part of the zone which we omitted earlier.
*/
pass_end = pass_start;
data_pos = pass_start = 24;
ntfs_debug("pass %i, pass_start 0x%llx, pass_end "
"0x%llx.", pass, (long long)pass_start,
(long long)pass_end);
if (data_pos >= pass_end)
break;
}
}
/* No free mft records in currently initialized mft bitmap. */
ntfs_debug("Done. (No free mft records left in currently initialized "
"mft bitmap.)");
return -ENOSPC;
}
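/*
 * Editor's illustrative sketch (not part of the original driver): the
 * core of the bitmap scan above in isolation. Each byte of the bitmap
 * covers eight mft records; a byte of 0xff is fully allocated and is
 * skipped, otherwise ffz() yields the first zero bit, which is claimed
 * by setting it. Returns the record number relative to @base, or -1 if
 * all @nr_bytes bytes are full. The helper name is hypothetical.
 */
#if 0 /* example only, not compiled */
static s64 example_alloc_bit(u8 *bitmap, unsigned int nr_bytes, s64 base)
{
	unsigned int i;

	for (i = 0; i < nr_bytes; i++) {
		unsigned int b;

		if (bitmap[i] == 0xff)
			continue;	/* all eight records in use */
		b = ffz((unsigned long)bitmap[i]);
		bitmap[i] |= 1 << b;	/* claim the free record */
		return base + (s64)i * 8 + b;
	}
	return -1;
}
#endif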
/**
* ntfs_mft_bitmap_extend_allocation_nolock - extend mft bitmap by a cluster
* @vol: volume on which to extend the mft bitmap attribute
*
* Extend the mft bitmap attribute on the ntfs volume @vol by one cluster.
*
* Note: Only changes allocated_size, i.e. does not touch initialized_size or
* data_size.
*
* Return 0 on success and -errno on error.
*
* Locking: - Caller must hold vol->mftbmp_lock for writing.
* - This function takes NTFS_I(vol->mftbmp_ino)->runlist.lock for
* writing and releases it before returning.
* - This function takes vol->lcnbmp_lock for writing and releases it
* before returning.
*/
static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
{
LCN lcn;
s64 ll;
unsigned long flags;
struct page *page;
ntfs_inode *mft_ni, *mftbmp_ni;
runlist_element *rl, *rl2 = NULL;
ntfs_attr_search_ctx *ctx = NULL;
MFT_RECORD *mrec;
ATTR_RECORD *a = NULL;
int ret, mp_size;
u32 old_alen = 0;
u8 *b, tb;
struct {
u8 added_cluster:1;
u8 added_run:1;
u8 mp_rebuilt:1;
} status = { 0, 0, 0 };
ntfs_debug("Extending mft bitmap allocation.");
mft_ni = NTFS_I(vol->mft_ino);
mftbmp_ni = NTFS_I(vol->mftbmp_ino);
/*
* Determine the last lcn of the mft bitmap. The allocated size of the
* mft bitmap cannot be zero so we are ok to do this.
*/
down_write(&mftbmp_ni->runlist.lock);
read_lock_irqsave(&mftbmp_ni->size_lock, flags);
ll = mftbmp_ni->allocated_size;
read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
rl = ntfs_attr_find_vcn_nolock(mftbmp_ni,
(ll - 1) >> vol->cluster_size_bits, NULL);
if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
up_write(&mftbmp_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to determine last allocated "
"cluster of mft bitmap attribute.");
if (!IS_ERR(rl))
ret = -EIO;
else
ret = PTR_ERR(rl);
return ret;
}
lcn = rl->lcn + rl->length;
ntfs_debug("Last lcn of mft bitmap attribute is 0x%llx.",
(long long)lcn);
/*
* Attempt to get the cluster following the last allocated cluster by
* hand as it may be in the MFT zone so the allocator would not give it
* to us.
*/
ll = lcn >> 3;
page = ntfs_map_page(vol->lcnbmp_ino->i_mapping,
ll >> PAGE_SHIFT);
if (IS_ERR(page)) {
up_write(&mftbmp_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to read from lcn bitmap.");
return PTR_ERR(page);
}
b = (u8*)page_address(page) + (ll & ~PAGE_MASK);
tb = 1 << (lcn & 7ull);
down_write(&vol->lcnbmp_lock);
if (*b != 0xff && !(*b & tb)) {
/* Next cluster is free, allocate it. */
*b |= tb;
flush_dcache_page(page);
set_page_dirty(page);
up_write(&vol->lcnbmp_lock);
ntfs_unmap_page(page);
/* Update the mft bitmap runlist. */
rl->length++;
rl[1].vcn++;
status.added_cluster = 1;
ntfs_debug("Appending one cluster to mft bitmap.");
} else {
up_write(&vol->lcnbmp_lock);
ntfs_unmap_page(page);
/* Allocate a cluster from the DATA_ZONE. */
rl2 = ntfs_cluster_alloc(vol, rl[1].vcn, 1, lcn, DATA_ZONE,
true);
if (IS_ERR(rl2)) {
up_write(&mftbmp_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to allocate a cluster for "
"the mft bitmap.");
return PTR_ERR(rl2);
}
rl = ntfs_runlists_merge(mftbmp_ni->runlist.rl, rl2);
if (IS_ERR(rl)) {
up_write(&mftbmp_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to merge runlists for mft "
"bitmap.");
if (ntfs_cluster_free_from_rl(vol, rl2)) {
ntfs_error(vol->sb, "Failed to deallocate "
"allocated cluster.%s", es);
NVolSetErrors(vol);
}
ntfs_free(rl2);
return PTR_ERR(rl);
}
mftbmp_ni->runlist.rl = rl;
status.added_run = 1;
ntfs_debug("Adding one run to mft bitmap.");
/* Find the last run in the new runlist. */
for (; rl[1].length; rl++)
;
}
/*
* Update the attribute record as well. Note: @rl is the last
* (non-terminator) runlist element of mft bitmap.
*/
mrec = map_mft_record(mft_ni);
if (IS_ERR(mrec)) {
ntfs_error(vol->sb, "Failed to map mft record.");
ret = PTR_ERR(mrec);
goto undo_alloc;
}
ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
if (unlikely(!ctx)) {
ntfs_error(vol->sb, "Failed to get search context.");
ret = -ENOMEM;
goto undo_alloc;
}
ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL,
0, ctx);
if (unlikely(ret)) {
ntfs_error(vol->sb, "Failed to find last attribute extent of "
"mft bitmap attribute.");
if (ret == -ENOENT)
ret = -EIO;
goto undo_alloc;
}
a = ctx->attr;
ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
/* Search back for the previous last allocated cluster of mft bitmap. */
for (rl2 = rl; rl2 > mftbmp_ni->runlist.rl; rl2--) {
if (ll >= rl2->vcn)
break;
}
BUG_ON(ll < rl2->vcn);
BUG_ON(ll >= rl2->vcn + rl2->length);
/* Get the size for the new mapping pairs array for this extent. */
mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
if (unlikely(mp_size <= 0)) {
ntfs_error(vol->sb, "Get size for mapping pairs failed for "
"mft bitmap attribute extent.");
ret = mp_size;
if (!ret)
ret = -EIO;
goto undo_alloc;
}
/* Expand the attribute record if necessary. */
old_alen = le32_to_cpu(a->length);
ret = ntfs_attr_record_resize(ctx->mrec, a, mp_size +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
if (unlikely(ret)) {
if (ret != -ENOSPC) {
ntfs_error(vol->sb, "Failed to resize attribute "
"record for mft bitmap attribute.");
goto undo_alloc;
}
// TODO: Deal with this by moving this extent to a new mft
// record or by starting a new extent in a new mft record or by
// moving other attributes out of this mft record.
// Note: It will need to be a special mft record and if none of
// those are available it gets rather complicated...
ntfs_error(vol->sb, "Not enough space in this mft record to "
"accommodate extended mft bitmap attribute "
"extent. Cannot handle this yet.");
ret = -EOPNOTSUPP;
goto undo_alloc;
}
status.mp_rebuilt = 1;
/* Generate the mapping pairs array directly into the attr record. */
ret = ntfs_mapping_pairs_build(vol, (u8*)a +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
mp_size, rl2, ll, -1, NULL);
if (unlikely(ret)) {
ntfs_error(vol->sb, "Failed to build mapping pairs array for "
"mft bitmap attribute.");
goto undo_alloc;
}
/* Update the highest_vcn. */
a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1);
/*
* We now have extended the mft bitmap allocated_size by one cluster.
* Reflect this in the ntfs_inode structure and the attribute record.
*/
if (a->data.non_resident.lowest_vcn) {
/*
* We are not in the first attribute extent, switch to it, but
* first ensure the changes will make it to disk later.
*/
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_reinit_search_ctx(ctx);
ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL,
0, ctx);
if (unlikely(ret)) {
ntfs_error(vol->sb, "Failed to find first attribute "
"extent of mft bitmap attribute.");
goto restore_undo_alloc;
}
a = ctx->attr;
}
write_lock_irqsave(&mftbmp_ni->size_lock, flags);
mftbmp_ni->allocated_size += vol->cluster_size;
a->data.non_resident.allocated_size =
cpu_to_sle64(mftbmp_ni->allocated_size);
write_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
/* Ensure the changes make it to disk. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(mft_ni);
up_write(&mftbmp_ni->runlist.lock);
ntfs_debug("Done.");
return 0;
restore_undo_alloc:
ntfs_attr_reinit_search_ctx(ctx);
if (ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
mftbmp_ni->name_len, CASE_SENSITIVE, rl[1].vcn, NULL,
0, ctx)) {
ntfs_error(vol->sb, "Failed to find last attribute extent of "
"mft bitmap attribute.%s", es);
write_lock_irqsave(&mftbmp_ni->size_lock, flags);
mftbmp_ni->allocated_size += vol->cluster_size;
write_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(mft_ni);
up_write(&mftbmp_ni->runlist.lock);
/*
* The only thing that is now wrong is ->allocated_size of the
* base attribute extent which chkdsk should be able to fix.
*/
NVolSetErrors(vol);
return ret;
}
a = ctx->attr;
a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 2);
undo_alloc:
if (status.added_cluster) {
/* Truncate the last run in the runlist by one cluster. */
rl->length--;
rl[1].vcn--;
} else if (status.added_run) {
lcn = rl->lcn;
/* Remove the last run from the runlist. */
rl->lcn = rl[1].lcn;
rl->length = 0;
}
/* Deallocate the cluster. */
down_write(&vol->lcnbmp_lock);
if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
ntfs_error(vol->sb, "Failed to free allocated cluster.%s", es);
NVolSetErrors(vol);
}
up_write(&vol->lcnbmp_lock);
if (status.mp_rebuilt) {
if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
a->data.non_resident.mapping_pairs_offset),
old_alen - le16_to_cpu(
a->data.non_resident.mapping_pairs_offset),
rl2, ll, -1, NULL)) {
ntfs_error(vol->sb, "Failed to restore mapping pairs "
"array.%s", es);
NVolSetErrors(vol);
}
if (ntfs_attr_record_resize(ctx->mrec, a, old_alen)) {
ntfs_error(vol->sb, "Failed to restore attribute "
"record.%s", es);
NVolSetErrors(vol);
}
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
}
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (!IS_ERR(mrec))
unmap_mft_record(mft_ni);
up_write(&mftbmp_ni->runlist.lock);
return ret;
}
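/*
 * Editor's illustrative sketch (not part of the original driver): the
 * rollback discipline used by the function above. Each state-changing
 * step records a status bit once it has succeeded; the error path then
 * undoes exactly the recorded steps, in reverse order. All helper
 * names here are hypothetical.
 */
#if 0 /* example only, not compiled */
static int example_multi_step_op(void)
{
	struct {
		u8 did_a:1;
		u8 did_b:1;
	} status = { 0, 0 };
	int err;

	err = example_step_a();		/* hypothetical step */
	if (err)
		goto undo;
	status.did_a = 1;
	err = example_step_b();		/* hypothetical step */
	if (err)
		goto undo;
	status.did_b = 1;
	err = example_step_c();		/* hypothetical final step */
	if (err)
		goto undo;
	return 0;
undo:
	if (status.did_b)
		example_undo_b();	/* reverse order: b before a */
	if (status.did_a)
		example_undo_a();
	return err;
}
#endif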
/**
* ntfs_mft_bitmap_extend_initialized_nolock - extend mftbmp initialized data
* @vol: volume on which to extend the mft bitmap attribute
*
* Extend the initialized portion of the mft bitmap attribute on the ntfs
* volume @vol by 8 bytes.
*
* Note: Only changes initialized_size and data_size, i.e. requires that
* allocated_size is big enough to fit the new initialized_size.
*
* Return 0 on success and -error on error.
*
* Locking: Caller must hold vol->mftbmp_lock for writing.
*/
static int ntfs_mft_bitmap_extend_initialized_nolock(ntfs_volume *vol)
{
s64 old_data_size, old_initialized_size;
unsigned long flags;
struct inode *mftbmp_vi;
ntfs_inode *mft_ni, *mftbmp_ni;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *mrec;
ATTR_RECORD *a;
int ret;
ntfs_debug("Extending mft bitmap initiailized (and data) size.");
mft_ni = NTFS_I(vol->mft_ino);
mftbmp_vi = vol->mftbmp_ino;
mftbmp_ni = NTFS_I(mftbmp_vi);
/* Get the attribute record. */
mrec = map_mft_record(mft_ni);
if (IS_ERR(mrec)) {
ntfs_error(vol->sb, "Failed to map mft record.");
return PTR_ERR(mrec);
}
ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
if (unlikely(!ctx)) {
ntfs_error(vol->sb, "Failed to get search context.");
ret = -ENOMEM;
goto unm_err_out;
}
ret = ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(ret)) {
ntfs_error(vol->sb, "Failed to find first attribute extent of "
"mft bitmap attribute.");
if (ret == -ENOENT)
ret = -EIO;
goto put_err_out;
}
a = ctx->attr;
write_lock_irqsave(&mftbmp_ni->size_lock, flags);
old_data_size = i_size_read(mftbmp_vi);
old_initialized_size = mftbmp_ni->initialized_size;
/*
* We can simply update the initialized_size before filling the space
* with zeroes because the caller is holding the mft bitmap lock for
* writing which ensures that no one else is trying to access the data.
*/
mftbmp_ni->initialized_size += 8;
a->data.non_resident.initialized_size =
cpu_to_sle64(mftbmp_ni->initialized_size);
if (mftbmp_ni->initialized_size > old_data_size) {
i_size_write(mftbmp_vi, mftbmp_ni->initialized_size);
a->data.non_resident.data_size =
cpu_to_sle64(mftbmp_ni->initialized_size);
}
write_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
/* Ensure the changes make it to disk. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(mft_ni);
/* Initialize the mft bitmap attribute value with zeroes. */
ret = ntfs_attr_set(mftbmp_ni, old_initialized_size, 8, 0);
if (likely(!ret)) {
ntfs_debug("Done. (Wrote eight initialized bytes to mft "
"bitmap.");
return 0;
}
ntfs_error(vol->sb, "Failed to write to mft bitmap.");
/* Try to recover from the error. */
mrec = map_mft_record(mft_ni);
if (IS_ERR(mrec)) {
ntfs_error(vol->sb, "Failed to map mft record.%s", es);
NVolSetErrors(vol);
return ret;
}
ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
if (unlikely(!ctx)) {
ntfs_error(vol->sb, "Failed to get search context.%s", es);
NVolSetErrors(vol);
goto unm_err_out;
}
if (ntfs_attr_lookup(mftbmp_ni->type, mftbmp_ni->name,
mftbmp_ni->name_len, CASE_SENSITIVE, 0, NULL, 0, ctx)) {
ntfs_error(vol->sb, "Failed to find first attribute extent of "
"mft bitmap attribute.%s", es);
NVolSetErrors(vol);
put_err_out:
ntfs_attr_put_search_ctx(ctx);
unm_err_out:
unmap_mft_record(mft_ni);
goto err_out;
}
a = ctx->attr;
write_lock_irqsave(&mftbmp_ni->size_lock, flags);
mftbmp_ni->initialized_size = old_initialized_size;
a->data.non_resident.initialized_size =
cpu_to_sle64(old_initialized_size);
if (i_size_read(mftbmp_vi) != old_data_size) {
i_size_write(mftbmp_vi, old_data_size);
a->data.non_resident.data_size = cpu_to_sle64(old_data_size);
}
write_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(mft_ni);
#ifdef DEBUG
read_lock_irqsave(&mftbmp_ni->size_lock, flags);
ntfs_debug("Restored status of mftbmp: allocated_size 0x%llx, "
"data_size 0x%llx, initialized_size 0x%llx.",
(long long)mftbmp_ni->allocated_size,
(long long)i_size_read(mftbmp_vi),
(long long)mftbmp_ni->initialized_size);
read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
#endif /* DEBUG */
err_out:
return ret;
}
/**
* ntfs_mft_data_extend_allocation_nolock - extend mft data attribute
* @vol: volume on which to extend the mft data attribute
*
* Extend the mft data attribute on the ntfs volume @vol by 16 mft records
* worth of clusters or if not enough space for this by one mft record worth
* of clusters.
*
* Note: Only changes allocated_size, i.e. does not touch initialized_size or
* data_size.
*
* Return 0 on success and -errno on error.
*
* Locking: - Caller must hold vol->mftbmp_lock for writing.
* - This function takes NTFS_I(vol->mft_ino)->runlist.lock for
* writing and releases it before returning.
* - This function calls functions which take vol->lcnbmp_lock for
* writing and release it before returning.
*/
static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
{
LCN lcn;
VCN old_last_vcn;
s64 min_nr, nr, ll;
unsigned long flags;
ntfs_inode *mft_ni;
runlist_element *rl, *rl2;
ntfs_attr_search_ctx *ctx = NULL;
MFT_RECORD *mrec;
ATTR_RECORD *a = NULL;
int ret, mp_size;
u32 old_alen = 0;
bool mp_rebuilt = false;
ntfs_debug("Extending mft data allocation.");
mft_ni = NTFS_I(vol->mft_ino);
/*
* Determine the preferred allocation location, i.e. the last lcn of
* the mft data attribute. The allocated size of the mft data
* attribute cannot be zero so we are ok to do this.
*/
down_write(&mft_ni->runlist.lock);
read_lock_irqsave(&mft_ni->size_lock, flags);
ll = mft_ni->allocated_size;
read_unlock_irqrestore(&mft_ni->size_lock, flags);
rl = ntfs_attr_find_vcn_nolock(mft_ni,
(ll - 1) >> vol->cluster_size_bits, NULL);
if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
up_write(&mft_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to determine last allocated "
"cluster of mft data attribute.");
if (!IS_ERR(rl))
ret = -EIO;
else
ret = PTR_ERR(rl);
return ret;
}
lcn = rl->lcn + rl->length;
ntfs_debug("Last lcn of mft data attribute is 0x%llx.", (long long)lcn);
/* Minimum allocation is one mft record worth of clusters. */
min_nr = vol->mft_record_size >> vol->cluster_size_bits;
if (!min_nr)
min_nr = 1;
/* Want to allocate 16 mft records worth of clusters. */
nr = vol->mft_record_size << 4 >> vol->cluster_size_bits;
if (!nr)
nr = min_nr;
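/*
 * Worked example (assuming 1024-byte mft records and 4096-byte
 * clusters): min_nr = 1024 >> 12 = 0, clamped to 1 above, and
 * nr = (1024 << 4) >> 12 = 4 clusters, i.e. 16 records per extension.
 */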
/* Ensure we do not go above 2^32-1 mft records. */
read_lock_irqsave(&mft_ni->size_lock, flags);
ll = mft_ni->allocated_size;
read_unlock_irqrestore(&mft_ni->size_lock, flags);
if (unlikely((ll + (nr << vol->cluster_size_bits)) >>
vol->mft_record_size_bits >= (1ll << 32))) {
nr = min_nr;
if (unlikely((ll + (nr << vol->cluster_size_bits)) >>
vol->mft_record_size_bits >= (1ll << 32))) {
ntfs_warning(vol->sb, "Cannot allocate mft record "
"because the maximum number of inodes "
"(2^32) has already been reached.");
up_write(&mft_ni->runlist.lock);
return -ENOSPC;
}
}
ntfs_debug("Trying mft data allocation with %s cluster count %lli.",
nr > min_nr ? "default" : "minimal", (long long)nr);
old_last_vcn = rl[1].vcn;
do {
rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE,
true);
if (!IS_ERR(rl2))
break;
if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {
ntfs_error(vol->sb, "Failed to allocate the minimal "
"number of clusters (%lli) for the "
"mft data attribute.", (long long)nr);
up_write(&mft_ni->runlist.lock);
return PTR_ERR(rl2);
}
/*
* There is not enough space to do the allocation, but there
* might be enough space to do a minimal allocation so try that
* before failing.
*/
nr = min_nr;
ntfs_debug("Retrying mft data allocation with minimal cluster "
"count %lli.", (long long)nr);
} while (1);
rl = ntfs_runlists_merge(mft_ni->runlist.rl, rl2);
if (IS_ERR(rl)) {
up_write(&mft_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to merge runlists for mft data "
"attribute.");
if (ntfs_cluster_free_from_rl(vol, rl2)) {
ntfs_error(vol->sb, "Failed to deallocate clusters "
"from the mft data attribute.%s", es);
NVolSetErrors(vol);
}
ntfs_free(rl2);
return PTR_ERR(rl);
}
mft_ni->runlist.rl = rl;
ntfs_debug("Allocated %lli clusters.", (long long)nr);
/* Find the last run in the new runlist. */
for (; rl[1].length; rl++)
;
/* Update the attribute record as well. */
mrec = map_mft_record(mft_ni);
if (IS_ERR(mrec)) {
ntfs_error(vol->sb, "Failed to map mft record.");
ret = PTR_ERR(mrec);
goto undo_alloc;
}
ctx = ntfs_attr_get_search_ctx(mft_ni, mrec);
if (unlikely(!ctx)) {
ntfs_error(vol->sb, "Failed to get search context.");
ret = -ENOMEM;
goto undo_alloc;
}
ret = ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx);
if (unlikely(ret)) {
ntfs_error(vol->sb, "Failed to find last attribute extent of "
"mft data attribute.");
if (ret == -ENOENT)
ret = -EIO;
goto undo_alloc;
}
a = ctx->attr;
ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
/* Search back for the previous last allocated cluster of mft data. */
for (rl2 = rl; rl2 > mft_ni->runlist.rl; rl2--) {
if (ll >= rl2->vcn)
break;
}
BUG_ON(ll < rl2->vcn);
BUG_ON(ll >= rl2->vcn + rl2->length);
/* Get the size for the new mapping pairs array for this extent. */
mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
if (unlikely(mp_size <= 0)) {
ntfs_error(vol->sb, "Get size for mapping pairs failed for "
"mft data attribute extent.");
ret = mp_size;
if (!ret)
ret = -EIO;
goto undo_alloc;
}
/* Expand the attribute record if necessary. */
old_alen = le32_to_cpu(a->length);
ret = ntfs_attr_record_resize(ctx->mrec, a, mp_size +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
if (unlikely(ret)) {
if (ret != -ENOSPC) {
ntfs_error(vol->sb, "Failed to resize attribute "
"record for mft data attribute.");
goto undo_alloc;
}
// TODO: Deal with this by moving this extent to a new mft
// record or by starting a new extent in a new mft record or by
// moving other attributes out of this mft record.
// Note: Use the special reserved mft records and ensure that
// this extent is not required to find the mft record in
// question. If no free special records left we would need to
// move an existing record away, insert ours in its place, and
// then place the moved record into the newly allocated space
// and we would then need to update all references to this mft
// record appropriately. This is rather complicated...
ntfs_error(vol->sb, "Not enough space in this mft record to "
"accommodate extended mft data attribute "
"extent. Cannot handle this yet.");
ret = -EOPNOTSUPP;
goto undo_alloc;
}
mp_rebuilt = true;
/* Generate the mapping pairs array directly into the attr record. */
ret = ntfs_mapping_pairs_build(vol, (u8*)a +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
mp_size, rl2, ll, -1, NULL);
if (unlikely(ret)) {
ntfs_error(vol->sb, "Failed to build mapping pairs array of "
"mft data attribute.");
goto undo_alloc;
}
/* Update the highest_vcn. */
a->data.non_resident.highest_vcn = cpu_to_sle64(rl[1].vcn - 1);
/*
* We now have extended the mft data allocated_size by nr clusters.
* Reflect this in the ntfs_inode structure and the attribute record.
* @rl is the last (non-terminator) runlist element of mft data
* attribute.
*/
if (a->data.non_resident.lowest_vcn) {
/*
* We are not in the first attribute extent, switch to it, but
* first ensure the changes will make it to disk later.
*/
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_reinit_search_ctx(ctx);
ret = ntfs_attr_lookup(mft_ni->type, mft_ni->name,
mft_ni->name_len, CASE_SENSITIVE, 0, NULL, 0,
ctx);
if (unlikely(ret)) {
ntfs_error(vol->sb, "Failed to find first attribute "
"extent of mft data attribute.");
goto restore_undo_alloc;
}
a = ctx->attr;
}
write_lock_irqsave(&mft_ni->size_lock, flags);
mft_ni->allocated_size += nr << vol->cluster_size_bits;
a->data.non_resident.allocated_size =
cpu_to_sle64(mft_ni->allocated_size);
write_unlock_irqrestore(&mft_ni->size_lock, flags);
/* Ensure the changes make it to disk. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(mft_ni);
up_write(&mft_ni->runlist.lock);
ntfs_debug("Done.");
return 0;
restore_undo_alloc:
ntfs_attr_reinit_search_ctx(ctx);
if (ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
CASE_SENSITIVE, rl[1].vcn, NULL, 0, ctx)) {
ntfs_error(vol->sb, "Failed to find last attribute extent of "
"mft data attribute.%s", es);
write_lock_irqsave(&mft_ni->size_lock, flags);
mft_ni->allocated_size += nr << vol->cluster_size_bits;
write_unlock_irqrestore(&mft_ni->size_lock, flags);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(mft_ni);
up_write(&mft_ni->runlist.lock);
/*
* The only thing that is now wrong is ->allocated_size of the
* base attribute extent which chkdsk should be able to fix.
*/
NVolSetErrors(vol);
return ret;
}
ctx->attr->data.non_resident.highest_vcn =
cpu_to_sle64(old_last_vcn - 1);
undo_alloc:
if (ntfs_cluster_free(mft_ni, old_last_vcn, -1, ctx) < 0) {
ntfs_error(vol->sb, "Failed to free clusters from mft data "
"attribute.%s", es);
NVolSetErrors(vol);
}
if (ntfs_rl_truncate_nolock(vol, &mft_ni->runlist, old_last_vcn)) {
ntfs_error(vol->sb, "Failed to truncate mft data attribute "
"runlist.%s", es);
NVolSetErrors(vol);
}
if (ctx) {
a = ctx->attr;
if (mp_rebuilt && !IS_ERR(ctx->mrec)) {
if (ntfs_mapping_pairs_build(vol, (u8 *)a + le16_to_cpu(
a->data.non_resident.mapping_pairs_offset),
old_alen - le16_to_cpu(
a->data.non_resident.mapping_pairs_offset),
rl2, ll, -1, NULL)) {
ntfs_error(vol->sb, "Failed to restore mapping pairs "
"array.%s", es);
NVolSetErrors(vol);
}
if (ntfs_attr_record_resize(ctx->mrec, a, old_alen)) {
ntfs_error(vol->sb, "Failed to restore attribute "
"record.%s", es);
NVolSetErrors(vol);
}
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
} else if (IS_ERR(ctx->mrec)) {
ntfs_error(vol->sb, "Failed to restore attribute search "
"context.%s", es);
NVolSetErrors(vol);
}
ntfs_attr_put_search_ctx(ctx);
}
if (!IS_ERR(mrec))
unmap_mft_record(mft_ni);
up_write(&mft_ni->runlist.lock);
return ret;
}
/**
* ntfs_mft_record_layout - layout an mft record into a memory buffer
* @vol: volume to which the mft record will belong
* @mft_no: mft reference specifying the mft record number
* @m: destination buffer of size >= @vol->mft_record_size bytes
*
* Layout an empty, unused mft record with the mft record number @mft_no into
* the buffer @m. The volume @vol is needed because the mft record structure
* was modified in NTFS 3.1 so we need to know which volume version this mft
* record will be used on.
*
* Return 0 on success and -errno on error.
*/
static int ntfs_mft_record_layout(const ntfs_volume *vol, const s64 mft_no,
MFT_RECORD *m)
{
ATTR_RECORD *a;
ntfs_debug("Entering for mft record 0x%llx.", (long long)mft_no);
if (mft_no >= (1ll << 32)) {
ntfs_error(vol->sb, "Mft record number 0x%llx exceeds "
"maximum of 2^32.", (long long)mft_no);
return -ERANGE;
}
/* Start by clearing the whole mft record to give us a clean slate. */
memset(m, 0, vol->mft_record_size);
/* Aligned to 2-byte boundary. */
if (vol->major_ver < 3 || (vol->major_ver == 3 && !vol->minor_ver))
m->usa_ofs = cpu_to_le16((sizeof(MFT_RECORD_OLD) + 1) & ~1);
else {
m->usa_ofs = cpu_to_le16((sizeof(MFT_RECORD) + 1) & ~1);
/*
* Set the NTFS 3.1+ specific fields while we know that the
* volume version is 3.1+.
*/
m->reserved = 0;
m->mft_record_number = cpu_to_le32((u32)mft_no);
}
m->magic = magic_FILE;
if (vol->mft_record_size >= NTFS_BLOCK_SIZE)
m->usa_count = cpu_to_le16(vol->mft_record_size /
NTFS_BLOCK_SIZE + 1);
else {
m->usa_count = cpu_to_le16(1);
ntfs_warning(vol->sb, "Sector size is bigger than mft record "
"size. Setting usa_count to 1. If chkdsk "
"reports this as corruption, please email "
"[email protected] stating "
"that you saw this message and that the "
"modified filesystem created was corrupt. "
"Thank you.");
}
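/*
 * Worked example (assuming the common 1024-byte mft record and the
 * fixed 512-byte NTFS_BLOCK_SIZE): usa_count = 1024 / 512 + 1 = 3,
 * i.e. the update sequence number slot itself plus one fixup slot per
 * 512-byte block.
 */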
/* Set the update sequence number to 1. */
*(le16*)((u8*)m + le16_to_cpu(m->usa_ofs)) = cpu_to_le16(1);
m->lsn = 0;
m->sequence_number = cpu_to_le16(1);
m->link_count = 0;
/*
* Place the attributes straight after the update sequence array,
* aligned to 8-byte boundary.
*/
m->attrs_offset = cpu_to_le16((le16_to_cpu(m->usa_ofs) +
(le16_to_cpu(m->usa_count) << 1) + 7) & ~7);
m->flags = 0;
/*
* Using attrs_offset plus eight bytes (for the termination attribute).
* attrs_offset is already aligned to 8-byte boundary, so no need to
* align again.
*/
m->bytes_in_use = cpu_to_le32(le16_to_cpu(m->attrs_offset) + 8);
m->bytes_allocated = cpu_to_le32(vol->mft_record_size);
m->base_mft_record = 0;
m->next_attr_instance = 0;
/* Add the termination attribute. */
a = (ATTR_RECORD*)((u8*)m + le16_to_cpu(m->attrs_offset));
a->type = AT_END;
a->length = 0;
ntfs_debug("Done.");
return 0;
}
/**
* ntfs_mft_record_format - format an mft record on an ntfs volume
* @vol: volume on which to format the mft record
* @mft_no: mft record number to format
*
* Format the mft record @mft_no in $MFT/$DATA, i.e. lay out an empty, unused
* mft record into the appropriate place of the mft data attribute. This is
* used when extending the mft data attribute.
*
* Return 0 on success and -errno on error.
*/
static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no)
{
loff_t i_size;
struct inode *mft_vi = vol->mft_ino;
struct page *page;
MFT_RECORD *m;
pgoff_t index, end_index;
unsigned int ofs;
int err;
ntfs_debug("Entering for mft record 0x%llx.", (long long)mft_no);
/*
* The index into the page cache and the offset within the page cache
* page of the wanted mft record.
*/
index = mft_no << vol->mft_record_size_bits >> PAGE_SHIFT;
ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK;
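/*
 * Worked example (assuming 1024-byte mft records and 4096-byte pages):
 * for mft record 7, index = (7 << 10) >> 12 = 1 and
 * ofs = (7 << 10) & 4095 = 3072, i.e. the record starts 3072 bytes
 * into the second page cache page.
 */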
/* The maximum valid index into the page cache for $MFT's data. */
i_size = i_size_read(mft_vi);
end_index = i_size >> PAGE_SHIFT;
if (unlikely(index >= end_index)) {
if (unlikely(index > end_index || ofs + vol->mft_record_size >=
(i_size & ~PAGE_MASK))) {
ntfs_error(vol->sb, "Tried to format non-existing mft "
"record 0x%llx.", (long long)mft_no);
return -ENOENT;
}
}
/* Read, map, and pin the page containing the mft record. */
page = ntfs_map_page(mft_vi->i_mapping, index);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to map page containing mft record "
"to format 0x%llx.", (long long)mft_no);
return PTR_ERR(page);
}
lock_page(page);
BUG_ON(!PageUptodate(page));
ClearPageUptodate(page);
m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
err = ntfs_mft_record_layout(vol, mft_no, m);
if (unlikely(err)) {
ntfs_error(vol->sb, "Failed to layout mft record 0x%llx.",
(long long)mft_no);
SetPageUptodate(page);
unlock_page(page);
ntfs_unmap_page(page);
return err;
}
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
/*
* Make sure the mft record is written out to disk. We could use
* ilookup5() to check if an inode is in icache and so on but this is
* unnecessary as ntfs_writepage() will write the dirty record anyway.
*/
mark_ntfs_record_dirty(page, ofs);
ntfs_unmap_page(page);
ntfs_debug("Done.");
return 0;
}
/**
* ntfs_mft_record_alloc - allocate an mft record on an ntfs volume
* @vol: [IN] volume on which to allocate the mft record
* @mode: [IN] mode if want a file or directory, i.e. base inode or 0
* @base_ni: [IN] open base inode if allocating an extent mft record or NULL
* @mrec: [OUT] on successful return this is the mapped mft record
*
* Allocate an mft record in $MFT/$DATA of an open ntfs volume @vol.
*
* If @base_ni is NULL make the mft record a base mft record, i.e. a file or
* directory inode, and allocate it at the default allocator position. In
* this case @mode is the file mode as given to us by the caller. We in
* particular use @mode to distinguish whether a file or a directory is being
* created (S_IFDIR(mode) and S_IFREG(mode), respectively).
*
* If @base_ni is not NULL make the allocated mft record an extent record,
* allocate it starting at the mft record after the base mft record and attach
* the allocated and opened ntfs inode to the base inode @base_ni. In this
* case @mode must be 0 as it is meaningless for extent inodes.
*
* You need to check the return value with IS_ERR(). If false, the function
* was successful and the return value is the now opened ntfs inode of the
* allocated mft record. *@mrec is then set to the allocated, mapped, pinned,
* and locked mft record. If IS_ERR() is true, the function failed and the
* error code is obtained from PTR_ERR(return value). *@mrec is undefined in
* this case.
*
* Allocation strategy:
*
* To find a free mft record, we scan the mft bitmap for a zero bit. To
* optimize this we start scanning at the place specified by @base_ni or if
* @base_ni is NULL we start where we last stopped and we perform wrap around
* when we reach the end. Note, we do not try to allocate mft records below
* number 24 because numbers 0 to 15 are the defined system files anyway and 16
* to 24 are special in that they are used for storing extension mft records
* for the $DATA attribute of $MFT. This is required to avoid the possibility
* of creating a runlist with a circular dependency which once written to disk
* can never be read in again. Windows will only use records 16 to 24 for
* normal files if the volume is completely out of space. We never use them
* which means that when the volume is really out of space we cannot create any
* more files while Windows can still create up to 8 small files. We can start
* doing this at some later time; it does not matter much for now.
*
* When scanning the mft bitmap, we only search up to the last allocated mft
* record. If there are no free records left in the range 24 to number of
* allocated mft records, then we extend the $MFT/$DATA attribute in order to
* create free mft records. We extend the allocated size of $MFT/$DATA by 16
* records at a time or one cluster, if cluster size is above 16kiB. If there
* is not sufficient space to do this, we try to extend by a single mft record
* or one cluster, if cluster size is above the mft record size.
*
* No matter how many mft records we allocate, we initialize only the first
* allocated mft record, incrementing mft data size and initialized size
* accordingly, open an ntfs_inode for it and return it to the caller, unless
* there are less than 24 mft records, in which case we allocate and initialize
* mft records until we reach record 24 which we consider as the first free mft
* record for use by normal files.
*
* If during any stage we overflow the initialized data in the mft bitmap, we
* extend the initialized size (and data size) by 8 bytes, allocating another
* cluster if required. The bitmap data size has to be at least equal to the
* number of mft records in the mft, but it can be bigger, in which case the
* superfluous bits are padded with zeroes.
*
* Thus, when we return successfully (IS_ERR() is false), we will have:
* - initialized / extended the mft bitmap if necessary,
* - initialized / extended the mft data if necessary,
* - set the bit corresponding to the mft record being allocated in the
* mft bitmap,
* - opened an ntfs_inode for the allocated mft record, and we will have
* - returned the ntfs_inode as well as the allocated mapped, pinned, and
* locked mft record.
*
* On error, the volume will be left in a consistent state and no record will
* be allocated. If rolling back a partial operation fails, we may leave some
* inconsistent metadata, in which case we call NVolSetErrors() so the volume
* is left dirty when unmounted.
*
* Note, this function cannot make use of most of the normal functions, like
* for example for attribute resizing, etc, because when the run list overflows
* the base mft record and an attribute list is used, it is very important that
* the extension mft records used to store the $DATA attribute of $MFT can be
* reached without having to read the information contained inside them, as
* this would make it impossible to find them in the first place after the
* volume is unmounted. $MFT/$BITMAP probably does not need to follow this
* rule because the bitmap is not essential for finding the mft records, but on
* the other hand, handling the bitmap in this special way would make life
* easier because otherwise there might be circular invocations of functions
* when reading the bitmap.
*/
ntfs_inode *ntfs_mft_record_alloc(ntfs_volume *vol, const int mode,
ntfs_inode *base_ni, MFT_RECORD **mrec)
{
s64 ll, bit, old_data_initialized, old_data_size;
unsigned long flags;
struct inode *vi;
struct page *page;
ntfs_inode *mft_ni, *mftbmp_ni, *ni;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *m;
ATTR_RECORD *a;
pgoff_t index;
unsigned int ofs;
int err;
le16 seq_no, usn;
bool record_formatted = false;
if (base_ni) {
ntfs_debug("Entering (allocating an extent mft record for "
"base mft record 0x%llx).",
(long long)base_ni->mft_no);
/* @mode and @base_ni are mutually exclusive. */
BUG_ON(mode);
} else
ntfs_debug("Entering (allocating a base mft record).");
if (mode) {
/* @mode and @base_ni are mutually exclusive. */
BUG_ON(base_ni);
/* We only support creation of normal files and directories. */
if (!S_ISREG(mode) && !S_ISDIR(mode))
return ERR_PTR(-EOPNOTSUPP);
}
BUG_ON(!mrec);
mft_ni = NTFS_I(vol->mft_ino);
mftbmp_ni = NTFS_I(vol->mftbmp_ino);
down_write(&vol->mftbmp_lock);
bit = ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(vol, base_ni);
if (bit >= 0) {
ntfs_debug("Found and allocated free record (#1), bit 0x%llx.",
(long long)bit);
goto have_alloc_rec;
}
if (bit != -ENOSPC) {
up_write(&vol->mftbmp_lock);
return ERR_PTR(bit);
}
/*
* No free mft records left. If the mft bitmap already covers more
* than the currently used mft records, the next records are all free,
* so we can simply allocate the first unused mft record.
* Note: We also have to make sure that the mft bitmap at least covers
* the first 24 mft records as they are special and whilst they may not
* be in use, we do not allocate from them.
*/
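/*
 * Illustrative numbers for the check below: with 24576 bytes of
 * initialized mft data and 1024-byte records, ll = 24 records exist;
 * a bitmap initialized_size of 8 bytes covers 64 records, so bit 24
 * (the first non-special record) is known to be free.
 */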
read_lock_irqsave(&mft_ni->size_lock, flags);
ll = mft_ni->initialized_size >> vol->mft_record_size_bits;
read_unlock_irqrestore(&mft_ni->size_lock, flags);
read_lock_irqsave(&mftbmp_ni->size_lock, flags);
old_data_initialized = mftbmp_ni->initialized_size;
read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
if (old_data_initialized << 3 > ll && old_data_initialized > 3) {
bit = ll;
if (bit < 24)
bit = 24;
if (unlikely(bit >= (1ll << 32)))
goto max_err_out;
ntfs_debug("Found free record (#2), bit 0x%llx.",
(long long)bit);
goto found_free_rec;
}
/*
* The mft bitmap needs to be expanded until it covers the first unused
* mft record that we can allocate.
* Note: The smallest mft record we allocate is mft record 24.
*/
bit = old_data_initialized << 3;
if (unlikely(bit >= (1ll << 32)))
goto max_err_out;
read_lock_irqsave(&mftbmp_ni->size_lock, flags);
old_data_size = mftbmp_ni->allocated_size;
ntfs_debug("Status of mftbmp before extension: allocated_size 0x%llx, "
"data_size 0x%llx, initialized_size 0x%llx.",
(long long)old_data_size,
(long long)i_size_read(vol->mftbmp_ino),
(long long)old_data_initialized);
read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
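/*
 * Each 8-byte initialization step covers 64 more mft records (one bit
 * per record), so e.g. a bitmap with both allocated_size and
 * initialized_size at 8 bytes must first grow its allocation by one
 * cluster before the next 64 bits can be initialized.
 */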
if (old_data_initialized + 8 > old_data_size) {
/* Need to extend bitmap by one more cluster. */
ntfs_debug("mftbmp: initialized_size + 8 > allocated_size.");
err = ntfs_mft_bitmap_extend_allocation_nolock(vol);
if (unlikely(err)) {
up_write(&vol->mftbmp_lock);
goto err_out;
}
#ifdef DEBUG
read_lock_irqsave(&mftbmp_ni->size_lock, flags);
ntfs_debug("Status of mftbmp after allocation extension: "
"allocated_size 0x%llx, data_size 0x%llx, "
"initialized_size 0x%llx.",
(long long)mftbmp_ni->allocated_size,
(long long)i_size_read(vol->mftbmp_ino),
(long long)mftbmp_ni->initialized_size);
read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
#endif /* DEBUG */
}
/*
* We now have sufficient allocated space, extend the initialized_size
* as well as the data_size if necessary and fill the new space with
* zeroes.
*/
err = ntfs_mft_bitmap_extend_initialized_nolock(vol);
if (unlikely(err)) {
up_write(&vol->mftbmp_lock);
goto err_out;
}
#ifdef DEBUG
read_lock_irqsave(&mftbmp_ni->size_lock, flags);
ntfs_debug("Status of mftbmp after initialized extension: "
"allocated_size 0x%llx, data_size 0x%llx, "
"initialized_size 0x%llx.",
(long long)mftbmp_ni->allocated_size,
(long long)i_size_read(vol->mftbmp_ino),
(long long)mftbmp_ni->initialized_size);
read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
#endif /* DEBUG */
ntfs_debug("Found free record (#3), bit 0x%llx.", (long long)bit);
found_free_rec:
/* @bit is the found free mft record, allocate it in the mft bitmap. */
ntfs_debug("At found_free_rec.");
err = ntfs_bitmap_set_bit(vol->mftbmp_ino, bit);
if (unlikely(err)) {
ntfs_error(vol->sb, "Failed to allocate bit in mft bitmap.");
up_write(&vol->mftbmp_lock);
goto err_out;
}
ntfs_debug("Set bit 0x%llx in mft bitmap.", (long long)bit);
have_alloc_rec:
/*
* The mft bitmap is now uptodate. Deal with mft data attribute now.
* Note, we keep hold of the mft bitmap lock for writing until all
* modifications to the mft data attribute are complete, too, as they
* will impact decisions for mft bitmap and mft record allocation done
* by a parallel allocation and if the lock is not maintained a
* parallel allocation could allocate the same mft record as this one.
*/
ll = (bit + 1) << vol->mft_record_size_bits;
read_lock_irqsave(&mft_ni->size_lock, flags);
old_data_initialized = mft_ni->initialized_size;
read_unlock_irqrestore(&mft_ni->size_lock, flags);
if (ll <= old_data_initialized) {
ntfs_debug("Allocated mft record already initialized.");
goto mft_rec_already_initialized;
}
ntfs_debug("Initializing allocated mft record.");
/*
* The mft record is outside the initialized data. Extend the mft data
* attribute until it covers the allocated record. The loop is only
* actually traversed more than once when a freshly formatted volume is
* first written to so it optimizes away nicely in the common case.
*/
read_lock_irqsave(&mft_ni->size_lock, flags);
ntfs_debug("Status of mft data before extension: "
"allocated_size 0x%llx, data_size 0x%llx, "
"initialized_size 0x%llx.",
(long long)mft_ni->allocated_size,
(long long)i_size_read(vol->mft_ino),
(long long)mft_ni->initialized_size);
while (ll > mft_ni->allocated_size) {
read_unlock_irqrestore(&mft_ni->size_lock, flags);
err = ntfs_mft_data_extend_allocation_nolock(vol);
if (unlikely(err)) {
ntfs_error(vol->sb, "Failed to extend mft data "
"allocation.");
goto undo_mftbmp_alloc_nolock;
}
read_lock_irqsave(&mft_ni->size_lock, flags);
ntfs_debug("Status of mft data after allocation extension: "
"allocated_size 0x%llx, data_size 0x%llx, "
"initialized_size 0x%llx.",
(long long)mft_ni->allocated_size,
(long long)i_size_read(vol->mft_ino),
(long long)mft_ni->initialized_size);
}
read_unlock_irqrestore(&mft_ni->size_lock, flags);
/*
* Extend mft data initialized size (and data size of course) to reach
* the allocated mft record, formatting the mft records along the way.
* Note: We only modify the ntfs_inode structure as that is all that is
* needed by ntfs_mft_record_format(). We will update the attribute
* record itself in one fell swoop later on.
*/
write_lock_irqsave(&mft_ni->size_lock, flags);
old_data_initialized = mft_ni->initialized_size;
old_data_size = vol->mft_ino->i_size;
while (ll > mft_ni->initialized_size) {
s64 new_initialized_size, mft_no;
new_initialized_size = mft_ni->initialized_size +
vol->mft_record_size;
mft_no = mft_ni->initialized_size >> vol->mft_record_size_bits;
if (new_initialized_size > i_size_read(vol->mft_ino))
i_size_write(vol->mft_ino, new_initialized_size);
write_unlock_irqrestore(&mft_ni->size_lock, flags);
ntfs_debug("Initializing mft record 0x%llx.",
(long long)mft_no);
err = ntfs_mft_record_format(vol, mft_no);
if (unlikely(err)) {
ntfs_error(vol->sb, "Failed to format mft record.");
goto undo_data_init;
}
write_lock_irqsave(&mft_ni->size_lock, flags);
mft_ni->initialized_size = new_initialized_size;
}
write_unlock_irqrestore(&mft_ni->size_lock, flags);
record_formatted = true;
/* Update the mft data attribute record to reflect the new sizes. */
m = map_mft_record(mft_ni);
if (IS_ERR(m)) {
ntfs_error(vol->sb, "Failed to map mft record.");
err = PTR_ERR(m);
goto undo_data_init;
}
ctx = ntfs_attr_get_search_ctx(mft_ni, m);
if (unlikely(!ctx)) {
ntfs_error(vol->sb, "Failed to get search context.");
err = -ENOMEM;
unmap_mft_record(mft_ni);
goto undo_data_init;
}
err = ntfs_attr_lookup(mft_ni->type, mft_ni->name, mft_ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
ntfs_error(vol->sb, "Failed to find first attribute extent of "
"mft data attribute.");
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(mft_ni);
goto undo_data_init;
}
a = ctx->attr;
read_lock_irqsave(&mft_ni->size_lock, flags);
a->data.non_resident.initialized_size =
cpu_to_sle64(mft_ni->initialized_size);
a->data.non_resident.data_size =
cpu_to_sle64(i_size_read(vol->mft_ino));
read_unlock_irqrestore(&mft_ni->size_lock, flags);
/* Ensure the changes make it to disk. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(mft_ni);
read_lock_irqsave(&mft_ni->size_lock, flags);
ntfs_debug("Status of mft data after mft record initialization: "
"allocated_size 0x%llx, data_size 0x%llx, "
"initialized_size 0x%llx.",
(long long)mft_ni->allocated_size,
(long long)i_size_read(vol->mft_ino),
(long long)mft_ni->initialized_size);
BUG_ON(i_size_read(vol->mft_ino) > mft_ni->allocated_size);
BUG_ON(mft_ni->initialized_size > i_size_read(vol->mft_ino));
read_unlock_irqrestore(&mft_ni->size_lock, flags);
mft_rec_already_initialized:
/*
* We can finally drop the mft bitmap lock as the mft data attribute
* has been fully updated. The only disparity left is that the
* allocated mft record still needs to be marked as in use to match the
* set bit in the mft bitmap but this is actually not a problem since
* this mft record is not referenced from anywhere yet and the fact
* that it is allocated in the mft bitmap means that no-one will try to
* allocate it either.
*/
up_write(&vol->mftbmp_lock);
/*
* We now have allocated and initialized the mft record. Calculate the
* index of and the offset within the page cache page the record is in.
*/
index = bit << vol->mft_record_size_bits >> PAGE_SHIFT;
ofs = (bit << vol->mft_record_size_bits) & ~PAGE_MASK;
/* Read, map, and pin the page containing the mft record. */
page = ntfs_map_page(vol->mft_ino->i_mapping, index);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to map page containing allocated "
"mft record 0x%llx.", (long long)bit);
err = PTR_ERR(page);
goto undo_mftbmp_alloc;
}
lock_page(page);
BUG_ON(!PageUptodate(page));
ClearPageUptodate(page);
m = (MFT_RECORD*)((u8*)page_address(page) + ofs);
/* If we just formatted the mft record no need to do it again. */
if (!record_formatted) {
/* Sanity check that the mft record is really not in use. */
if (ntfs_is_file_record(m->magic) &&
(m->flags & MFT_RECORD_IN_USE)) {
ntfs_error(vol->sb, "Mft record 0x%llx was marked "
"free in mft bitmap but is marked "
"used itself. Corrupt filesystem. "
"Unmount and run chkdsk.",
(long long)bit);
err = -EIO;
SetPageUptodate(page);
unlock_page(page);
ntfs_unmap_page(page);
NVolSetErrors(vol);
goto undo_mftbmp_alloc;
}
/*
* We need to (re-)format the mft record, preserving the
* sequence number if it is not zero as well as the update
* sequence number if it is not zero or -1 (0xffff). This
* means we do not need to care whether or not something went
* wrong with the previous mft record.
*/
seq_no = m->sequence_number;
usn = *(le16*)((u8*)m + le16_to_cpu(m->usa_ofs));
err = ntfs_mft_record_layout(vol, bit, m);
if (unlikely(err)) {
ntfs_error(vol->sb, "Failed to layout allocated mft "
"record 0x%llx.", (long long)bit);
SetPageUptodate(page);
unlock_page(page);
ntfs_unmap_page(page);
goto undo_mftbmp_alloc;
}
if (seq_no)
m->sequence_number = seq_no;
if (usn && le16_to_cpu(usn) != 0xffff)
*(le16*)((u8*)m + le16_to_cpu(m->usa_ofs)) = usn;
}
/* Set the mft record itself in use. */
m->flags |= MFT_RECORD_IN_USE;
if (S_ISDIR(mode))
m->flags |= MFT_RECORD_IS_DIRECTORY;
flush_dcache_page(page);
SetPageUptodate(page);
if (base_ni) {
MFT_RECORD *m_tmp;
/*
* Setup the base mft record in the extent mft record. This
* completes initialization of the allocated extent mft record
* and we can simply use it with map_extent_mft_record().
*/
m->base_mft_record = MK_LE_MREF(base_ni->mft_no,
base_ni->seq_no);
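/*
 * For illustration: MK_LE_MREF() packs the 48-bit mft record number
 * and the 16-bit sequence number into a single le64, so e.g. base
 * record 0x24 with sequence number 2 yields the reference
 * 0x0002000000000024 (stored little-endian on disk).
 */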
/*
* Allocate an extent inode structure for the new mft record,
* attach it to the base inode @base_ni and map, pin, and lock
* its mft record, i.e. the one just allocated.
*/
m_tmp = map_extent_mft_record(base_ni, bit, &ni);
if (IS_ERR(m_tmp)) {
ntfs_error(vol->sb, "Failed to map allocated extent "
"mft record 0x%llx.", (long long)bit);
err = PTR_ERR(m_tmp);
/* Set the mft record itself not in use. */
m->flags &= cpu_to_le16(
~le16_to_cpu(MFT_RECORD_IN_USE));
flush_dcache_page(page);
/* Make sure the mft record is written out to disk. */
mark_ntfs_record_dirty(page, ofs);
unlock_page(page);
ntfs_unmap_page(page);
goto undo_mftbmp_alloc;
}
BUG_ON(m != m_tmp);
/*
* Make sure the allocated mft record is written out to disk.
* No need to set the inode dirty because the caller is going
* to do that anyway after finishing with the new extent mft
* record (e.g. at a minimum a new attribute will be added to
* the mft record).
*/
mark_ntfs_record_dirty(page, ofs);
unlock_page(page);
/*
* Need to unmap the page since map_extent_mft_record() mapped
* it as well so we have it mapped twice at the moment.
*/
ntfs_unmap_page(page);
} else {
/*
* Allocate a new VFS inode and set it up. NOTE: @vi->i_nlink
* is set to 1 but the mft record->link_count is 0. The caller
* needs to bear this in mind.
*/
vi = new_inode(vol->sb);
if (unlikely(!vi)) {
err = -ENOMEM;
/* Set the mft record itself not in use. */
m->flags &= cpu_to_le16(
~le16_to_cpu(MFT_RECORD_IN_USE));
flush_dcache_page(page);
/* Make sure the mft record is written out to disk. */
mark_ntfs_record_dirty(page, ofs);
unlock_page(page);
ntfs_unmap_page(page);
goto undo_mftbmp_alloc;
}
vi->i_ino = bit;
/* The owner and group come from the ntfs volume. */
vi->i_uid = vol->uid;
vi->i_gid = vol->gid;
/* Initialize the ntfs specific part of @vi. */
ntfs_init_big_inode(vi);
ni = NTFS_I(vi);
/*
* Set the appropriate mode, attribute type, and name. For
* directories, also setup the index values to the defaults.
*/
if (S_ISDIR(mode)) {
vi->i_mode = S_IFDIR | S_IRWXUGO;
vi->i_mode &= ~vol->dmask;
NInoSetMstProtected(ni);
ni->type = AT_INDEX_ALLOCATION;
ni->name = I30;
ni->name_len = 4;
ni->itype.index.block_size = 4096;
ni->itype.index.block_size_bits = ntfs_ffs(4096) - 1;
ni->itype.index.collation_rule = COLLATION_FILE_NAME;
if (vol->cluster_size <= ni->itype.index.block_size) {
ni->itype.index.vcn_size = vol->cluster_size;
ni->itype.index.vcn_size_bits =
vol->cluster_size_bits;
} else {
ni->itype.index.vcn_size = vol->sector_size;
ni->itype.index.vcn_size_bits =
vol->sector_size_bits;
}
} else {
vi->i_mode = S_IFREG | S_IRWXUGO;
vi->i_mode &= ~vol->fmask;
ni->type = AT_DATA;
ni->name = NULL;
ni->name_len = 0;
}
if (IS_RDONLY(vi))
vi->i_mode &= ~S_IWUGO;
/* Set the inode times to the current time. */
vi->i_atime = vi->i_mtime = inode_set_ctime_current(vi);
/*
* Set the file size to 0, the ntfs inode sizes are set to 0 by
* the call to ntfs_init_big_inode() below.
*/
vi->i_size = 0;
vi->i_blocks = 0;
/* Set the sequence number. */
vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
/*
* Manually map, pin, and lock the mft record as we already
* have its page mapped and it is very easy to do.
*/
atomic_inc(&ni->count);
mutex_lock(&ni->mrec_lock);
ni->page = page;
ni->page_ofs = ofs;
/*
* Make sure the allocated mft record is written out to disk.
* NOTE: We do not set the ntfs inode dirty because this would
* fail in ntfs_write_inode() because the inode does not have a
* standard information attribute yet. Also, there is no need
* to set the inode dirty because the caller is going to do
* that anyway after finishing with the new mft record (e.g. at
* a minimum some new attributes will be added to the mft
* record).
*/
mark_ntfs_record_dirty(page, ofs);
unlock_page(page);
/* Add the inode to the inode hash for the superblock. */
insert_inode_hash(vi);
/* Update the default mft allocation position. */
vol->mft_data_pos = bit + 1;
}
/*
* Return the opened, allocated inode of the allocated mft record as
* well as the mapped, pinned, and locked mft record.
*/
ntfs_debug("Returning opened, allocated %sinode 0x%llx.",
base_ni ? "extent " : "", (long long)bit);
*mrec = m;
return ni;
undo_data_init:
write_lock_irqsave(&mft_ni->size_lock, flags);
mft_ni->initialized_size = old_data_initialized;
i_size_write(vol->mft_ino, old_data_size);
write_unlock_irqrestore(&mft_ni->size_lock, flags);
goto undo_mftbmp_alloc_nolock;
undo_mftbmp_alloc:
down_write(&vol->mftbmp_lock);
undo_mftbmp_alloc_nolock:
if (ntfs_bitmap_clear_bit(vol->mftbmp_ino, bit)) {
ntfs_error(vol->sb, "Failed to clear bit in mft bitmap.%s", es);
NVolSetErrors(vol);
}
up_write(&vol->mftbmp_lock);
err_out:
return ERR_PTR(err);
max_err_out:
ntfs_warning(vol->sb, "Cannot allocate mft record because the maximum "
"number of inodes (2^32) has already been reached.");
up_write(&vol->mftbmp_lock);
return ERR_PTR(-ENOSPC);
}
/**
* ntfs_extent_mft_record_free - free an extent mft record on an ntfs volume
* @ni: ntfs inode of the mapped extent mft record to free
* @m: mapped extent mft record of the ntfs inode @ni
*
* Free the mapped extent mft record @m of the extent ntfs inode @ni.
*
* Note that this function unmaps the mft record and closes and destroys @ni
* internally and hence you cannot use either @ni nor @m any more after this
* function returns success.
*
* On success return 0 and on error return -errno. @ni and @m are still valid
* in this case and have not been freed.
*
* For some errors an error message is displayed and the success code 0 is
* returned and the volume is then left dirty on umount. This makes sense in
* case we could not rollback the changes that were already done since the
* caller no longer wants to reference this mft record so it does not matter to
* the caller if something is wrong with it as long as it is properly detached
* from the base inode.
*/
int ntfs_extent_mft_record_free(ntfs_inode *ni, MFT_RECORD *m)
{
unsigned long mft_no = ni->mft_no;
ntfs_volume *vol = ni->vol;
ntfs_inode *base_ni;
ntfs_inode **extent_nis;
int i, err;
le16 old_seq_no;
u16 seq_no;
BUG_ON(NInoAttr(ni));
BUG_ON(ni->nr_extents != -1);
mutex_lock(&ni->extent_lock);
base_ni = ni->ext.base_ntfs_ino;
mutex_unlock(&ni->extent_lock);
BUG_ON(base_ni->nr_extents <= 0);
ntfs_debug("Entering for extent inode 0x%lx, base inode 0x%lx.\n",
mft_no, base_ni->mft_no);
mutex_lock(&base_ni->extent_lock);
/* Make sure we are holding the only reference to the extent inode. */
if (atomic_read(&ni->count) > 2) {
ntfs_error(vol->sb, "Tried to free busy extent inode 0x%lx, "
"not freeing.", base_ni->mft_no);
mutex_unlock(&base_ni->extent_lock);
return -EBUSY;
}
/* Dissociate the ntfs inode from the base inode. */
extent_nis = base_ni->ext.extent_ntfs_inos;
err = -ENOENT;
for (i = 0; i < base_ni->nr_extents; i++) {
if (ni != extent_nis[i])
continue;
extent_nis += i;
base_ni->nr_extents--;
memmove(extent_nis, extent_nis + 1, (base_ni->nr_extents - i) *
sizeof(ntfs_inode*));
err = 0;
break;
}
mutex_unlock(&base_ni->extent_lock);
if (unlikely(err)) {
ntfs_error(vol->sb, "Extent inode 0x%lx is not attached to "
"its base inode 0x%lx.", mft_no,
base_ni->mft_no);
BUG();
}
/*
* The extent inode is no longer attached to the base inode so no one
* can get a reference to it any more.
*/
/* Mark the mft record as not in use. */
m->flags &= ~MFT_RECORD_IN_USE;
/* Increment the sequence number, skipping zero, if it is not zero. */
old_seq_no = m->sequence_number;
seq_no = le16_to_cpu(old_seq_no);
if (seq_no == 0xffff)
seq_no = 1;
else if (seq_no)
seq_no++;
m->sequence_number = cpu_to_le16(seq_no);
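/*
 * Example: the sequence number is bumped at free time, so a record
 * freed with sequence number 3 carries 4 when it is next reused and
 * stale mft references embedding the old value no longer validate;
 * 0xffff wraps to 1 and 0 stays 0, as implemented above.
 */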
/*
* Set the ntfs inode dirty and write it out. We do not need to worry
* about the base inode here since whatever caused the extent mft
* record to be freed is guaranteed to do it already.
*/
NInoSetDirty(ni);
err = write_mft_record(ni, m, 0);
if (unlikely(err)) {
ntfs_error(vol->sb, "Failed to write mft record 0x%lx, not "
"freeing.", mft_no);
goto rollback;
}
rollback_error:
/* Unmap and throw away the now freed extent inode. */
unmap_extent_mft_record(ni);
ntfs_clear_extent_inode(ni);
/* Clear the bit in the $MFT/$BITMAP corresponding to this record. */
down_write(&vol->mftbmp_lock);
err = ntfs_bitmap_clear_bit(vol->mftbmp_ino, mft_no);
up_write(&vol->mftbmp_lock);
if (unlikely(err)) {
/*
* The extent inode is gone but we failed to deallocate it in
* the mft bitmap. Just emit a warning and leave the volume
* dirty on umount.
*/
ntfs_error(vol->sb, "Failed to clear bit in mft bitmap.%s", es);
NVolSetErrors(vol);
}
return 0;
rollback:
/* Rollback what we did... */
mutex_lock(&base_ni->extent_lock);
extent_nis = base_ni->ext.extent_ntfs_inos;
if (!(base_ni->nr_extents & 3)) {
int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode*);
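/*
 * The extent array grows four slots at a time, so a count that is a
 * multiple of four means the array is full; e.g. re-adding a fifth
 * extent here requires reallocating from 4 to 8 slots.
 */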
extent_nis = kmalloc(new_size, GFP_NOFS);
if (unlikely(!extent_nis)) {
ntfs_error(vol->sb, "Failed to allocate internal "
"buffer during rollback.%s", es);
mutex_unlock(&base_ni->extent_lock);
NVolSetErrors(vol);
goto rollback_error;
}
if (base_ni->nr_extents) {
BUG_ON(!base_ni->ext.extent_ntfs_inos);
memcpy(extent_nis, base_ni->ext.extent_ntfs_inos,
new_size - 4 * sizeof(ntfs_inode*));
kfree(base_ni->ext.extent_ntfs_inos);
}
base_ni->ext.extent_ntfs_inos = extent_nis;
}
m->flags |= MFT_RECORD_IN_USE;
m->sequence_number = old_seq_no;
extent_nis[base_ni->nr_extents++] = ni;
mutex_unlock(&base_ni->extent_lock);
mark_mft_record_dirty(ni);
return err;
}
#endif /* NTFS_RW */
| linux-master | fs/ntfs/mft.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2004-2005 Anton Altaparmakov
*/
#ifdef NTFS_RW
#include <linux/pagemap.h>
#include "bitmap.h"
#include "debug.h"
#include "aops.h"
#include "ntfs.h"
/**
* __ntfs_bitmap_set_bits_in_run - set a run of bits in a bitmap to a value
* @vi: vfs inode describing the bitmap
* @start_bit: first bit to set
* @count: number of bits to set
* @value: value to set the bits to (i.e. 0 or 1)
* @is_rollback: if 'true' this is a rollback operation
*
* Set @count bits starting at bit @start_bit in the bitmap described by the
* vfs inode @vi to @value, where @value is either 0 or 1.
*
* @is_rollback should always be 'false', it is for internal use to rollback
* errors. You probably want to use ntfs_bitmap_set_bits_in_run() instead.
*
* Return 0 on success and -errno on error.
*/
int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
const s64 count, const u8 value, const bool is_rollback)
{
s64 cnt = count;
pgoff_t index, end_index;
struct address_space *mapping;
struct page *page;
u8 *kaddr;
int pos, len;
u8 bit;
BUG_ON(!vi);
ntfs_debug("Entering for i_ino 0x%lx, start_bit 0x%llx, count 0x%llx, "
"value %u.%s", vi->i_ino, (unsigned long long)start_bit,
(unsigned long long)cnt, (unsigned int)value,
is_rollback ? " (rollback)" : "");
BUG_ON(start_bit < 0);
BUG_ON(cnt < 0);
BUG_ON(value > 1);
/*
* Calculate the indices for the pages containing the first and last
* bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
*/
index = start_bit >> (3 + PAGE_SHIFT);
end_index = (start_bit + cnt - 1) >> (3 + PAGE_SHIFT);
/* Get the page containing the first bit (@start_bit). */
mapping = vi->i_mapping;
page = ntfs_map_page(mapping, index);
if (IS_ERR(page)) {
if (!is_rollback)
ntfs_error(vi->i_sb, "Failed to map first page (error "
"%li), aborting.", PTR_ERR(page));
return PTR_ERR(page);
}
kaddr = page_address(page);
/* Set @pos to the position of the byte containing @start_bit. */
pos = (start_bit >> 3) & ~PAGE_MASK;
/* Calculate the position of @start_bit in the first byte. */
bit = start_bit & 7;
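/*
 * Worked example (assuming 4096-byte pages): for start_bit 0x2003,
 * the page index is 0x2003 >> 15 = 0, pos = (0x2003 >> 3) & 0xfff =
 * 0x400, and bit = 3, i.e. we start at bit 3 of byte 0x400 of the
 * first page.
 */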
/* If the first byte is partial, modify the appropriate bits in it. */
if (bit) {
u8 *byte = kaddr + pos;
while ((bit & 7) && cnt) {
cnt--;
if (value)
*byte |= 1 << bit++;
else
*byte &= ~(1 << bit++);
}
/* If we are done, unmap the page and return success. */
if (!cnt)
goto done;
/* Update @pos to the new position. */
pos++;
}
/*
* Depending on @value, modify all remaining whole bytes in the page up
* to @cnt.
*/
len = min_t(s64, cnt >> 3, PAGE_SIZE - pos);
memset(kaddr + pos, value ? 0xff : 0, len);
cnt -= len << 3;
/* Update @len to point to the first not-done byte in the page. */
if (cnt < 8)
len += pos;
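/*
 * Worked example: with 20 bits left at byte offset pos = 5, two whole
 * bytes (16 bits) were memset above, leaving cnt = 4; since cnt < 8
 * the final partial byte sits at offset len = 7 in this page.
 */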
/* If we are not in the last page, deal with all subsequent pages. */
while (index < end_index) {
BUG_ON(cnt <= 0);
/* Update @index and get the next page. */
flush_dcache_page(page);
set_page_dirty(page);
ntfs_unmap_page(page);
page = ntfs_map_page(mapping, ++index);
if (IS_ERR(page))
goto rollback;
kaddr = page_address(page);
/*
* Depending on @value, modify all remaining whole bytes in the
* page up to @cnt.
*/
len = min_t(s64, cnt >> 3, PAGE_SIZE);
memset(kaddr, value ? 0xff : 0, len);
cnt -= len << 3;
}
/*
* The currently mapped page is the last one. If the last byte is
* partial, modify the appropriate bits in it. Note, @len is the
* position of the last byte inside the page.
*/
if (cnt) {
u8 *byte;
BUG_ON(cnt > 7);
bit = cnt;
byte = kaddr + len;
while (bit--) {
if (value)
*byte |= 1 << bit;
else
*byte &= ~(1 << bit);
}
}
done:
/* We are done. Unmap the page and return success. */
flush_dcache_page(page);
set_page_dirty(page);
ntfs_unmap_page(page);
ntfs_debug("Done.");
return 0;
rollback:
/*
* Current state:
* - no pages are mapped
* - @count - @cnt is the number of bits that have been modified
*/
if (is_rollback)
return PTR_ERR(page);
if (count != cnt)
pos = __ntfs_bitmap_set_bits_in_run(vi, start_bit, count - cnt,
value ? 0 : 1, true);
else
pos = 0;
if (!pos) {
/* Rollback was successful. */
ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
"%li), aborting.", PTR_ERR(page));
} else {
/* Rollback failed. */
ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
"%li) and rollback failed (error %i). "
"Aborting and leaving inconsistent metadata. "
"Unmount and run chkdsk.", PTR_ERR(page), pos);
NVolSetErrors(NTFS_SB(vi->i_sb));
}
return PTR_ERR(page);
}
#endif /* NTFS_RW */
| linux-master | fs/ntfs/bitmap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* lcnalloc.c - Cluster (de)allocation code. Part of the Linux-NTFS project.
*
* Copyright (c) 2004-2005 Anton Altaparmakov
*/
#ifdef NTFS_RW
#include <linux/pagemap.h>
#include "lcnalloc.h"
#include "debug.h"
#include "bitmap.h"
#include "inode.h"
#include "volume.h"
#include "attrib.h"
#include "malloc.h"
#include "aops.h"
#include "ntfs.h"
/**
* ntfs_cluster_free_from_rl_nolock - free clusters from runlist
* @vol: mounted ntfs volume on which to free the clusters
* @rl: runlist describing the clusters to free
*
* Free all the clusters described by the runlist @rl on the volume @vol. In
* the case of an error being returned, at least some of the clusters were not
* freed.
*
* Return 0 on success and -errno on error.
*
* Locking: - The volume lcn bitmap must be locked for writing on entry and is
* left locked on return.
*/
int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol,
const runlist_element *rl)
{
struct inode *lcnbmp_vi = vol->lcnbmp_ino;
int ret = 0;
ntfs_debug("Entering.");
if (!rl)
return 0;
for (; rl->length; rl++) {
int err;
if (rl->lcn < 0)
continue;
err = ntfs_bitmap_clear_run(lcnbmp_vi, rl->lcn, rl->length);
if (unlikely(err && (!ret || ret == -ENOMEM) && ret != err))
ret = err;
}
ntfs_debug("Done.");
return ret;
}
/**
* ntfs_cluster_alloc - allocate clusters on an ntfs volume
* @vol: mounted ntfs volume on which to allocate the clusters
* @start_vcn: vcn to use for the first allocated cluster
* @count: number of clusters to allocate
* @start_lcn: starting lcn at which to allocate the clusters (or -1 if none)
* @zone: zone from which to allocate the clusters
* @is_extension: if 'true', this is an attribute extension
*
* Allocate @count clusters preferably starting at cluster @start_lcn or at the
* current allocator position if @start_lcn is -1, on the mounted ntfs volume
* @vol. @zone is either DATA_ZONE for allocation of normal clusters or
* MFT_ZONE for allocation of clusters for the master file table, i.e. the
* $MFT/$DATA attribute.
*
* @start_vcn specifies the vcn of the first allocated cluster. This makes
* merging the resulting runlist with the old runlist easier.
*
* If @is_extension is 'true', the caller is allocating clusters to extend an
* attribute and if it is 'false', the caller is allocating clusters to fill a
* hole in an attribute. Practically the difference is that if @is_extension
* is 'true' the returned runlist will be terminated with LCN_ENOENT and if
* @is_extension is 'false' the runlist will be terminated with
* LCN_RL_NOT_MAPPED.
*
* You need to check the return value with IS_ERR(). If this is false, the
* function was successful and the return value is a runlist describing the
* allocated cluster(s). If IS_ERR() is true, the function failed and
* PTR_ERR() gives you the error code.
*
* Notes on the allocation algorithm
* =================================
*
* There are two data zones. First is the area between the end of the mft zone
* and the end of the volume, and second is the area between the start of the
* volume and the start of the mft zone. On unmodified/standard NTFS 1.x
* volumes, the second data zone does not exist due to the mft zone being
* expanded to cover the start of the volume in order to reserve space for the
* mft bitmap attribute.
*
* This is not the prettiest function but the complexity stems from the need of
* implementing the mft vs data zoned approach and from the fact that we have
* access to the lcn bitmap in portions of up to 8192 bytes at a time, so we
* need to cope with crossing over boundaries of two buffers. Further, the
* fact that the allocator allows for caller supplied hints as to the location
* of where allocation should begin and the fact that the allocator keeps track
* of where in the data zones the next natural allocation should occur,
* contribute to the complexity of the function. But it should all be
* worthwhile, because this allocator should: 1) be a full implementation of
* the MFT zone approach used by Windows NT, 2) cause reduction in
* fragmentation, and 3) be speedy in allocations (the code is not optimized
* for speed, but the algorithm is, so further speed improvements are probably
* possible).
*
* FIXME: We should be monitoring cluster allocation and increment the MFT zone
* size dynamically but this is something for the future. We will just cause
* heavier fragmentation by not doing it and I am not even sure Windows would
* grow the MFT zone dynamically, so it might even be correct not to do this.
* The overhead in doing dynamic MFT zone expansion would be very large and
* unlikely worth the effort. (AIA)
*
* TODO: I have added in double the required zone position pointer wrap around
* logic which can be optimized to having only one of the two logic sets.
* However, having the double logic will work fine, but if we have only one of
* the sets and we get it wrong somewhere, then we get into trouble, so
* removing the duplicate logic requires _very_ careful consideration of _all_
* possible code paths. So at least for now, I am leaving the double logic -
* better safe than sorry... (AIA)
*
* Locking: - The volume lcn bitmap must be unlocked on entry and is unlocked
* on return.
* - This function takes the volume lcn bitmap lock for writing and
* modifies the bitmap contents.
*/
runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn,
const s64 count, const LCN start_lcn,
const NTFS_CLUSTER_ALLOCATION_ZONES zone,
const bool is_extension)
{
LCN zone_start, zone_end, bmp_pos, bmp_initial_pos, last_read_pos, lcn;
LCN prev_lcn = 0, prev_run_len = 0, mft_zone_size;
s64 clusters;
loff_t i_size;
struct inode *lcnbmp_vi;
runlist_element *rl = NULL;
struct address_space *mapping;
struct page *page = NULL;
u8 *buf, *byte;
int err = 0, rlpos, rlsize, buf_size;
u8 pass, done_zones, search_zone, need_writeback = 0, bit;
ntfs_debug("Entering for start_vcn 0x%llx, count 0x%llx, start_lcn "
"0x%llx, zone %s_ZONE.", (unsigned long long)start_vcn,
(unsigned long long)count,
(unsigned long long)start_lcn,
zone == MFT_ZONE ? "MFT" : "DATA");
BUG_ON(!vol);
lcnbmp_vi = vol->lcnbmp_ino;
BUG_ON(!lcnbmp_vi);
BUG_ON(start_vcn < 0);
BUG_ON(count < 0);
BUG_ON(start_lcn < -1);
BUG_ON(zone < FIRST_ZONE);
BUG_ON(zone > LAST_ZONE);
/* Return NULL if @count is zero. */
if (!count)
return NULL;
/* Take the lcnbmp lock for writing. */
down_write(&vol->lcnbmp_lock);
/*
* If no specific @start_lcn was requested, use the current data zone
* position, otherwise use the requested @start_lcn but make sure it
* lies outside the mft zone. Also set done_zones to 0 (no zones done)
* and pass depending on whether we are starting inside a zone (1) or
* at the beginning of a zone (2). If requesting from the MFT_ZONE,
* we either start at the current position within the mft zone or at
* the specified position. If the latter is out of bounds then we start
* at the beginning of the MFT_ZONE.
*/
done_zones = 0;
pass = 1;
/*
* zone_start and zone_end are the current search range. search_zone
* is 1 for mft zone, 2 for data zone 1 (end of mft zone till end of
* volume) and 4 for data zone 2 (start of volume till start of mft
* zone).
*/
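/*
 * The same values double as bits in done_zones, e.g. done_zones == 0x5
 * means the mft zone (1) and data zone 2 (4) are exhausted and only
 * data zone 1 (2) remains to be searched.
 */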
zone_start = start_lcn;
if (zone_start < 0) {
if (zone == DATA_ZONE)
zone_start = vol->data1_zone_pos;
else
zone_start = vol->mft_zone_pos;
if (!zone_start) {
/*
* Zone starts at beginning of volume which means a
* single pass is sufficient.
*/
pass = 2;
}
} else if (zone == DATA_ZONE && zone_start >= vol->mft_zone_start &&
zone_start < vol->mft_zone_end) {
zone_start = vol->mft_zone_end;
/*
* Starting at beginning of data1_zone which means a single
* pass in this zone is sufficient.
*/
pass = 2;
} else if (zone == MFT_ZONE && (zone_start < vol->mft_zone_start ||
zone_start >= vol->mft_zone_end)) {
zone_start = vol->mft_lcn;
if (!vol->mft_zone_end)
zone_start = 0;
/*
* Starting at beginning of volume which means a single pass
* is sufficient.
*/
pass = 2;
}
if (zone == MFT_ZONE) {
zone_end = vol->mft_zone_end;
search_zone = 1;
} else /* if (zone == DATA_ZONE) */ {
/* Skip searching the mft zone. */
done_zones |= 1;
if (zone_start >= vol->mft_zone_end) {
zone_end = vol->nr_clusters;
search_zone = 2;
} else {
zone_end = vol->mft_zone_start;
search_zone = 4;
}
}
/*
* bmp_pos is the current bit position inside the bitmap. We use
* bmp_initial_pos to determine whether or not to do a zone switch.
*/
bmp_pos = bmp_initial_pos = zone_start;
/* Loop until all clusters are allocated, i.e. clusters == 0. */
clusters = count;
rlpos = rlsize = 0;
mapping = lcnbmp_vi->i_mapping;
i_size = i_size_read(lcnbmp_vi);
while (1) {
ntfs_debug("Start of outer while loop: done_zones 0x%x, "
"search_zone %i, pass %i, zone_start 0x%llx, "
"zone_end 0x%llx, bmp_initial_pos 0x%llx, "
"bmp_pos 0x%llx, rlpos %i, rlsize %i.",
done_zones, search_zone, pass,
(unsigned long long)zone_start,
(unsigned long long)zone_end,
(unsigned long long)bmp_initial_pos,
(unsigned long long)bmp_pos, rlpos, rlsize);
/* Loop until we run out of free clusters. */
last_read_pos = bmp_pos >> 3;
ntfs_debug("last_read_pos 0x%llx.",
(unsigned long long)last_read_pos);
if (last_read_pos > i_size) {
ntfs_debug("End of attribute reached. "
"Skipping to zone_pass_done.");
goto zone_pass_done;
}
if (likely(page)) {
if (need_writeback) {
ntfs_debug("Marking page dirty.");
flush_dcache_page(page);
set_page_dirty(page);
need_writeback = 0;
}
ntfs_unmap_page(page);
}
page = ntfs_map_page(mapping, last_read_pos >>
PAGE_SHIFT);
if (IS_ERR(page)) {
err = PTR_ERR(page);
ntfs_error(vol->sb, "Failed to map page.");
goto out;
}
buf_size = last_read_pos & ~PAGE_MASK;
buf = page_address(page) + buf_size;
buf_size = PAGE_SIZE - buf_size;
if (unlikely(last_read_pos + buf_size > i_size))
buf_size = i_size - last_read_pos;
buf_size <<= 3;
lcn = bmp_pos & 7;
bmp_pos &= ~(LCN)7;
ntfs_debug("Before inner while loop: buf_size %i, lcn 0x%llx, "
"bmp_pos 0x%llx, need_writeback %i.", buf_size,
(unsigned long long)lcn,
(unsigned long long)bmp_pos, need_writeback);
while (lcn < buf_size && lcn + bmp_pos < zone_end) {
byte = buf + (lcn >> 3);
ntfs_debug("In inner while loop: buf_size %i, "
"lcn 0x%llx, bmp_pos 0x%llx, "
"need_writeback %i, byte ofs 0x%x, "
"*byte 0x%x.", buf_size,
(unsigned long long)lcn,
(unsigned long long)bmp_pos,
need_writeback,
(unsigned int)(lcn >> 3),
(unsigned int)*byte);
/* Skip full bytes. */
if (*byte == 0xff) {
lcn = (lcn + 8) & ~(LCN)7;
ntfs_debug("Continuing while loop 1.");
continue;
}
bit = 1 << (lcn & 7);
ntfs_debug("bit 0x%x.", bit);
/* If the bit is already set, go onto the next one. */
if (*byte & bit) {
lcn++;
ntfs_debug("Continuing while loop 2.");
continue;
}
/*
* Allocate more memory if needed, including space for
* the terminator element.
* ntfs_malloc_nofs() operates on whole pages only.
*/
if ((rlpos + 2) * sizeof(*rl) > rlsize) {
runlist_element *rl2;
ntfs_debug("Reallocating memory.");
if (!rl)
ntfs_debug("First free bit is at LCN "
"0x%llx.",
(unsigned long long)
(lcn + bmp_pos));
rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
if (unlikely(!rl2)) {
err = -ENOMEM;
ntfs_error(vol->sb, "Failed to "
"allocate memory.");
goto out;
}
memcpy(rl2, rl, rlsize);
ntfs_free(rl);
rl = rl2;
rlsize += PAGE_SIZE;
ntfs_debug("Reallocated memory, rlsize 0x%x.",
rlsize);
}
/* Allocate the bitmap bit. */
*byte |= bit;
/* We need to write this bitmap page to disk. */
need_writeback = 1;
ntfs_debug("*byte 0x%x, need_writeback is set.",
(unsigned int)*byte);
/*
* Coalesce with previous run if adjacent LCNs.
* Otherwise, append a new run.
*/
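			/*
			 * Example: with a previous run (lcn 10, len 3), a
			 * newly allocated LCN 13 satisfies the test below and
			 * the run simply grows to (lcn 10, len 4).
			 */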
ntfs_debug("Adding run (lcn 0x%llx, len 0x%llx), "
"prev_lcn 0x%llx, lcn 0x%llx, "
"bmp_pos 0x%llx, prev_run_len 0x%llx, "
"rlpos %i.",
(unsigned long long)(lcn + bmp_pos),
1ULL, (unsigned long long)prev_lcn,
(unsigned long long)lcn,
(unsigned long long)bmp_pos,
(unsigned long long)prev_run_len,
rlpos);
if (prev_lcn == lcn + bmp_pos - prev_run_len && rlpos) {
ntfs_debug("Coalescing to run (lcn 0x%llx, "
"len 0x%llx).",
(unsigned long long)
rl[rlpos - 1].lcn,
(unsigned long long)
rl[rlpos - 1].length);
rl[rlpos - 1].length = ++prev_run_len;
ntfs_debug("Run now (lcn 0x%llx, len 0x%llx), "
"prev_run_len 0x%llx.",
(unsigned long long)
rl[rlpos - 1].lcn,
(unsigned long long)
rl[rlpos - 1].length,
(unsigned long long)
prev_run_len);
} else {
if (likely(rlpos)) {
ntfs_debug("Adding new run, (previous "
"run lcn 0x%llx, "
"len 0x%llx).",
(unsigned long long)
rl[rlpos - 1].lcn,
(unsigned long long)
rl[rlpos - 1].length);
rl[rlpos].vcn = rl[rlpos - 1].vcn +
prev_run_len;
} else {
ntfs_debug("Adding new run, is first "
"run.");
rl[rlpos].vcn = start_vcn;
}
rl[rlpos].lcn = prev_lcn = lcn + bmp_pos;
rl[rlpos].length = prev_run_len = 1;
rlpos++;
}
/* Done? */
if (!--clusters) {
LCN tc;
/*
* Update the current zone position. Positions
* of already scanned zones have been updated
* during the respective zone switches.
*/
tc = lcn + bmp_pos + 1;
ntfs_debug("Done. Updating current zone "
"position, tc 0x%llx, "
"search_zone %i.",
(unsigned long long)tc,
search_zone);
switch (search_zone) {
case 1:
ntfs_debug("Before checks, "
"vol->mft_zone_pos "
"0x%llx.",
(unsigned long long)
vol->mft_zone_pos);
if (tc >= vol->mft_zone_end) {
vol->mft_zone_pos =
vol->mft_lcn;
if (!vol->mft_zone_end)
vol->mft_zone_pos = 0;
} else if ((bmp_initial_pos >=
vol->mft_zone_pos ||
tc > vol->mft_zone_pos)
&& tc >= vol->mft_lcn)
vol->mft_zone_pos = tc;
ntfs_debug("After checks, "
"vol->mft_zone_pos "
"0x%llx.",
(unsigned long long)
vol->mft_zone_pos);
break;
case 2:
ntfs_debug("Before checks, "
"vol->data1_zone_pos "
"0x%llx.",
(unsigned long long)
vol->data1_zone_pos);
if (tc >= vol->nr_clusters)
vol->data1_zone_pos =
vol->mft_zone_end;
else if ((bmp_initial_pos >=
vol->data1_zone_pos ||
tc > vol->data1_zone_pos)
&& tc >= vol->mft_zone_end)
vol->data1_zone_pos = tc;
ntfs_debug("After checks, "
"vol->data1_zone_pos "
"0x%llx.",
(unsigned long long)
vol->data1_zone_pos);
break;
case 4:
ntfs_debug("Before checks, "
"vol->data2_zone_pos "
"0x%llx.",
(unsigned long long)
vol->data2_zone_pos);
if (tc >= vol->mft_zone_start)
vol->data2_zone_pos = 0;
else if (bmp_initial_pos >=
vol->data2_zone_pos ||
tc > vol->data2_zone_pos)
vol->data2_zone_pos = tc;
ntfs_debug("After checks, "
"vol->data2_zone_pos "
"0x%llx.",
(unsigned long long)
vol->data2_zone_pos);
break;
default:
BUG();
}
ntfs_debug("Finished. Going to out.");
goto out;
}
lcn++;
}
bmp_pos += buf_size;
ntfs_debug("After inner while loop: buf_size 0x%x, lcn "
"0x%llx, bmp_pos 0x%llx, need_writeback %i.",
buf_size, (unsigned long long)lcn,
(unsigned long long)bmp_pos, need_writeback);
if (bmp_pos < zone_end) {
ntfs_debug("Continuing outer while loop, "
"bmp_pos 0x%llx, zone_end 0x%llx.",
(unsigned long long)bmp_pos,
(unsigned long long)zone_end);
continue;
}
zone_pass_done: /* Finished with the current zone pass. */
ntfs_debug("At zone_pass_done, pass %i.", pass);
if (pass == 1) {
/*
* Now do pass 2, scanning the first part of the zone
* we omitted in pass 1.
*/
pass = 2;
zone_end = zone_start;
switch (search_zone) {
case 1: /* mft_zone */
zone_start = vol->mft_zone_start;
break;
case 2: /* data1_zone */
zone_start = vol->mft_zone_end;
break;
case 4: /* data2_zone */
zone_start = 0;
break;
default:
BUG();
}
/* Sanity check. */
if (zone_end < zone_start)
zone_end = zone_start;
bmp_pos = zone_start;
ntfs_debug("Continuing outer while loop, pass 2, "
"zone_start 0x%llx, zone_end 0x%llx, "
"bmp_pos 0x%llx.",
(unsigned long long)zone_start,
(unsigned long long)zone_end,
(unsigned long long)bmp_pos);
continue;
} /* pass == 2 */
done_zones_check:
ntfs_debug("At done_zones_check, search_zone %i, done_zones "
"before 0x%x, done_zones after 0x%x.",
search_zone, done_zones,
done_zones | search_zone);
done_zones |= search_zone;
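		/*
		 * done_zones is a bitmask: 1 == mft zone, 2 == data1 zone,
		 * 4 == data2 zone, so 7 means all zones have been searched.
		 */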
if (done_zones < 7) {
ntfs_debug("Switching zone.");
/* Now switch to the next zone we haven't done yet. */
pass = 1;
switch (search_zone) {
case 1:
ntfs_debug("Switching from mft zone to data1 "
"zone.");
/* Update mft zone position. */
if (rlpos) {
LCN tc;
ntfs_debug("Before checks, "
"vol->mft_zone_pos "
"0x%llx.",
(unsigned long long)
vol->mft_zone_pos);
tc = rl[rlpos - 1].lcn +
rl[rlpos - 1].length;
if (tc >= vol->mft_zone_end) {
vol->mft_zone_pos =
vol->mft_lcn;
if (!vol->mft_zone_end)
vol->mft_zone_pos = 0;
} else if ((bmp_initial_pos >=
vol->mft_zone_pos ||
tc > vol->mft_zone_pos)
&& tc >= vol->mft_lcn)
vol->mft_zone_pos = tc;
ntfs_debug("After checks, "
"vol->mft_zone_pos "
"0x%llx.",
(unsigned long long)
vol->mft_zone_pos);
}
/* Switch from mft zone to data1 zone. */
switch_to_data1_zone: search_zone = 2;
zone_start = bmp_initial_pos =
vol->data1_zone_pos;
zone_end = vol->nr_clusters;
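				/*
				 * Starting at the beginning of the data1 zone
				 * means a single pass over it is sufficient.
				 */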
if (zone_start == vol->mft_zone_end)
pass = 2;
if (zone_start >= zone_end) {
vol->data1_zone_pos = zone_start =
vol->mft_zone_end;
pass = 2;
}
break;
case 2:
ntfs_debug("Switching from data1 zone to "
"data2 zone.");
/* Update data1 zone position. */
if (rlpos) {
LCN tc;
ntfs_debug("Before checks, "
"vol->data1_zone_pos "
"0x%llx.",
(unsigned long long)
vol->data1_zone_pos);
tc = rl[rlpos - 1].lcn +
rl[rlpos - 1].length;
if (tc >= vol->nr_clusters)
vol->data1_zone_pos =
vol->mft_zone_end;
else if ((bmp_initial_pos >=
vol->data1_zone_pos ||
tc > vol->data1_zone_pos)
&& tc >= vol->mft_zone_end)
vol->data1_zone_pos = tc;
ntfs_debug("After checks, "
"vol->data1_zone_pos "
"0x%llx.",
(unsigned long long)
vol->data1_zone_pos);
}
/* Switch from data1 zone to data2 zone. */
search_zone = 4;
zone_start = bmp_initial_pos =
vol->data2_zone_pos;
zone_end = vol->mft_zone_start;
if (!zone_start)
pass = 2;
if (zone_start >= zone_end) {
vol->data2_zone_pos = zone_start =
bmp_initial_pos = 0;
pass = 2;
}
break;
case 4:
ntfs_debug("Switching from data2 zone to "
"data1 zone.");
/* Update data2 zone position. */
if (rlpos) {
LCN tc;
ntfs_debug("Before checks, "
"vol->data2_zone_pos "
"0x%llx.",
(unsigned long long)
vol->data2_zone_pos);
tc = rl[rlpos - 1].lcn +
rl[rlpos - 1].length;
if (tc >= vol->mft_zone_start)
vol->data2_zone_pos = 0;
else if (bmp_initial_pos >=
vol->data2_zone_pos ||
tc > vol->data2_zone_pos)
vol->data2_zone_pos = tc;
ntfs_debug("After checks, "
"vol->data2_zone_pos "
"0x%llx.",
(unsigned long long)
vol->data2_zone_pos);
}
/* Switch from data2 zone to data1 zone. */
goto switch_to_data1_zone;
default:
BUG();
}
ntfs_debug("After zone switch, search_zone %i, "
"pass %i, bmp_initial_pos 0x%llx, "
"zone_start 0x%llx, zone_end 0x%llx.",
search_zone, pass,
(unsigned long long)bmp_initial_pos,
(unsigned long long)zone_start,
(unsigned long long)zone_end);
bmp_pos = zone_start;
if (zone_start == zone_end) {
ntfs_debug("Empty zone, going to "
"done_zones_check.");
/* Empty zone. Don't bother searching it. */
goto done_zones_check;
}
ntfs_debug("Continuing outer while loop.");
continue;
} /* done_zones == 7 */
ntfs_debug("All zones are finished.");
/*
* All zones are finished! If DATA_ZONE, shrink mft zone. If
* MFT_ZONE, we have really run out of space.
*/
mft_zone_size = vol->mft_zone_end - vol->mft_zone_start;
ntfs_debug("vol->mft_zone_start 0x%llx, vol->mft_zone_end "
"0x%llx, mft_zone_size 0x%llx.",
(unsigned long long)vol->mft_zone_start,
(unsigned long long)vol->mft_zone_end,
(unsigned long long)mft_zone_size);
if (zone == MFT_ZONE || mft_zone_size <= 0) {
ntfs_debug("No free clusters left, going to out.");
/* Really no more space left on device. */
err = -ENOSPC;
goto out;
} /* zone == DATA_ZONE && mft_zone_size > 0 */
ntfs_debug("Shrinking mft zone.");
zone_end = vol->mft_zone_end;
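		/*
		 * Halve the mft zone; the data1 zone grows downwards to take
		 * over the released clusters.
		 */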
mft_zone_size >>= 1;
if (mft_zone_size > 0)
vol->mft_zone_end = vol->mft_zone_start + mft_zone_size;
else /* mft zone and data2 zone no longer exist. */
vol->data2_zone_pos = vol->mft_zone_start =
vol->mft_zone_end = 0;
if (vol->mft_zone_pos >= vol->mft_zone_end) {
vol->mft_zone_pos = vol->mft_lcn;
if (!vol->mft_zone_end)
vol->mft_zone_pos = 0;
}
bmp_pos = zone_start = bmp_initial_pos =
vol->data1_zone_pos = vol->mft_zone_end;
search_zone = 2;
pass = 2;
done_zones &= ~2;
ntfs_debug("After shrinking mft zone, mft_zone_size 0x%llx, "
"vol->mft_zone_start 0x%llx, "
"vol->mft_zone_end 0x%llx, "
"vol->mft_zone_pos 0x%llx, search_zone 2, "
"pass 2, dones_zones 0x%x, zone_start 0x%llx, "
"zone_end 0x%llx, vol->data1_zone_pos 0x%llx, "
"continuing outer while loop.",
(unsigned long long)mft_zone_size,
(unsigned long long)vol->mft_zone_start,
(unsigned long long)vol->mft_zone_end,
(unsigned long long)vol->mft_zone_pos,
done_zones, (unsigned long long)zone_start,
(unsigned long long)zone_end,
(unsigned long long)vol->data1_zone_pos);
}
ntfs_debug("After outer while loop.");
out:
ntfs_debug("At out.");
/* Add runlist terminator element. */
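	/*
	 * The terminator lcn is LCN_ENOENT when extending the attribute (the
	 * runlist really ends here) and LCN_RL_NOT_MAPPED otherwise, so the
	 * fragment can later be merged with the rest of the runlist.
	 */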
if (likely(rl)) {
rl[rlpos].vcn = rl[rlpos - 1].vcn + rl[rlpos - 1].length;
rl[rlpos].lcn = is_extension ? LCN_ENOENT : LCN_RL_NOT_MAPPED;
rl[rlpos].length = 0;
}
if (likely(page && !IS_ERR(page))) {
if (need_writeback) {
ntfs_debug("Marking page dirty.");
flush_dcache_page(page);
set_page_dirty(page);
need_writeback = 0;
}
ntfs_unmap_page(page);
}
if (likely(!err)) {
up_write(&vol->lcnbmp_lock);
ntfs_debug("Done.");
return rl;
}
ntfs_error(vol->sb, "Failed to allocate clusters, aborting "
"(error %i).", err);
if (rl) {
int err2;
if (err == -ENOSPC)
ntfs_debug("Not enough space to complete allocation, "
"err -ENOSPC, first free lcn 0x%llx, "
"could allocate up to 0x%llx "
"clusters.",
(unsigned long long)rl[0].lcn,
(unsigned long long)(count - clusters));
/* Deallocate all allocated clusters. */
ntfs_debug("Attempting rollback...");
err2 = ntfs_cluster_free_from_rl_nolock(vol, rl);
if (err2) {
ntfs_error(vol->sb, "Failed to rollback (error %i). "
"Leaving inconsistent metadata! "
"Unmount and run chkdsk.", err2);
NVolSetErrors(vol);
}
/* Free the runlist. */
ntfs_free(rl);
} else if (err == -ENOSPC)
ntfs_debug("No space left at all, err = -ENOSPC, first free "
"lcn = 0x%llx.",
(long long)vol->data1_zone_pos);
up_write(&vol->lcnbmp_lock);
return ERR_PTR(err);
}
/**
* __ntfs_cluster_free - free clusters on an ntfs volume
* @ni: ntfs inode whose runlist describes the clusters to free
* @start_vcn: vcn in the runlist of @ni at which to start freeing clusters
* @count: number of clusters to free or -1 for all clusters
* @ctx: active attribute search context if present or NULL if not
* @is_rollback: true if this is a rollback operation
*
* Free @count clusters starting at the cluster @start_vcn in the runlist
* described by the vfs inode @ni.
*
* If @count is -1, all clusters from @start_vcn to the end of the runlist are
* deallocated. Thus, to completely free all clusters in a runlist, use
* @start_vcn = 0 and @count = -1.
*
* If @ctx is specified, it is an active search context of @ni and its base mft
* record. This is needed when __ntfs_cluster_free() encounters unmapped
* runlist fragments and allows their mapping. If you do not have the mft
* record mapped, you can specify @ctx as NULL and __ntfs_cluster_free() will
* perform the necessary mapping and unmapping.
*
* Note, __ntfs_cluster_free() saves the state of @ctx on entry and restores it
* before returning. Thus, @ctx will be left pointing to the same attribute on
* return as on entry. However, the actual pointers in @ctx may point to
* different memory locations on return, so you must remember to reset any
* cached pointers from the @ctx, i.e. after the call to __ntfs_cluster_free(),
* you will probably want to do:
* m = ctx->mrec;
* a = ctx->attr;
* Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
* you cache ctx->mrec in a variable @m of type MFT_RECORD *.
*
* @is_rollback should always be 'false', it is for internal use to rollback
* errors. You probably want to use ntfs_cluster_free() instead.
*
 * Note, __ntfs_cluster_free() does not modify the runlist, so you have to
 * remove the freed runs from the runlist or mark them sparse later.
*
* Return the number of deallocated clusters (not counting sparse ones) on
* success and -errno on error.
*
* WARNING: If @ctx is supplied, regardless of whether success or failure is
* returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
* is no longer valid, i.e. you need to either call
* ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
* In that case PTR_ERR(@ctx->mrec) will give you the error code for
* why the mapping of the old inode failed.
*
* Locking: - The runlist described by @ni must be locked for writing on entry
* and is locked on return. Note the runlist may be modified when
* needed runlist fragments need to be mapped.
* - The volume lcn bitmap must be unlocked on entry and is unlocked
* on return.
* - This function takes the volume lcn bitmap lock for writing and
* modifies the bitmap contents.
* - If @ctx is NULL, the base mft record of @ni must not be mapped on
* entry and it will be left unmapped on return.
* - If @ctx is not NULL, the base mft record must be mapped on entry
* and it will be left mapped on return.
*/
s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count,
ntfs_attr_search_ctx *ctx, const bool is_rollback)
{
s64 delta, to_free, total_freed, real_freed;
ntfs_volume *vol;
struct inode *lcnbmp_vi;
runlist_element *rl;
int err;
BUG_ON(!ni);
ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count "
"0x%llx.%s", ni->mft_no, (unsigned long long)start_vcn,
(unsigned long long)count,
is_rollback ? " (rollback)" : "");
vol = ni->vol;
lcnbmp_vi = vol->lcnbmp_ino;
BUG_ON(!lcnbmp_vi);
BUG_ON(start_vcn < 0);
BUG_ON(count < -1);
/*
* Lock the lcn bitmap for writing but only if not rolling back. We
* must hold the lock all the way including through rollback otherwise
* rollback is not possible because once we have cleared a bit and
* dropped the lock, anyone could have set the bit again, thus
* allocating the cluster for another use.
*/
if (likely(!is_rollback))
down_write(&vol->lcnbmp_lock);
total_freed = real_freed = 0;
rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, ctx);
if (IS_ERR(rl)) {
if (!is_rollback)
ntfs_error(vol->sb, "Failed to find first runlist "
"element (error %li), aborting.",
PTR_ERR(rl));
err = PTR_ERR(rl);
goto err_out;
}
if (unlikely(rl->lcn < LCN_HOLE)) {
if (!is_rollback)
ntfs_error(vol->sb, "First runlist element has "
"invalid lcn, aborting.");
err = -EIO;
goto err_out;
}
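	/*
	 * Example: if this run maps VCNs 5-9 and @start_vcn is 7, delta is 2
	 * and at most three clusters can be freed from this run.
	 */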
/* Find the starting cluster inside the run that needs freeing. */
delta = start_vcn - rl->vcn;
/* The number of clusters in this run that need freeing. */
to_free = rl->length - delta;
if (count >= 0 && to_free > count)
to_free = count;
if (likely(rl->lcn >= 0)) {
/* Do the actual freeing of the clusters in this run. */
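		/* Value 0 clears the bits (free); a rollback re-sets them. */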
err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn + delta,
to_free, likely(!is_rollback) ? 0 : 1);
if (unlikely(err)) {
if (!is_rollback)
ntfs_error(vol->sb, "Failed to clear first run "
"(error %i), aborting.", err);
goto err_out;
}
/* We have freed @to_free real clusters. */
real_freed = to_free;
	}
/* Go to the next run and adjust the number of clusters left to free. */
++rl;
if (count >= 0)
count -= to_free;
/* Keep track of the total "freed" clusters, including sparse ones. */
total_freed = to_free;
/*
* Loop over the remaining runs, using @count as a capping value, and
* free them.
*/
for (; rl->length && count != 0; ++rl) {
if (unlikely(rl->lcn < LCN_HOLE)) {
VCN vcn;
/* Attempt to map runlist. */
vcn = rl->vcn;
rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
if (!is_rollback)
ntfs_error(vol->sb, "Failed to map "
"runlist fragment or "
"failed to find "
"subsequent runlist "
"element.");
goto err_out;
}
if (unlikely(rl->lcn < LCN_HOLE)) {
if (!is_rollback)
ntfs_error(vol->sb, "Runlist element "
"has invalid lcn "
"(0x%llx).",
(unsigned long long)
rl->lcn);
err = -EIO;
goto err_out;
}
}
/* The number of clusters in this run that need freeing. */
to_free = rl->length;
if (count >= 0 && to_free > count)
to_free = count;
if (likely(rl->lcn >= 0)) {
/* Do the actual freeing of the clusters in the run. */
err = ntfs_bitmap_set_bits_in_run(lcnbmp_vi, rl->lcn,
to_free, likely(!is_rollback) ? 0 : 1);
if (unlikely(err)) {
if (!is_rollback)
ntfs_error(vol->sb, "Failed to clear "
"subsequent run.");
goto err_out;
}
/* We have freed @to_free real clusters. */
real_freed += to_free;
}
/* Adjust the number of clusters left to free. */
if (count >= 0)
count -= to_free;
/* Update the total done clusters. */
total_freed += to_free;
}
if (likely(!is_rollback))
up_write(&vol->lcnbmp_lock);
BUG_ON(count > 0);
/* We are done. Return the number of actually freed clusters. */
ntfs_debug("Done.");
return real_freed;
err_out:
if (is_rollback)
return err;
/* If no real clusters were freed, no need to rollback. */
if (!real_freed) {
up_write(&vol->lcnbmp_lock);
return err;
}
/*
* Attempt to rollback and if that succeeds just return the error code.
* If rollback fails, set the volume errors flag, emit an error
* message, and return the error code.
*/
delta = __ntfs_cluster_free(ni, start_vcn, total_freed, ctx, true);
if (delta < 0) {
ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving "
"inconsistent metadata! Unmount and run "
"chkdsk.", (int)delta);
NVolSetErrors(vol);
}
up_write(&vol->lcnbmp_lock);
ntfs_error(vol->sb, "Aborting (error %i).", err);
return err;
}
#endif /* NTFS_RW */
| linux-master | fs/ntfs/lcnalloc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dir.c - NTFS kernel directory operations. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2002 Richard Russon
*/
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "dir.h"
#include "aops.h"
#include "attrib.h"
#include "mft.h"
#include "debug.h"
#include "ntfs.h"
/*
* The little endian Unicode string $I30 as a global constant.
*/
ntfschar I30[5] = { cpu_to_le16('$'), cpu_to_le16('I'),
cpu_to_le16('3'), cpu_to_le16('0'), 0 };
/**
* ntfs_lookup_inode_by_name - find an inode in a directory given its name
* @dir_ni: ntfs inode of the directory in which to search for the name
* @uname: Unicode name for which to search in the directory
* @uname_len: length of the name @uname in Unicode characters
* @res: return the found file name if necessary (see below)
*
* Look for an inode with name @uname in the directory with inode @dir_ni.
* ntfs_lookup_inode_by_name() walks the contents of the directory looking for
* the Unicode name. If the name is found in the directory, the corresponding
* inode number (>= 0) is returned as a mft reference in cpu format, i.e. it
* is a 64-bit number containing the sequence number.
*
* On error, a negative value is returned corresponding to the error code. In
* particular if the inode is not found -ENOENT is returned. Note that you
* can't just check the return value for being negative, you have to check the
 * inode number for being negative which you can extract using MREF(return
* value).
*
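 * Illustrative caller pattern (variable names illustrative, cf.
 * ntfs_lookup() in namei.c):
 *
 *	mref = ntfs_lookup_inode_by_name(dir_ni, uname, uname_len, &name);
 *	if (IS_ERR_MREF(mref))
 *		err = MREF_ERR(mref);	// extract the error code
 *	else
 *		ino = MREF(mref);	// extract the mft record number
 *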
* Note, @uname_len does not include the (optional) terminating NULL character.
*
* Note, we look for a case sensitive match first but we also look for a case
* insensitive match at the same time. If we find a case insensitive match, we
* save that for the case that we don't find an exact match, where we return
* the case insensitive match and setup @res (which we allocate!) with the mft
* reference, the file name type, length and with a copy of the little endian
* Unicode file name itself. If we match a file name which is in the DOS name
* space, we only return the mft reference and file name type in @res.
* ntfs_lookup() then uses this to find the long file name in the inode itself.
* This is to avoid polluting the dcache with short file names. We want them to
* work but we don't care for how quickly one can access them. This also fixes
* the dcache aliasing issues.
*
* Locking: - Caller must hold i_mutex on the directory.
* - Each page cache page in the index allocation mapping must be
* locked whilst being accessed otherwise we may find a corrupt
* page due to it being under ->writepage at the moment which
* applies the mst protection fixups before writing out and then
* removes them again after the write is complete after which it
* unlocks the page.
*/
MFT_REF ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname,
const int uname_len, ntfs_name **res)
{
ntfs_volume *vol = dir_ni->vol;
struct super_block *sb = vol->sb;
MFT_RECORD *m;
INDEX_ROOT *ir;
INDEX_ENTRY *ie;
INDEX_ALLOCATION *ia;
u8 *index_end;
u64 mref;
ntfs_attr_search_ctx *ctx;
int err, rc;
VCN vcn, old_vcn;
struct address_space *ia_mapping;
struct page *page;
u8 *kaddr;
ntfs_name *name = NULL;
BUG_ON(!S_ISDIR(VFS_I(dir_ni)->i_mode));
BUG_ON(NInoAttr(dir_ni));
/* Get hold of the mft record for the directory. */
m = map_mft_record(dir_ni);
if (IS_ERR(m)) {
ntfs_error(sb, "map_mft_record() failed with error code %ld.",
-PTR_ERR(m));
return ERR_MREF(PTR_ERR(m));
}
ctx = ntfs_attr_get_search_ctx(dir_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
/* Find the index root attribute in the mft record. */
err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL,
0, ctx);
if (unlikely(err)) {
if (err == -ENOENT) {
ntfs_error(sb, "Index root attribute missing in "
"directory inode 0x%lx.",
dir_ni->mft_no);
err = -EIO;
}
goto err_out;
}
/* Get to the index root value (it's been verified in read_inode). */
ir = (INDEX_ROOT*)((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset));
index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
/* The first index entry. */
ie = (INDEX_ENTRY*)((u8*)&ir->index +
le32_to_cpu(ir->index.entries_offset));
/*
* Loop until we exceed valid memory (corruption case) or until we
* reach the last entry.
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
/* Bounds checks. */
if ((u8*)ie < (u8*)ctx->mrec || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
(u8*)ie + le16_to_cpu(ie->key_length) >
index_end)
goto dir_err_out;
/*
* The last entry cannot contain a name. It can however contain
* a pointer to a child node in the B+tree so we just break out.
*/
if (ie->flags & INDEX_ENTRY_END)
break;
/*
* We perform a case sensitive comparison and if that matches
* we are done and return the mft reference of the inode (i.e.
* the inode number together with the sequence number for
* consistency checking). We convert it to cpu format before
* returning.
*/
if (ntfs_are_names_equal(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length,
CASE_SENSITIVE, vol->upcase, vol->upcase_len)) {
found_it:
/*
* We have a perfect match, so we don't need to care
* about having matched imperfectly before, so we can
* free name and set *res to NULL.
* However, if the perfect match is a short file name,
* we need to signal this through *res, so that
* ntfs_lookup() can fix dcache aliasing issues.
* As an optimization we just reuse an existing
* allocation of *res.
*/
if (ie->key.file_name.file_name_type == FILE_NAME_DOS) {
if (!name) {
name = kmalloc(sizeof(ntfs_name),
GFP_NOFS);
if (!name) {
err = -ENOMEM;
goto err_out;
}
}
name->mref = le64_to_cpu(
ie->data.dir.indexed_file);
name->type = FILE_NAME_DOS;
name->len = 0;
*res = name;
} else {
kfree(name);
*res = NULL;
}
mref = le64_to_cpu(ie->data.dir.indexed_file);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(dir_ni);
return mref;
}
/*
* For a case insensitive mount, we also perform a case
* insensitive comparison (provided the file name is not in the
* POSIX namespace). If the comparison matches, and the name is
* in the WIN32 namespace, we cache the filename in *res so
* that the caller, ntfs_lookup(), can work on it. If the
* comparison matches, and the name is in the DOS namespace, we
* only cache the mft reference and the file name type (we set
* the name length to zero for simplicity).
*/
if (!NVolCaseSensitive(vol) &&
ie->key.file_name.file_name_type &&
ntfs_are_names_equal(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length,
IGNORE_CASE, vol->upcase, vol->upcase_len)) {
int name_size = sizeof(ntfs_name);
u8 type = ie->key.file_name.file_name_type;
u8 len = ie->key.file_name.file_name_length;
/* Only one case insensitive matching name allowed. */
if (name) {
ntfs_error(sb, "Found already allocated name "
"in phase 1. Please run chkdsk "
"and if that doesn't find any "
"errors please report you saw "
"this message to "
"linux-ntfs-dev@lists."
"sourceforge.net.");
goto dir_err_out;
}
if (type != FILE_NAME_DOS)
name_size += len * sizeof(ntfschar);
name = kmalloc(name_size, GFP_NOFS);
if (!name) {
err = -ENOMEM;
goto err_out;
}
name->mref = le64_to_cpu(ie->data.dir.indexed_file);
name->type = type;
if (type != FILE_NAME_DOS) {
name->len = len;
memcpy(name->name, ie->key.file_name.file_name,
len * sizeof(ntfschar));
} else
name->len = 0;
*res = name;
}
/*
* Not a perfect match, need to do full blown collation so we
* know which way in the B+tree we have to go.
*/
rc = ntfs_collate_names(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, 1,
IGNORE_CASE, vol->upcase, vol->upcase_len);
/*
* If uname collates before the name of the current entry, there
* is definitely no such name in this index but we might need to
* descend into the B+tree so we just break out of the loop.
*/
if (rc == -1)
break;
/* The names are not equal, continue the search. */
if (rc)
continue;
/*
* Names match with case insensitive comparison, now try the
* case sensitive comparison, which is required for proper
* collation.
*/
rc = ntfs_collate_names(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, 1,
CASE_SENSITIVE, vol->upcase, vol->upcase_len);
if (rc == -1)
break;
if (rc)
continue;
/*
* Perfect match, this will never happen as the
* ntfs_are_names_equal() call will have gotten a match but we
* still treat it correctly.
*/
goto found_it;
}
/*
* We have finished with this index without success. Check for the
* presence of a child node and if not present return -ENOENT, unless
* we have got a matching name cached in name in which case return the
* mft reference associated with it.
*/
if (!(ie->flags & INDEX_ENTRY_NODE)) {
if (name) {
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(dir_ni);
return name->mref;
}
ntfs_debug("Entry not found.");
err = -ENOENT;
goto err_out;
} /* Child node present, descend into it. */
/* Consistency check: Verify that an index allocation exists. */
if (!NInoIndexAllocPresent(dir_ni)) {
ntfs_error(sb, "No index allocation attribute but index entry "
"requires one. Directory inode 0x%lx is "
"corrupt or driver bug.", dir_ni->mft_no);
goto err_out;
}
/* Get the starting vcn of the index_block holding the child node. */
vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
ia_mapping = VFS_I(dir_ni)->i_mapping;
/*
* We are done with the index root and the mft record. Release them,
* otherwise we deadlock with ntfs_map_page().
*/
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(dir_ni);
m = NULL;
ctx = NULL;
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
* of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(sb, "Failed to map directory index page, error %ld.",
-PTR_ERR(page));
err = PTR_ERR(page);
goto err_out;
}
lock_page(page);
kaddr = (u8*)page_address(page);
fast_descend_into_child_node:
/* Get to the index allocation block. */
ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
/* Bounds checks. */
if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
"inode 0x%lx or driver bug.", dir_ni->mft_no);
goto unm_err_out;
}
/* Catch multi sector transfer fixup errors. */
if (unlikely(!ntfs_is_indx_record(ia->magic))) {
ntfs_error(sb, "Directory index record with vcn 0x%llx is "
"corrupt. Corrupt inode 0x%lx. Run chkdsk.",
(unsigned long long)vcn, dir_ni->mft_no);
goto unm_err_out;
}
if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
"different from expected VCN (0x%llx). "
"Directory inode 0x%lx is corrupt or driver "
"bug.", (unsigned long long)
sle64_to_cpu(ia->index_block_vcn),
(unsigned long long)vcn, dir_ni->mft_no);
goto unm_err_out;
}
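	/* 0x18 is the offset of the index header within INDEX_ALLOCATION. */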
if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
dir_ni->itype.index.block_size) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx has a size (%u) differing from the "
"directory specified size (%u). Directory "
"inode is corrupt or driver bug.",
(unsigned long long)vcn, dir_ni->mft_no,
le32_to_cpu(ia->index.allocated_size) + 0x18,
dir_ni->itype.index.block_size);
goto unm_err_out;
}
index_end = (u8*)ia + dir_ni->itype.index.block_size;
if (index_end > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx crosses page boundary. Impossible! "
"Cannot access! This is probably a bug in the "
"driver.", (unsigned long long)vcn,
dir_ni->mft_no);
goto unm_err_out;
}
index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
if (index_end > (u8*)ia + dir_ni->itype.index.block_size) {
ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of directory "
"inode 0x%lx exceeds maximum size.",
(unsigned long long)vcn, dir_ni->mft_no);
goto unm_err_out;
}
/* The first index entry. */
ie = (INDEX_ENTRY*)((u8*)&ia->index +
le32_to_cpu(ia->index.entries_offset));
/*
* Iterate similar to above big loop but applied to index buffer, thus
* loop until we exceed valid memory (corruption case) or until we
* reach the last entry.
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
/* Bounds check. */
if ((u8*)ie < (u8*)ia || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
(u8*)ie + le16_to_cpu(ie->key_length) >
index_end) {
ntfs_error(sb, "Index entry out of bounds in "
"directory inode 0x%lx.",
dir_ni->mft_no);
goto unm_err_out;
}
/*
* The last entry cannot contain a name. It can however contain
* a pointer to a child node in the B+tree so we just break out.
*/
if (ie->flags & INDEX_ENTRY_END)
break;
/*
* We perform a case sensitive comparison and if that matches
* we are done and return the mft reference of the inode (i.e.
* the inode number together with the sequence number for
* consistency checking). We convert it to cpu format before
* returning.
*/
if (ntfs_are_names_equal(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length,
CASE_SENSITIVE, vol->upcase, vol->upcase_len)) {
found_it2:
/*
* We have a perfect match, so we don't need to care
* about having matched imperfectly before, so we can
* free name and set *res to NULL.
* However, if the perfect match is a short file name,
* we need to signal this through *res, so that
* ntfs_lookup() can fix dcache aliasing issues.
* As an optimization we just reuse an existing
* allocation of *res.
*/
if (ie->key.file_name.file_name_type == FILE_NAME_DOS) {
if (!name) {
name = kmalloc(sizeof(ntfs_name),
GFP_NOFS);
if (!name) {
err = -ENOMEM;
goto unm_err_out;
}
}
name->mref = le64_to_cpu(
ie->data.dir.indexed_file);
name->type = FILE_NAME_DOS;
name->len = 0;
*res = name;
} else {
kfree(name);
*res = NULL;
}
mref = le64_to_cpu(ie->data.dir.indexed_file);
unlock_page(page);
ntfs_unmap_page(page);
return mref;
}
/*
* For a case insensitive mount, we also perform a case
* insensitive comparison (provided the file name is not in the
* POSIX namespace). If the comparison matches, and the name is
* in the WIN32 namespace, we cache the filename in *res so
* that the caller, ntfs_lookup(), can work on it. If the
* comparison matches, and the name is in the DOS namespace, we
* only cache the mft reference and the file name type (we set
* the name length to zero for simplicity).
*/
if (!NVolCaseSensitive(vol) &&
ie->key.file_name.file_name_type &&
ntfs_are_names_equal(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length,
IGNORE_CASE, vol->upcase, vol->upcase_len)) {
int name_size = sizeof(ntfs_name);
u8 type = ie->key.file_name.file_name_type;
u8 len = ie->key.file_name.file_name_length;
/* Only one case insensitive matching name allowed. */
if (name) {
ntfs_error(sb, "Found already allocated name "
"in phase 2. Please run chkdsk "
"and if that doesn't find any "
"errors please report you saw "
"this message to "
"linux-ntfs-dev@lists."
"sourceforge.net.");
unlock_page(page);
ntfs_unmap_page(page);
goto dir_err_out;
}
if (type != FILE_NAME_DOS)
name_size += len * sizeof(ntfschar);
name = kmalloc(name_size, GFP_NOFS);
if (!name) {
err = -ENOMEM;
goto unm_err_out;
}
name->mref = le64_to_cpu(ie->data.dir.indexed_file);
name->type = type;
if (type != FILE_NAME_DOS) {
name->len = len;
memcpy(name->name, ie->key.file_name.file_name,
len * sizeof(ntfschar));
} else
name->len = 0;
*res = name;
}
/*
* Not a perfect match, need to do full blown collation so we
* know which way in the B+tree we have to go.
*/
rc = ntfs_collate_names(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, 1,
IGNORE_CASE, vol->upcase, vol->upcase_len);
/*
* If uname collates before the name of the current entry, there
* is definitely no such name in this index but we might need to
* descend into the B+tree so we just break out of the loop.
*/
if (rc == -1)
break;
/* The names are not equal, continue the search. */
if (rc)
continue;
/*
* Names match with case insensitive comparison, now try the
* case sensitive comparison, which is required for proper
* collation.
*/
rc = ntfs_collate_names(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, 1,
CASE_SENSITIVE, vol->upcase, vol->upcase_len);
if (rc == -1)
break;
if (rc)
continue;
/*
* Perfect match, this will never happen as the
* ntfs_are_names_equal() call will have gotten a match but we
* still treat it correctly.
*/
goto found_it2;
}
/*
* We have finished with this index buffer without success. Check for
* the presence of a child node.
*/
if (ie->flags & INDEX_ENTRY_NODE) {
if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
ntfs_error(sb, "Index entry with child node found in "
"a leaf node in directory inode 0x%lx.",
dir_ni->mft_no);
goto unm_err_out;
}
/* Child node present, descend into it. */
old_vcn = vcn;
vcn = sle64_to_cpup((sle64*)((u8*)ie +
le16_to_cpu(ie->length) - 8));
if (vcn >= 0) {
/* If vcn is in the same page cache page as old_vcn we
* recycle the mapped page. */
if (old_vcn << vol->cluster_size_bits >>
PAGE_SHIFT == vcn <<
vol->cluster_size_bits >>
PAGE_SHIFT)
goto fast_descend_into_child_node;
unlock_page(page);
ntfs_unmap_page(page);
goto descend_into_child_node;
}
ntfs_error(sb, "Negative child node vcn in directory inode "
"0x%lx.", dir_ni->mft_no);
goto unm_err_out;
}
/*
* No child node present, return -ENOENT, unless we have got a matching
* name cached in name in which case return the mft reference
* associated with it.
*/
if (name) {
unlock_page(page);
ntfs_unmap_page(page);
return name->mref;
}
ntfs_debug("Entry not found.");
err = -ENOENT;
unm_err_out:
unlock_page(page);
ntfs_unmap_page(page);
err_out:
if (!err)
err = -EIO;
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(dir_ni);
if (name) {
kfree(name);
*res = NULL;
}
return ERR_MREF(err);
dir_err_out:
ntfs_error(sb, "Corrupt directory. Aborting lookup.");
goto err_out;
}
#if 0
// TODO: (AIA)
// The algorithm embedded in this code will be required for the time when we
// want to support adding of entries to directories, where we require correct
// collation of file names in order not to cause corruption of the filesystem.
/**
* ntfs_lookup_inode_by_name - find an inode in a directory given its name
* @dir_ni: ntfs inode of the directory in which to search for the name
* @uname: Unicode name for which to search in the directory
* @uname_len: length of the name @uname in Unicode characters
*
* Look for an inode with name @uname in the directory with inode @dir_ni.
* ntfs_lookup_inode_by_name() walks the contents of the directory looking for
* the Unicode name. If the name is found in the directory, the corresponding
* inode number (>= 0) is returned as a mft reference in cpu format, i.e. it
* is a 64-bit number containing the sequence number.
*
* On error, a negative value is returned corresponding to the error code. In
* particular if the inode is not found -ENOENT is returned. Note that you
* can't just check the return value for being negative, you have to check the
 * inode number for being negative which you can extract using MREF(return
* value).
*
* Note, @uname_len does not include the (optional) terminating NULL character.
*/
u64 ntfs_lookup_inode_by_name(ntfs_inode *dir_ni, const ntfschar *uname,
const int uname_len)
{
ntfs_volume *vol = dir_ni->vol;
struct super_block *sb = vol->sb;
MFT_RECORD *m;
INDEX_ROOT *ir;
INDEX_ENTRY *ie;
INDEX_ALLOCATION *ia;
u8 *index_end;
u64 mref;
ntfs_attr_search_ctx *ctx;
int err, rc;
IGNORE_CASE_BOOL ic;
VCN vcn, old_vcn;
struct address_space *ia_mapping;
struct page *page;
u8 *kaddr;
/* Get hold of the mft record for the directory. */
m = map_mft_record(dir_ni);
if (IS_ERR(m)) {
ntfs_error(sb, "map_mft_record() failed with error code %ld.",
-PTR_ERR(m));
return ERR_MREF(PTR_ERR(m));
}
ctx = ntfs_attr_get_search_ctx(dir_ni, m);
if (!ctx) {
err = -ENOMEM;
goto err_out;
}
/* Find the index root attribute in the mft record. */
err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL,
0, ctx);
if (unlikely(err)) {
if (err == -ENOENT) {
ntfs_error(sb, "Index root attribute missing in "
"directory inode 0x%lx.",
dir_ni->mft_no);
err = -EIO;
}
goto err_out;
}
/* Get to the index root value (it's been verified in read_inode). */
ir = (INDEX_ROOT*)((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset));
index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
/* The first index entry. */
ie = (INDEX_ENTRY*)((u8*)&ir->index +
le32_to_cpu(ir->index.entries_offset));
/*
* Loop until we exceed valid memory (corruption case) or until we
* reach the last entry.
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
/* Bounds checks. */
if ((u8*)ie < (u8*)ctx->mrec || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
(u8*)ie + le16_to_cpu(ie->key_length) >
index_end)
goto dir_err_out;
/*
* The last entry cannot contain a name. It can however contain
* a pointer to a child node in the B+tree so we just break out.
*/
if (ie->flags & INDEX_ENTRY_END)
break;
/*
* If the current entry has a name type of POSIX, the name is
* case sensitive and not otherwise. This has the effect of us
* not being able to access any POSIX file names which collate
* after the non-POSIX one when they only differ in case, but
* anyone doing screwy stuff like that deserves to burn in
* hell... Doing that kind of stuff on NT4 actually causes
* corruption on the partition even when using SP6a and Linux
* is not involved at all.
*/
ic = ie->key.file_name.file_name_type ? IGNORE_CASE :
CASE_SENSITIVE;
/*
* If the names match perfectly, we are done and return the
* mft reference of the inode (i.e. the inode number together
 * with the sequence number for consistency checking). We
* convert it to cpu format before returning.
*/
if (ntfs_are_names_equal(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, ic,
vol->upcase, vol->upcase_len)) {
found_it:
mref = le64_to_cpu(ie->data.dir.indexed_file);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(dir_ni);
return mref;
}
/*
* Not a perfect match, need to do full blown collation so we
* know which way in the B+tree we have to go.
*/
rc = ntfs_collate_names(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, 1,
IGNORE_CASE, vol->upcase, vol->upcase_len);
/*
* If uname collates before the name of the current entry, there
* is definitely no such name in this index but we might need to
* descend into the B+tree so we just break out of the loop.
*/
if (rc == -1)
break;
/* The names are not equal, continue the search. */
if (rc)
continue;
/*
* Names match with case insensitive comparison, now try the
* case sensitive comparison, which is required for proper
* collation.
*/
rc = ntfs_collate_names(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, 1,
CASE_SENSITIVE, vol->upcase, vol->upcase_len);
if (rc == -1)
break;
if (rc)
continue;
/*
* Perfect match, this will never happen as the
* ntfs_are_names_equal() call will have gotten a match but we
* still treat it correctly.
*/
goto found_it;
}
/*
* We have finished with this index without success. Check for the
* presence of a child node.
*/
if (!(ie->flags & INDEX_ENTRY_NODE)) {
/* No child node, return -ENOENT. */
err = -ENOENT;
goto err_out;
} /* Child node present, descend into it. */
/* Consistency check: Verify that an index allocation exists. */
if (!NInoIndexAllocPresent(dir_ni)) {
ntfs_error(sb, "No index allocation attribute but index entry "
"requires one. Directory inode 0x%lx is "
"corrupt or driver bug.", dir_ni->mft_no);
goto err_out;
}
/* Get the starting vcn of the index_block holding the child node. */
	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
ia_mapping = VFS_I(dir_ni)->i_mapping;
/*
* We are done with the index root and the mft record. Release them,
* otherwise we deadlock with ntfs_map_page().
*/
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(dir_ni);
m = NULL;
ctx = NULL;
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
* of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(sb, "Failed to map directory index page, error %ld.",
-PTR_ERR(page));
err = PTR_ERR(page);
goto err_out;
}
lock_page(page);
kaddr = (u8*)page_address(page);
fast_descend_into_child_node:
/* Get to the index allocation block. */
ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
/* Bounds checks. */
if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
"inode 0x%lx or driver bug.", dir_ni->mft_no);
goto unm_err_out;
}
/* Catch multi sector transfer fixup errors. */
if (unlikely(!ntfs_is_indx_record(ia->magic))) {
ntfs_error(sb, "Directory index record with vcn 0x%llx is "
"corrupt. Corrupt inode 0x%lx. Run chkdsk.",
(unsigned long long)vcn, dir_ni->mft_no);
goto unm_err_out;
}
if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
"different from expected VCN (0x%llx). "
"Directory inode 0x%lx is corrupt or driver "
"bug.", (unsigned long long)
sle64_to_cpu(ia->index_block_vcn),
(unsigned long long)vcn, dir_ni->mft_no);
goto unm_err_out;
}
if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
dir_ni->itype.index.block_size) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx has a size (%u) differing from the "
"directory specified size (%u). Directory "
"inode is corrupt or driver bug.",
(unsigned long long)vcn, dir_ni->mft_no,
le32_to_cpu(ia->index.allocated_size) + 0x18,
dir_ni->itype.index.block_size);
goto unm_err_out;
}
index_end = (u8*)ia + dir_ni->itype.index.block_size;
if (index_end > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx crosses page boundary. Impossible! "
"Cannot access! This is probably a bug in the "
"driver.", (unsigned long long)vcn,
dir_ni->mft_no);
goto unm_err_out;
}
index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
if (index_end > (u8*)ia + dir_ni->itype.index.block_size) {
ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of directory "
"inode 0x%lx exceeds maximum size.",
(unsigned long long)vcn, dir_ni->mft_no);
goto unm_err_out;
}
/* The first index entry. */
ie = (INDEX_ENTRY*)((u8*)&ia->index +
le32_to_cpu(ia->index.entries_offset));
/*
* Iterate similar to above big loop but applied to index buffer, thus
* loop until we exceed valid memory (corruption case) or until we
* reach the last entry.
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
/* Bounds check. */
if ((u8*)ie < (u8*)ia || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
(u8*)ie + le16_to_cpu(ie->key_length) >
index_end) {
ntfs_error(sb, "Index entry out of bounds in "
"directory inode 0x%lx.",
dir_ni->mft_no);
goto unm_err_out;
}
/*
* The last entry cannot contain a name. It can however contain
* a pointer to a child node in the B+tree so we just break out.
*/
if (ie->flags & INDEX_ENTRY_END)
break;
/*
* If the current entry has a name type of POSIX, the name is
* case sensitive and not otherwise. This has the effect of us
* not being able to access any POSIX file names which collate
* after the non-POSIX one when they only differ in case, but
* anyone doing screwy stuff like that deserves to burn in
* hell... Doing that kind of stuff on NT4 actually causes
* corruption on the partition even when using SP6a and Linux
* is not involved at all.
*/
ic = ie->key.file_name.file_name_type ? IGNORE_CASE :
CASE_SENSITIVE;
/*
* If the names match perfectly, we are done and return the
* mft reference of the inode (i.e. the inode number together
 * with the sequence number for consistency checking). We
* convert it to cpu format before returning.
*/
if (ntfs_are_names_equal(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, ic,
vol->upcase, vol->upcase_len)) {
found_it2:
mref = le64_to_cpu(ie->data.dir.indexed_file);
unlock_page(page);
ntfs_unmap_page(page);
return mref;
}
/*
* Not a perfect match, need to do full blown collation so we
* know which way in the B+tree we have to go.
*/
rc = ntfs_collate_names(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, 1,
IGNORE_CASE, vol->upcase, vol->upcase_len);
/*
* If uname collates before the name of the current entry, there
* is definitely no such name in this index but we might need to
* descend into the B+tree so we just break out of the loop.
*/
if (rc == -1)
break;
/* The names are not equal, continue the search. */
if (rc)
continue;
/*
* Names match with case insensitive comparison, now try the
* case sensitive comparison, which is required for proper
* collation.
*/
rc = ntfs_collate_names(uname, uname_len,
(ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, 1,
CASE_SENSITIVE, vol->upcase, vol->upcase_len);
if (rc == -1)
break;
if (rc)
continue;
/*
* Perfect match, this will never happen as the
* ntfs_are_names_equal() call will have gotten a match but we
* still treat it correctly.
*/
goto found_it2;
}
/*
* We have finished with this index buffer without success. Check for
* the presence of a child node.
*/
if (ie->flags & INDEX_ENTRY_NODE) {
if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
ntfs_error(sb, "Index entry with child node found in "
"a leaf node in directory inode 0x%lx.",
dir_ni->mft_no);
goto unm_err_out;
}
/* Child node present, descend into it. */
old_vcn = vcn;
		vcn = sle64_to_cpup((sle64*)((u8*)ie +
				le16_to_cpu(ie->length) - 8));
if (vcn >= 0) {
/* If vcn is in the same page cache page as old_vcn we
* recycle the mapped page. */
if (old_vcn << vol->cluster_size_bits >>
PAGE_SHIFT == vcn <<
vol->cluster_size_bits >>
PAGE_SHIFT)
goto fast_descend_into_child_node;
unlock_page(page);
ntfs_unmap_page(page);
goto descend_into_child_node;
}
ntfs_error(sb, "Negative child node vcn in directory inode "
"0x%lx.", dir_ni->mft_no);
goto unm_err_out;
}
/* No child node, return -ENOENT. */
ntfs_debug("Entry not found.");
err = -ENOENT;
unm_err_out:
unlock_page(page);
ntfs_unmap_page(page);
err_out:
if (!err)
err = -EIO;
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(dir_ni);
return ERR_MREF(err);
dir_err_out:
ntfs_error(sb, "Corrupt directory. Aborting lookup.");
goto err_out;
}
#endif
/**
* ntfs_filldir - ntfs specific filldir method
* @vol: current ntfs volume
* @ndir: ntfs inode of current directory
 * @ia_page:	page in which the index allocation buffer containing @ie resides
* @ie: current index entry
* @name: buffer to use for the converted name
* @actor: what to feed the entries to
*
* Convert the Unicode @name to the loaded NLS and pass it to the @filldir
* callback.
*
* If @ia_page is not NULL it is the locked page containing the index
* allocation block containing the index entry @ie.
*
* Note, we drop (and then reacquire) the page lock on @ia_page across the
* @filldir() call otherwise we would deadlock with NFSd when it calls ->lookup
* since ntfs_lookup() will lock the same page. As an optimization, we do not
* retake the lock if we are returning a non-zero value as ntfs_readdir()
* would need to drop the lock immediately anyway.
*/
static inline int ntfs_filldir(ntfs_volume *vol,
ntfs_inode *ndir, struct page *ia_page, INDEX_ENTRY *ie,
u8 *name, struct dir_context *actor)
{
unsigned long mref;
int name_len;
unsigned dt_type;
FILE_NAME_TYPE_FLAGS name_type;
name_type = ie->key.file_name.file_name_type;
if (name_type == FILE_NAME_DOS) {
ntfs_debug("Skipping DOS name space entry.");
return 0;
}
if (MREF_LE(ie->data.dir.indexed_file) == FILE_root) {
ntfs_debug("Skipping root directory self reference entry.");
return 0;
}
if (MREF_LE(ie->data.dir.indexed_file) < FILE_first_user &&
!NVolShowSystemFiles(vol)) {
ntfs_debug("Skipping system file.");
return 0;
}
name_len = ntfs_ucstonls(vol, (ntfschar*)&ie->key.file_name.file_name,
ie->key.file_name.file_name_length, &name,
NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1);
if (name_len <= 0) {
ntfs_warning(vol->sb, "Skipping unrepresentable inode 0x%llx.",
(long long)MREF_LE(ie->data.dir.indexed_file));
return 0;
}
if (ie->key.file_name.file_attributes &
FILE_ATTR_DUP_FILE_NAME_INDEX_PRESENT)
dt_type = DT_DIR;
else
dt_type = DT_REG;
mref = MREF_LE(ie->data.dir.indexed_file);
/*
* Drop the page lock otherwise we deadlock with NFS when it calls
* ->lookup since ntfs_lookup() will lock the same page.
*/
if (ia_page)
unlock_page(ia_page);
ntfs_debug("Calling filldir for %s with len %i, fpos 0x%llx, inode "
"0x%lx, DT_%s.", name, name_len, actor->pos, mref,
dt_type == DT_DIR ? "DIR" : "REG");
if (!dir_emit(actor, name, name_len, mref, dt_type))
return 1;
/* Relock the page but not if we are aborting ->readdir. */
if (ia_page)
lock_page(ia_page);
return 0;
}
/*
* We use the same basic approach as the old NTFS driver, i.e. we parse the
* index root entries and then the index allocation entries that are marked
* as in use in the index bitmap.
*
 * While this will return the names in random order, this doesn't matter for
 * ->readdir; on the other hand it results in a faster ->readdir.
*
 * VFS calls ->readdir without the BKL but with i_mutex held. This protects
 * the VFS parts (e.g. ->f_pos and ->i_size) and also protects against
 * directory modifications.
*
* Locking: - Caller must hold i_mutex on the directory.
* - Each page cache page in the index allocation mapping must be
* locked whilst being accessed otherwise we may find a corrupt
* page due to it being under ->writepage at the moment which
* applies the mst protection fixups before writing out and then
* removes them again after the write is complete after which it
* unlocks the page.
*/
static int ntfs_readdir(struct file *file, struct dir_context *actor)
{
s64 ia_pos, ia_start, prev_ia_pos, bmp_pos;
loff_t i_size;
struct inode *bmp_vi, *vdir = file_inode(file);
struct super_block *sb = vdir->i_sb;
ntfs_inode *ndir = NTFS_I(vdir);
ntfs_volume *vol = NTFS_SB(sb);
MFT_RECORD *m;
INDEX_ROOT *ir = NULL;
INDEX_ENTRY *ie;
INDEX_ALLOCATION *ia;
u8 *name = NULL;
int rc, err, ir_pos, cur_bmp_pos;
struct address_space *ia_mapping, *bmp_mapping;
struct page *bmp_page = NULL, *ia_page = NULL;
u8 *kaddr, *bmp, *index_end;
ntfs_attr_search_ctx *ctx;
ntfs_debug("Entering for inode 0x%lx, fpos 0x%llx.",
vdir->i_ino, actor->pos);
rc = err = 0;
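	/*
	 * f_pos layout: 0 and 1 are . and .., values below
	 * vol->mft_record_size are byte offsets into the index root, and
	 * larger values are offsets into the index allocation shifted up by
	 * vol->mft_record_size.
	 */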
/* Are we at end of dir yet? */
i_size = i_size_read(vdir);
if (actor->pos >= i_size + vol->mft_record_size)
return 0;
/* Emulate . and .. for all directories. */
if (!dir_emit_dots(file, actor))
return 0;
m = NULL;
ctx = NULL;
/*
* Allocate a buffer to store the current name being processed
* converted to format determined by current NLS.
*/
name = kmalloc(NTFS_MAX_NAME_LEN * NLS_MAX_CHARSET_SIZE + 1, GFP_NOFS);
if (unlikely(!name)) {
err = -ENOMEM;
goto err_out;
}
/* Are we jumping straight into the index allocation attribute? */
if (actor->pos >= vol->mft_record_size)
goto skip_index_root;
/* Get hold of the mft record for the directory. */
m = map_mft_record(ndir);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(ndir, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
/* Get the offset into the index root attribute. */
ir_pos = (s64)actor->pos;
/* Find the index root attribute in the mft record. */
err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE, 0, NULL,
0, ctx);
if (unlikely(err)) {
ntfs_error(sb, "Index root attribute missing in directory "
"inode 0x%lx.", vdir->i_ino);
goto err_out;
}
/*
* Copy the index root attribute value to a buffer so that we can put
* the search context and unmap the mft record before calling the
* filldir() callback. We need to do this because of NFSd which calls
* ->lookup() from its filldir callback() and this causes NTFS to
* deadlock as ntfs_lookup() maps the mft record of the directory and
* we have got it mapped here already. The only solution is for us to
* unmap the mft record here so that a call to ntfs_lookup() is able to
* map the mft record without deadlocking.
*/
rc = le32_to_cpu(ctx->attr->data.resident.value_length);
ir = kmalloc(rc, GFP_NOFS);
if (unlikely(!ir)) {
err = -ENOMEM;
goto err_out;
}
/* Copy the index root value (it has been verified in read_inode). */
memcpy(ir, (u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset), rc);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ndir);
ctx = NULL;
m = NULL;
index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
/* The first index entry. */
ie = (INDEX_ENTRY*)((u8*)&ir->index +
le32_to_cpu(ir->index.entries_offset));
/*
* Loop until we exceed valid memory (corruption case) or until we
* reach the last entry or until filldir tells us it has had enough
* or signals an error (both covered by the rc test).
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
ntfs_debug("In index root, offset 0x%zx.", (u8*)ie - (u8*)ir);
/* Bounds checks. */
if (unlikely((u8*)ie < (u8*)ir || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
(u8*)ie + le16_to_cpu(ie->key_length) >
index_end))
goto err_out;
/* The last entry cannot contain a name. */
if (ie->flags & INDEX_ENTRY_END)
break;
/* Skip index root entry if continuing previous readdir. */
if (ir_pos > (u8*)ie - (u8*)ir)
continue;
/* Advance the position even if going to skip the entry. */
actor->pos = (u8*)ie - (u8*)ir;
/* Submit the name to the filldir callback. */
rc = ntfs_filldir(vol, ndir, NULL, ie, name, actor);
if (rc) {
kfree(ir);
goto abort;
}
}
/* We are done with the index root and can free the buffer. */
kfree(ir);
ir = NULL;
/* If there is no index allocation attribute we are finished. */
if (!NInoIndexAllocPresent(ndir))
goto EOD;
/* Advance fpos to the beginning of the index allocation. */
actor->pos = vol->mft_record_size;
skip_index_root:
kaddr = NULL;
prev_ia_pos = -1LL;
/* Get the offset into the index allocation attribute. */
ia_pos = (s64)actor->pos - vol->mft_record_size;
ia_mapping = vdir->i_mapping;
ntfs_debug("Inode 0x%lx, getting index bitmap.", vdir->i_ino);
bmp_vi = ntfs_attr_iget(vdir, AT_BITMAP, I30, 4);
if (IS_ERR(bmp_vi)) {
ntfs_error(sb, "Failed to get bitmap attribute.");
err = PTR_ERR(bmp_vi);
goto err_out;
}
bmp_mapping = bmp_vi->i_mapping;
/* Get the starting bitmap bit position and sanity check it. */
bmp_pos = ia_pos >> ndir->itype.index.block_size_bits;
if (unlikely(bmp_pos >> 3 >= i_size_read(bmp_vi))) {
ntfs_error(sb, "Current index allocation position exceeds "
"index bitmap size.");
goto iput_err_out;
}
/* Get the starting bit position in the current bitmap page. */
cur_bmp_pos = bmp_pos & ((PAGE_SIZE * 8) - 1);
bmp_pos &= ~(u64)((PAGE_SIZE * 8) - 1);
get_next_bmp_page:
ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx",
(unsigned long long)bmp_pos >> (3 + PAGE_SHIFT),
(unsigned long long)bmp_pos &
(unsigned long long)((PAGE_SIZE * 8) - 1));
bmp_page = ntfs_map_page(bmp_mapping,
bmp_pos >> (3 + PAGE_SHIFT));
if (IS_ERR(bmp_page)) {
ntfs_error(sb, "Reading index bitmap failed.");
err = PTR_ERR(bmp_page);
bmp_page = NULL;
goto iput_err_out;
}
bmp = (u8*)page_address(bmp_page);
/* Find next index block in use. */
while (!(bmp[cur_bmp_pos >> 3] & (1 << (cur_bmp_pos & 7)))) {
find_next_index_buffer:
cur_bmp_pos++;
/*
* If we have reached the end of the bitmap page, get the next
* page, and put away the old one.
*/
if (unlikely((cur_bmp_pos >> 3) >= PAGE_SIZE)) {
ntfs_unmap_page(bmp_page);
bmp_pos += PAGE_SIZE * 8;
cur_bmp_pos = 0;
goto get_next_bmp_page;
}
/* If we have reached the end of the bitmap, we are done. */
if (unlikely(((bmp_pos + cur_bmp_pos) >> 3) >= i_size))
goto unm_EOD;
ia_pos = (bmp_pos + cur_bmp_pos) <<
ndir->itype.index.block_size_bits;
}
ntfs_debug("Handling index buffer 0x%llx.",
(unsigned long long)bmp_pos + cur_bmp_pos);
/* If the current index buffer is in the same page we reuse the page. */
if ((prev_ia_pos & (s64)PAGE_MASK) !=
(ia_pos & (s64)PAGE_MASK)) {
prev_ia_pos = ia_pos;
if (likely(ia_page != NULL)) {
unlock_page(ia_page);
ntfs_unmap_page(ia_page);
}
/*
* Map the page cache page containing the current ia_pos,
* reading it from disk if necessary.
*/
ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_SHIFT);
if (IS_ERR(ia_page)) {
ntfs_error(sb, "Reading index allocation data failed.");
err = PTR_ERR(ia_page);
ia_page = NULL;
goto err_out;
}
lock_page(ia_page);
kaddr = (u8*)page_address(ia_page);
}
/* Get the current index buffer. */
ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_MASK &
~(s64)(ndir->itype.index.block_size - 1)));
/* Bounds checks. */
if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) {
ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
"inode 0x%lx or driver bug.", vdir->i_ino);
goto err_out;
}
/* Catch multi sector transfer fixup errors. */
if (unlikely(!ntfs_is_indx_record(ia->magic))) {
ntfs_error(sb, "Directory index record with vcn 0x%llx is "
"corrupt. Corrupt inode 0x%lx. Run chkdsk.",
(unsigned long long)ia_pos >>
ndir->itype.index.vcn_size_bits, vdir->i_ino);
goto err_out;
}
if (unlikely(sle64_to_cpu(ia->index_block_vcn) != (ia_pos &
~(s64)(ndir->itype.index.block_size - 1)) >>
ndir->itype.index.vcn_size_bits)) {
ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
"different from expected VCN (0x%llx). "
"Directory inode 0x%lx is corrupt or driver "
"bug. ", (unsigned long long)
sle64_to_cpu(ia->index_block_vcn),
(unsigned long long)ia_pos >>
ndir->itype.index.vcn_size_bits, vdir->i_ino);
goto err_out;
}
if (unlikely(le32_to_cpu(ia->index.allocated_size) + 0x18 !=
ndir->itype.index.block_size)) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx has a size (%u) differing from the "
"directory specified size (%u). Directory "
"inode is corrupt or driver bug.",
(unsigned long long)ia_pos >>
ndir->itype.index.vcn_size_bits, vdir->i_ino,
le32_to_cpu(ia->index.allocated_size) + 0x18,
ndir->itype.index.block_size);
goto err_out;
}
index_end = (u8*)ia + ndir->itype.index.block_size;
if (unlikely(index_end > kaddr + PAGE_SIZE)) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
"0x%lx crosses page boundary. Impossible! "
"Cannot access! This is probably a bug in the "
"driver.", (unsigned long long)ia_pos >>
ndir->itype.index.vcn_size_bits, vdir->i_ino);
goto err_out;
}
ia_start = ia_pos & ~(s64)(ndir->itype.index.block_size - 1);
index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
if (unlikely(index_end > (u8*)ia + ndir->itype.index.block_size)) {
ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of directory "
"inode 0x%lx exceeds maximum size.",
(unsigned long long)ia_pos >>
ndir->itype.index.vcn_size_bits, vdir->i_ino);
goto err_out;
}
/* The first index entry in this index buffer. */
ie = (INDEX_ENTRY*)((u8*)&ia->index +
le32_to_cpu(ia->index.entries_offset));
/*
* Loop until we exceed valid memory (corruption case) or until we
* reach the last entry or until filldir tells us it has had enough
* or signals an error (both covered by the rc test).
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
ntfs_debug("In index allocation, offset 0x%llx.",
(unsigned long long)ia_start +
(unsigned long long)((u8*)ie - (u8*)ia));
/* Bounds checks. */
if (unlikely((u8*)ie < (u8*)ia || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
(u8*)ie + le16_to_cpu(ie->key_length) >
index_end))
goto err_out;
/* The last entry cannot contain a name. */
if (ie->flags & INDEX_ENTRY_END)
break;
/* Skip index block entry if continuing previous readdir. */
if (ia_pos - ia_start > (u8*)ie - (u8*)ia)
continue;
/* Advance the position even if going to skip the entry. */
actor->pos = (u8*)ie - (u8*)ia +
(sle64_to_cpu(ia->index_block_vcn) <<
ndir->itype.index.vcn_size_bits) +
vol->mft_record_size;
/*
* Submit the name to the @filldir callback. Note,
* ntfs_filldir() drops the lock on @ia_page but it retakes it
* before returning, unless a non-zero value is returned in
* which case the page is left unlocked.
*/
rc = ntfs_filldir(vol, ndir, ia_page, ie, name, actor);
if (rc) {
/* @ia_page is already unlocked in this case. */
ntfs_unmap_page(ia_page);
ntfs_unmap_page(bmp_page);
iput(bmp_vi);
goto abort;
}
}
goto find_next_index_buffer;
unm_EOD:
if (ia_page) {
unlock_page(ia_page);
ntfs_unmap_page(ia_page);
}
ntfs_unmap_page(bmp_page);
iput(bmp_vi);
EOD:
/* We are finished, set fpos to EOD. */
actor->pos = i_size + vol->mft_record_size;
abort:
kfree(name);
return 0;
err_out:
if (bmp_page) {
ntfs_unmap_page(bmp_page);
iput_err_out:
iput(bmp_vi);
}
if (ia_page) {
unlock_page(ia_page);
ntfs_unmap_page(ia_page);
}
kfree(ir);
kfree(name);
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(ndir);
if (!err)
err = -EIO;
ntfs_debug("Failed. Returning error code %i.", -err);
return err;
}
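/*
 * Illustrative sketch (not built): how ntfs_readdir() above encodes the
 * directory position. Values below vol->mft_record_size are byte offsets
 * into the copied index root value, while larger values are byte offsets
 * into the index allocation attribute shifted up by vol->mft_record_size so
 * the two ranges cannot collide. The decoder below is hypothetical and only
 * restates that convention.
 */
#if 0
static s64 ntfs_readdir_pos_to_ia_ofs(const ntfs_volume *vol, loff_t pos)
{
	if (pos < vol->mft_record_size)
		return -1;	/* Still inside the index root. */
	return (s64)pos - vol->mft_record_size;
}
#endif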
/**
* ntfs_dir_open - called when an inode is about to be opened
* @vi: inode to be opened
* @filp: file structure describing the inode
*
* Limit directory size to the page cache limit on architectures where unsigned
* long is 32-bits. This is the most we can do for now without overflowing the
 * page cache page index. Doing it this way means we don't run into problems
 * with directories that are already too large. It would be better to allow the
* user to read the accessible part of the directory but I doubt very much
* anyone is going to hit this check on a 32-bit architecture, so there is no
* point in adding the extra complexity required to support this.
*
* On 64-bit architectures, the check is hopefully optimized away by the
* compiler.
*/
static int ntfs_dir_open(struct inode *vi, struct file *filp)
{
if (sizeof(unsigned long) < 8) {
if (i_size_read(vi) > MAX_LFS_FILESIZE)
return -EFBIG;
}
return 0;
}
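/*
 * Worked example for the check above (illustrative assumption: 32-bit
 * architecture with 4kiB pages): the page cache page index is an unsigned
 * long, so the largest fully indexable size is roughly ULONG_MAX pages.
 */
#if 0	/* Never built, numbers for illustration only. */
	/* PAGE_SHIFT == 12, ULONG_MAX == 0xffffffffUL on such a system. */
	loff_t max_indexable = (loff_t)0xffffffffUL << 12; /* ~16TiB - 4kiB */
	/* Any directory with i_size above this would overflow the index. */
#endif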
#ifdef NTFS_RW
/**
* ntfs_dir_fsync - sync a directory to disk
* @filp: directory to be synced
* @dentry: dentry describing the directory to sync
* @datasync: if non-zero only flush user data and not metadata
*
* Data integrity sync of a directory to disk. Used for fsync, fdatasync, and
* msync system calls. This function is based on file.c::ntfs_file_fsync().
*
* Write the mft record and all associated extent mft records as well as the
* $INDEX_ALLOCATION and $BITMAP attributes and then sync the block device.
*
* If @datasync is true, we do not wait on the inode(s) to be written out
* but we always wait on the page cache pages to be written out.
*
* Note: In the past @filp could be NULL so we ignore it as we don't need it
* anyway.
*
* Locking: Caller must hold i_mutex on the inode.
*
* TODO: We should probably also write all attribute/index inodes associated
* with this inode but since we have no simple way of getting to them we ignore
* this problem for now. We do write the $BITMAP attribute if it is present
* which is the important one for a directory so things are not too bad.
*/
static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end,
int datasync)
{
struct inode *bmp_vi, *vi = filp->f_mapping->host;
int err, ret;
ntfs_attr na;
ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
err = file_write_and_wait_range(filp, start, end);
if (err)
return err;
inode_lock(vi);
BUG_ON(!S_ISDIR(vi->i_mode));
/* If the bitmap attribute inode is in memory sync it, too. */
na.mft_no = vi->i_ino;
na.type = AT_BITMAP;
na.name = I30;
na.name_len = 4;
bmp_vi = ilookup5(vi->i_sb, vi->i_ino, ntfs_test_inode, &na);
if (bmp_vi) {
write_inode_now(bmp_vi, !datasync);
iput(bmp_vi);
}
ret = __ntfs_write_inode(vi, 1);
write_inode_now(vi, !datasync);
err = sync_blockdev(vi->i_sb->s_bdev);
if (unlikely(err && !ret))
ret = err;
if (likely(!ret))
ntfs_debug("Done.");
else
ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error "
"%u.", datasync ? "data" : "", vi->i_ino, -ret);
inode_unlock(vi);
return ret;
}
#endif /* NTFS_RW */
WRAP_DIR_ITER(ntfs_readdir) // FIXME!
const struct file_operations ntfs_dir_ops = {
.llseek = generic_file_llseek, /* Seek inside directory. */
.read = generic_read_dir, /* Return -EISDIR. */
.iterate_shared = shared_ntfs_readdir, /* Read directory contents. */
#ifdef NTFS_RW
.fsync = ntfs_dir_fsync, /* Sync a directory to disk. */
#endif /* NTFS_RW */
/*.ioctl = ,*/ /* Perform function on the
mounted filesystem. */
.open = ntfs_dir_open, /* Open directory. */
};
| linux-master | fs/ntfs/dir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* debug.c - NTFS kernel debug support. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2004 Anton Altaparmakov
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "debug.h"
/**
* __ntfs_warning - output a warning to the syslog
* @function: name of function outputting the warning
* @sb: super block of mounted ntfs filesystem
* @fmt: warning string containing format specifications
* @...: a variable number of arguments specified in @fmt
*
* Outputs a warning to the syslog for the mounted ntfs filesystem described
* by @sb.
*
* @fmt and the corresponding @... is printf style format string containing
* the warning string and the corresponding format arguments, respectively.
*
* @function is the name of the function from which __ntfs_warning is being
* called.
*
* Note, you should be using debug.h::ntfs_warning(@sb, @fmt, @...) instead
* as this provides the @function parameter automatically.
*/
void __ntfs_warning(const char *function, const struct super_block *sb,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
int flen = 0;
#ifndef DEBUG
if (!printk_ratelimit())
return;
#endif
if (function)
flen = strlen(function);
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (sb)
pr_warn("(device %s): %s(): %pV\n",
sb->s_id, flen ? function : "", &vaf);
else
pr_warn("%s(): %pV\n", flen ? function : "", &vaf);
va_end(args);
}
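/*
 * Example call sites (illustrative only): code should use the ntfs_warning()
 * wrapper from debug.h, which supplies the @function argument automatically
 * as described above. @sb may be NULL when no super block is available yet.
 */
#if 0
	ntfs_warning(sb, "Volume is dirty. Mounting read-only anyway.");
	ntfs_warning(NULL, "Allocation failed, retrying.");	/* No sb yet. */
#endif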
/**
* __ntfs_error - output an error to the syslog
* @function: name of function outputting the error
* @sb: super block of mounted ntfs filesystem
* @fmt: error string containing format specifications
* @...: a variable number of arguments specified in @fmt
*
* Outputs an error to the syslog for the mounted ntfs filesystem described
* by @sb.
*
* @fmt and the corresponding @... is printf style format string containing
* the error string and the corresponding format arguments, respectively.
*
* @function is the name of the function from which __ntfs_error is being
* called.
*
* Note, you should be using debug.h::ntfs_error(@sb, @fmt, @...) instead
* as this provides the @function parameter automatically.
*/
void __ntfs_error(const char *function, const struct super_block *sb,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
int flen = 0;
#ifndef DEBUG
if (!printk_ratelimit())
return;
#endif
if (function)
flen = strlen(function);
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (sb)
pr_err("(device %s): %s(): %pV\n",
sb->s_id, flen ? function : "", &vaf);
else
pr_err("%s(): %pV\n", flen ? function : "", &vaf);
va_end(args);
}
#ifdef DEBUG
/* If 1, output debug messages, and if 0, don't. */
int debug_msgs = 0;
void __ntfs_debug(const char *file, int line, const char *function,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
int flen = 0;
if (!debug_msgs)
return;
if (function)
flen = strlen(function);
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
pr_debug("(%s, %d): %s(): %pV", file, line, flen ? function : "", &vaf);
va_end(args);
}
/* Dump a runlist. Caller has to provide synchronisation for @rl. */
void ntfs_debug_dump_runlist(const runlist_element *rl)
{
int i;
	const char *lcn_str[5] = { "LCN_HOLE         ", "LCN_RL_NOT_MAPPED",
			"LCN_ENOENT       ", "LCN_unknown      " };
if (!debug_msgs)
return;
pr_debug("Dumping runlist (values in hex):\n");
if (!rl) {
pr_debug("Run list not present.\n");
return;
}
pr_debug("VCN LCN Run length\n");
for (i = 0; ; i++) {
LCN lcn = (rl + i)->lcn;
if (lcn < (LCN)0) {
int index = -lcn - 1;
if (index > -LCN_ENOENT - 1)
index = 3;
pr_debug("%-16Lx %s %-16Lx%s\n",
(long long)(rl + i)->vcn, lcn_str[index],
(long long)(rl + i)->length,
(rl + i)->length ? "" :
" (runlist end)");
} else
pr_debug("%-16Lx %-16Lx %-16Lx%s\n",
(long long)(rl + i)->vcn,
(long long)(rl + i)->lcn,
(long long)(rl + i)->length,
(rl + i)->length ? "" :
" (runlist end)");
if (!(rl + i)->length)
break;
}
}
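/*
 * Hypothetical example of the dump format produced above (values in hex, one
 * line per runlist element, negative LCNs printed via lcn_str[], alignment
 * approximate):
 *
 *	VCN              LCN               Run length
 *	0                3522              10
 *	10               LCN_HOLE          5
 *	15               LCN_ENOENT        0 (runlist end)
 */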
#endif
| linux-master | fs/ntfs/debug.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* inode.c - NTFS kernel inode handling.
*
* Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
*/
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include "aops.h"
#include "attrib.h"
#include "bitmap.h"
#include "dir.h"
#include "debug.h"
#include "inode.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "time.h"
#include "ntfs.h"
/**
* ntfs_test_inode - compare two (possibly fake) inodes for equality
* @vi: vfs inode which to test
* @data: data which is being tested with
*
* Compare the ntfs attribute embedded in the ntfs specific part of the vfs
* inode @vi for equality with the ntfs attribute @data.
*
* If searching for the normal file/directory inode, set @na->type to AT_UNUSED.
* @na->name and @na->name_len are then ignored.
*
* Return 1 if the attributes match and 0 if not.
*
* NOTE: This function runs with the inode_hash_lock spin lock held so it is not
* allowed to sleep.
*/
int ntfs_test_inode(struct inode *vi, void *data)
{
ntfs_attr *na = (ntfs_attr *)data;
ntfs_inode *ni;
if (vi->i_ino != na->mft_no)
return 0;
ni = NTFS_I(vi);
/* If !NInoAttr(ni), @vi is a normal file or directory inode. */
if (likely(!NInoAttr(ni))) {
/* If not looking for a normal inode this is a mismatch. */
if (unlikely(na->type != AT_UNUSED))
return 0;
} else {
/* A fake inode describing an attribute. */
if (ni->type != na->type)
return 0;
if (ni->name_len != na->name_len)
return 0;
if (na->name_len && memcmp(ni->name, na->name,
na->name_len * sizeof(ntfschar)))
return 0;
}
/* Match! */
return 1;
}
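/*
 * Example usage (illustrative sketch): ntfs_test_inode() is the comparison
 * callback handed to the inode cache, keyed by an ntfs_attr. This mirrors
 * the bitmap inode lookup in dir.c::ntfs_dir_fsync(); error handling is
 * elided.
 */
#if 0
	ntfs_attr na = {
		.mft_no = vi->i_ino,
		.type = AT_BITMAP,
		.name = I30,
		.name_len = 4,
	};
	struct inode *bmp_vi = ilookup5(vi->i_sb, vi->i_ino, ntfs_test_inode,
			&na);

	if (bmp_vi)
		iput(bmp_vi);
#endif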
/**
* ntfs_init_locked_inode - initialize an inode
* @vi: vfs inode to initialize
* @data: data which to initialize @vi to
*
* Initialize the vfs inode @vi with the values from the ntfs attribute @data in
* order to enable ntfs_test_inode() to do its work.
*
* If initializing the normal file/directory inode, set @na->type to AT_UNUSED.
* In that case, @na->name and @na->name_len should be set to NULL and 0,
 * respectively, although that is not strictly necessary as
 * ntfs_read_locked_inode() will fill them in later.
*
* Return 0 on success and -errno on error.
*
* NOTE: This function runs with the inode->i_lock spin lock held so it is not
* allowed to sleep. (Hence the GFP_ATOMIC allocation.)
*/
static int ntfs_init_locked_inode(struct inode *vi, void *data)
{
ntfs_attr *na = (ntfs_attr *)data;
ntfs_inode *ni = NTFS_I(vi);
vi->i_ino = na->mft_no;
ni->type = na->type;
if (na->type == AT_INDEX_ALLOCATION)
NInoSetMstProtected(ni);
ni->name = na->name;
ni->name_len = na->name_len;
/* If initializing a normal inode, we are done. */
if (likely(na->type == AT_UNUSED)) {
BUG_ON(na->name);
BUG_ON(na->name_len);
return 0;
}
/* It is a fake inode. */
NInoSetAttr(ni);
/*
* We have I30 global constant as an optimization as it is the name
* in >99.9% of named attributes! The other <0.1% incur a GFP_ATOMIC
* allocation but that is ok. And most attributes are unnamed anyway,
* thus the fraction of named attributes with name != I30 is actually
* absolutely tiny.
*/
if (na->name_len && na->name != I30) {
unsigned int i;
BUG_ON(!na->name);
i = na->name_len * sizeof(ntfschar);
ni->name = kmalloc(i + sizeof(ntfschar), GFP_ATOMIC);
if (!ni->name)
return -ENOMEM;
memcpy(ni->name, na->name, i);
ni->name[na->name_len] = 0;
}
return 0;
}
static int ntfs_read_locked_inode(struct inode *vi);
static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi);
static int ntfs_read_locked_index_inode(struct inode *base_vi,
struct inode *vi);
/**
* ntfs_iget - obtain a struct inode corresponding to a specific normal inode
* @sb: super block of mounted volume
* @mft_no: mft record number / inode number to obtain
*
* Obtain the struct inode corresponding to a specific normal inode (i.e. a
* file or directory).
*
* If the inode is in the cache, it is just returned with an increased
* reference count. Otherwise, a new struct inode is allocated and initialized,
* and finally ntfs_read_locked_inode() is called to read in the inode and
* fill in the remainder of the inode structure.
*
* Return the struct inode on success. Check the return value with IS_ERR() and
* if true, the function failed and the error code is obtained from PTR_ERR().
*/
struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no)
{
struct inode *vi;
int err;
ntfs_attr na;
na.mft_no = mft_no;
na.type = AT_UNUSED;
na.name = NULL;
na.name_len = 0;
vi = iget5_locked(sb, mft_no, ntfs_test_inode,
ntfs_init_locked_inode, &na);
if (unlikely(!vi))
return ERR_PTR(-ENOMEM);
err = 0;
/* If this is a freshly allocated inode, need to read it now. */
if (vi->i_state & I_NEW) {
err = ntfs_read_locked_inode(vi);
unlock_new_inode(vi);
}
/*
* There is no point in keeping bad inodes around if the failure was
* due to ENOMEM. We want to be able to retry again later.
*/
if (unlikely(err == -ENOMEM)) {
iput(vi);
vi = ERR_PTR(err);
}
return vi;
}
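/*
 * Example usage (illustrative sketch; @sb and @mft_no are assumed to be in
 * scope): per the comment above, only -ENOMEM failures are returned as error
 * pointers, so callers also need to check for a bad inode.
 */
#if 0
	struct inode *vi = ntfs_iget(sb, mft_no);

	if (IS_ERR(vi))
		return PTR_ERR(vi);
	if (is_bad_inode(vi)) {
		iput(vi);
		return -EIO;
	}
	/* ... use vi, then drop the reference ... */
	iput(vi);
#endif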
/**
* ntfs_attr_iget - obtain a struct inode corresponding to an attribute
* @base_vi: vfs base inode containing the attribute
* @type: attribute type
* @name: Unicode name of the attribute (NULL if unnamed)
* @name_len: length of @name in Unicode characters (0 if unnamed)
*
* Obtain the (fake) struct inode corresponding to the attribute specified by
* @type, @name, and @name_len, which is present in the base mft record
* specified by the vfs inode @base_vi.
*
* If the attribute inode is in the cache, it is just returned with an
* increased reference count. Otherwise, a new struct inode is allocated and
* initialized, and finally ntfs_read_locked_attr_inode() is called to read the
* attribute and fill in the inode structure.
*
* Note, for index allocation attributes, you need to use ntfs_index_iget()
* instead of ntfs_attr_iget() as working with indices is a lot more complex.
*
* Return the struct inode of the attribute inode on success. Check the return
* value with IS_ERR() and if true, the function failed and the error code is
* obtained from PTR_ERR().
*/
struct inode *ntfs_attr_iget(struct inode *base_vi, ATTR_TYPE type,
ntfschar *name, u32 name_len)
{
struct inode *vi;
int err;
ntfs_attr na;
/* Make sure no one calls ntfs_attr_iget() for indices. */
BUG_ON(type == AT_INDEX_ALLOCATION);
na.mft_no = base_vi->i_ino;
na.type = type;
na.name = name;
na.name_len = name_len;
vi = iget5_locked(base_vi->i_sb, na.mft_no, ntfs_test_inode,
ntfs_init_locked_inode, &na);
if (unlikely(!vi))
return ERR_PTR(-ENOMEM);
err = 0;
/* If this is a freshly allocated inode, need to read it now. */
if (vi->i_state & I_NEW) {
err = ntfs_read_locked_attr_inode(base_vi, vi);
unlock_new_inode(vi);
}
/*
* There is no point in keeping bad attribute inodes around. This also
* simplifies things in that we never need to check for bad attribute
* inodes elsewhere.
*/
if (unlikely(err)) {
iput(vi);
vi = ERR_PTR(err);
}
return vi;
}
/**
* ntfs_index_iget - obtain a struct inode corresponding to an index
* @base_vi: vfs base inode containing the index related attributes
* @name: Unicode name of the index
* @name_len: length of @name in Unicode characters
*
* Obtain the (fake) struct inode corresponding to the index specified by @name
* and @name_len, which is present in the base mft record specified by the vfs
* inode @base_vi.
*
* If the index inode is in the cache, it is just returned with an increased
* reference count. Otherwise, a new struct inode is allocated and
* initialized, and finally ntfs_read_locked_index_inode() is called to read
* the index related attributes and fill in the inode structure.
*
* Return the struct inode of the index inode on success. Check the return
* value with IS_ERR() and if true, the function failed and the error code is
* obtained from PTR_ERR().
*/
struct inode *ntfs_index_iget(struct inode *base_vi, ntfschar *name,
u32 name_len)
{
struct inode *vi;
int err;
ntfs_attr na;
na.mft_no = base_vi->i_ino;
na.type = AT_INDEX_ALLOCATION;
na.name = name;
na.name_len = name_len;
vi = iget5_locked(base_vi->i_sb, na.mft_no, ntfs_test_inode,
ntfs_init_locked_inode, &na);
if (unlikely(!vi))
return ERR_PTR(-ENOMEM);
err = 0;
/* If this is a freshly allocated inode, need to read it now. */
if (vi->i_state & I_NEW) {
err = ntfs_read_locked_index_inode(base_vi, vi);
unlock_new_inode(vi);
}
/*
* There is no point in keeping bad index inodes around. This also
* simplifies things in that we never need to check for bad index
* inodes elsewhere.
*/
if (unlikely(err)) {
iput(vi);
vi = ERR_PTR(err);
}
return vi;
}
struct inode *ntfs_alloc_big_inode(struct super_block *sb)
{
ntfs_inode *ni;
ntfs_debug("Entering.");
ni = alloc_inode_sb(sb, ntfs_big_inode_cache, GFP_NOFS);
if (likely(ni != NULL)) {
ni->state = 0;
return VFS_I(ni);
}
ntfs_error(sb, "Allocation of NTFS big inode structure failed.");
return NULL;
}
void ntfs_free_big_inode(struct inode *inode)
{
kmem_cache_free(ntfs_big_inode_cache, NTFS_I(inode));
}
static inline ntfs_inode *ntfs_alloc_extent_inode(void)
{
ntfs_inode *ni;
ntfs_debug("Entering.");
ni = kmem_cache_alloc(ntfs_inode_cache, GFP_NOFS);
if (likely(ni != NULL)) {
ni->state = 0;
return ni;
}
ntfs_error(NULL, "Allocation of NTFS inode structure failed.");
return NULL;
}
static void ntfs_destroy_extent_inode(ntfs_inode *ni)
{
ntfs_debug("Entering.");
BUG_ON(ni->page);
if (!atomic_dec_and_test(&ni->count))
BUG();
kmem_cache_free(ntfs_inode_cache, ni);
}
/*
* The attribute runlist lock has separate locking rules from the
* normal runlist lock, so split the two lock-classes:
*/
static struct lock_class_key attr_list_rl_lock_class;
/**
* __ntfs_init_inode - initialize ntfs specific part of an inode
* @sb: super block of mounted volume
* @ni: freshly allocated ntfs inode which to initialize
*
* Initialize an ntfs inode to defaults.
*
* NOTE: ni->mft_no, ni->state, ni->type, ni->name, and ni->name_len are left
* untouched. Make sure to initialize them elsewhere.
*/
void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
{
ntfs_debug("Entering.");
rwlock_init(&ni->size_lock);
ni->initialized_size = ni->allocated_size = 0;
ni->seq_no = 0;
atomic_set(&ni->count, 1);
ni->vol = NTFS_SB(sb);
ntfs_init_runlist(&ni->runlist);
mutex_init(&ni->mrec_lock);
ni->page = NULL;
ni->page_ofs = 0;
ni->attr_list_size = 0;
ni->attr_list = NULL;
ntfs_init_runlist(&ni->attr_list_rl);
lockdep_set_class(&ni->attr_list_rl.lock,
&attr_list_rl_lock_class);
ni->itype.index.block_size = 0;
ni->itype.index.vcn_size = 0;
ni->itype.index.collation_rule = 0;
ni->itype.index.block_size_bits = 0;
ni->itype.index.vcn_size_bits = 0;
mutex_init(&ni->extent_lock);
ni->nr_extents = 0;
ni->ext.base_ntfs_ino = NULL;
}
/*
* Extent inodes get MFT-mapped in a nested way, while the base inode
* is still mapped. Teach this nesting to the lock validator by creating
* a separate class for nested inode's mrec_lock's:
*/
static struct lock_class_key extent_inode_mrec_lock_key;
inline ntfs_inode *ntfs_new_extent_inode(struct super_block *sb,
unsigned long mft_no)
{
ntfs_inode *ni = ntfs_alloc_extent_inode();
ntfs_debug("Entering.");
if (likely(ni != NULL)) {
__ntfs_init_inode(sb, ni);
lockdep_set_class(&ni->mrec_lock, &extent_inode_mrec_lock_key);
ni->mft_no = mft_no;
ni->type = AT_UNUSED;
ni->name = NULL;
ni->name_len = 0;
}
return ni;
}
/**
* ntfs_is_extended_system_file - check if a file is in the $Extend directory
* @ctx: initialized attribute search context
*
* Search all file name attributes in the inode described by the attribute
* search context @ctx and check if any of the names are in the $Extend system
* directory.
*
* Return values:
* 1: file is in $Extend directory
* 0: file is not in $Extend directory
* -errno: failed to determine if the file is in the $Extend directory
*/
static int ntfs_is_extended_system_file(ntfs_attr_search_ctx *ctx)
{
int nr_links, err;
/* Restart search. */
ntfs_attr_reinit_search_ctx(ctx);
/* Get number of hard links. */
nr_links = le16_to_cpu(ctx->mrec->link_count);
/* Loop through all hard links. */
while (!(err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, 0, 0, NULL, 0,
ctx))) {
FILE_NAME_ATTR *file_name_attr;
ATTR_RECORD *attr = ctx->attr;
u8 *p, *p2;
nr_links--;
/*
* Maximum sanity checking as we are called on an inode that
* we suspect might be corrupt.
*/
p = (u8*)attr + le32_to_cpu(attr->length);
if (p < (u8*)ctx->mrec || (u8*)p > (u8*)ctx->mrec +
le32_to_cpu(ctx->mrec->bytes_in_use)) {
err_corrupt_attr:
ntfs_error(ctx->ntfs_ino->vol->sb, "Corrupt file name "
"attribute. You should run chkdsk.");
return -EIO;
}
if (attr->non_resident) {
ntfs_error(ctx->ntfs_ino->vol->sb, "Non-resident file "
"name. You should run chkdsk.");
return -EIO;
}
if (attr->flags) {
ntfs_error(ctx->ntfs_ino->vol->sb, "File name with "
"invalid flags. You should run "
"chkdsk.");
return -EIO;
}
if (!(attr->data.resident.flags & RESIDENT_ATTR_IS_INDEXED)) {
ntfs_error(ctx->ntfs_ino->vol->sb, "Unindexed file "
"name. You should run chkdsk.");
return -EIO;
}
file_name_attr = (FILE_NAME_ATTR*)((u8*)attr +
le16_to_cpu(attr->data.resident.value_offset));
p2 = (u8 *)file_name_attr + le32_to_cpu(attr->data.resident.value_length);
if (p2 < (u8*)attr || p2 > p)
goto err_corrupt_attr;
/* This attribute is ok, but is it in the $Extend directory? */
if (MREF_LE(file_name_attr->parent_directory) == FILE_Extend)
return 1; /* YES, it's an extended system file. */
}
if (unlikely(err != -ENOENT))
return err;
if (unlikely(nr_links)) {
ntfs_error(ctx->ntfs_ino->vol->sb, "Inode hard link count "
"doesn't match number of name attributes. You "
"should run chkdsk.");
return -EIO;
}
return 0; /* NO, it is not an extended system file. */
}
/**
* ntfs_read_locked_inode - read an inode from its device
* @vi: inode to read
*
* ntfs_read_locked_inode() is called from ntfs_iget() to read the inode
* described by @vi into memory from the device.
*
* The only fields in @vi that we need to/can look at when the function is
* called are i_sb, pointing to the mounted device's super block, and i_ino,
* the number of the inode to load.
*
* ntfs_read_locked_inode() maps, pins and locks the mft record number i_ino
* for reading and sets up the necessary @vi fields as well as initializing
* the ntfs inode.
*
* Q: What locks are held when the function is called?
* A: i_state has I_NEW set, hence the inode is locked, also
* i_count is set to 1, so it is not going to go away
 * i_flags is set to 0 and we have no business touching it. Only an ioctl()
 * is allowed to write to it. We should of course be honouring the flags but
* we need to do that using the IS_* macros defined in include/linux/fs.h.
* In any case ntfs_read_locked_inode() has nothing to do with i_flags.
*
* Return 0 on success and -errno on error. In the error case, the inode will
* have had make_bad_inode() executed on it.
*/
static int ntfs_read_locked_inode(struct inode *vi)
{
ntfs_volume *vol = NTFS_SB(vi->i_sb);
ntfs_inode *ni;
struct inode *bvi;
MFT_RECORD *m;
ATTR_RECORD *a;
STANDARD_INFORMATION *si;
ntfs_attr_search_ctx *ctx;
int err = 0;
ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
/* Setup the generic vfs inode parts now. */
vi->i_uid = vol->uid;
vi->i_gid = vol->gid;
vi->i_mode = 0;
/*
* Initialize the ntfs specific part of @vi special casing
* FILE_MFT which we need to do at mount time.
*/
if (vi->i_ino != FILE_MFT)
ntfs_init_big_inode(vi);
ni = NTFS_I(vi);
m = map_mft_record(ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(ni, m);
if (!ctx) {
err = -ENOMEM;
goto unm_err_out;
}
if (!(m->flags & MFT_RECORD_IN_USE)) {
ntfs_error(vi->i_sb, "Inode is not in use!");
goto unm_err_out;
}
if (m->base_mft_record) {
ntfs_error(vi->i_sb, "Inode is an extent inode!");
goto unm_err_out;
}
/* Transfer information from mft record into vfs and ntfs inodes. */
vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
/*
* FIXME: Keep in mind that link_count is two for files which have both
* a long file name and a short file name as separate entries, so if
* we are hiding short file names this will be too high. Either we need
* to account for the short file names by subtracting them or we need
* to make sure we delete files even though i_nlink is not zero which
* might be tricky due to vfs interactions. Need to think about this
* some more when implementing the unlink command.
*/
set_nlink(vi, le16_to_cpu(m->link_count));
/*
* FIXME: Reparse points can have the directory bit set even though
* they would be S_IFLNK. Need to deal with this further below when we
* implement reparse points / symbolic links but it will do for now.
* Also if not a directory, it could be something else, rather than
* a regular file. But again, will do for now.
*/
/* Everyone gets all permissions. */
vi->i_mode |= S_IRWXUGO;
/* If read-only, no one gets write permissions. */
if (IS_RDONLY(vi))
vi->i_mode &= ~S_IWUGO;
if (m->flags & MFT_RECORD_IS_DIRECTORY) {
vi->i_mode |= S_IFDIR;
/*
* Apply the directory permissions mask set in the mount
* options.
*/
vi->i_mode &= ~vol->dmask;
/* Things break without this kludge! */
if (vi->i_nlink > 1)
set_nlink(vi, 1);
} else {
vi->i_mode |= S_IFREG;
/* Apply the file permissions mask set in the mount options. */
vi->i_mode &= ~vol->fmask;
}
/*
* Find the standard information attribute in the mft record. At this
* stage we haven't setup the attribute list stuff yet, so this could
* in fact fail if the standard information is in an extent record, but
* I don't think this actually ever happens.
*/
err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0, 0, 0, NULL, 0,
ctx);
if (unlikely(err)) {
if (err == -ENOENT) {
/*
* TODO: We should be performing a hot fix here (if the
* recover mount option is set) by creating a new
* attribute.
*/
ntfs_error(vi->i_sb, "$STANDARD_INFORMATION attribute "
"is missing.");
}
goto unm_err_out;
}
a = ctx->attr;
/* Get the standard information attribute value. */
if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset)
+ le32_to_cpu(a->data.resident.value_length) >
(u8 *)ctx->mrec + vol->mft_record_size) {
ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode.");
goto unm_err_out;
}
si = (STANDARD_INFORMATION*)((u8*)a +
le16_to_cpu(a->data.resident.value_offset));
/* Transfer information from the standard information into vi. */
/*
* Note: The i_?times do not quite map perfectly onto the NTFS times,
* but they are close enough, and in the end it doesn't really matter
* that much...
*/
/*
* mtime is the last change of the data within the file. Not changed
* when only metadata is changed, e.g. a rename doesn't affect mtime.
*/
vi->i_mtime = ntfs2utc(si->last_data_change_time);
/*
* ctime is the last change of the metadata of the file. This obviously
* always changes, when mtime is changed. ctime can be changed on its
* own, mtime is then not changed, e.g. when a file is renamed.
*/
inode_set_ctime_to_ts(vi, ntfs2utc(si->last_mft_change_time));
/*
* Last access to the data within the file. Not changed during a rename
* for example but changed whenever the file is written to.
*/
vi->i_atime = ntfs2utc(si->last_access_time);
/* Find the attribute list attribute if present. */
ntfs_attr_reinit_search_ctx(ctx);
err = ntfs_attr_lookup(AT_ATTRIBUTE_LIST, NULL, 0, 0, 0, NULL, 0, ctx);
if (err) {
if (unlikely(err != -ENOENT)) {
ntfs_error(vi->i_sb, "Failed to lookup attribute list "
"attribute.");
goto unm_err_out;
}
} else /* if (!err) */ {
if (vi->i_ino == FILE_MFT)
goto skip_attr_list_load;
ntfs_debug("Attribute list found in inode 0x%lx.", vi->i_ino);
NInoSetAttrList(ni);
a = ctx->attr;
if (a->flags & ATTR_COMPRESSION_MASK) {
ntfs_error(vi->i_sb, "Attribute list attribute is "
"compressed.");
goto unm_err_out;
}
if (a->flags & ATTR_IS_ENCRYPTED ||
a->flags & ATTR_IS_SPARSE) {
if (a->non_resident) {
ntfs_error(vi->i_sb, "Non-resident attribute "
"list attribute is encrypted/"
"sparse.");
goto unm_err_out;
}
ntfs_warning(vi->i_sb, "Resident attribute list "
"attribute in inode 0x%lx is marked "
"encrypted/sparse which is not true. "
"However, Windows allows this and "
"chkdsk does not detect or correct it "
"so we will just ignore the invalid "
"flags and pretend they are not set.",
vi->i_ino);
}
/* Now allocate memory for the attribute list. */
ni->attr_list_size = (u32)ntfs_attr_size(a);
ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
if (!ni->attr_list) {
ntfs_error(vi->i_sb, "Not enough memory to allocate "
"buffer for attribute list.");
err = -ENOMEM;
goto unm_err_out;
}
if (a->non_resident) {
NInoSetAttrListNonResident(ni);
if (a->data.non_resident.lowest_vcn) {
ntfs_error(vi->i_sb, "Attribute list has non "
"zero lowest_vcn.");
goto unm_err_out;
}
/*
* Setup the runlist. No need for locking as we have
* exclusive access to the inode at this time.
*/
ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol,
a, NULL);
if (IS_ERR(ni->attr_list_rl.rl)) {
err = PTR_ERR(ni->attr_list_rl.rl);
ni->attr_list_rl.rl = NULL;
ntfs_error(vi->i_sb, "Mapping pairs "
"decompression failed.");
goto unm_err_out;
}
/* Now load the attribute list. */
if ((err = load_attribute_list(vol, &ni->attr_list_rl,
ni->attr_list, ni->attr_list_size,
sle64_to_cpu(a->data.non_resident.
initialized_size)))) {
ntfs_error(vi->i_sb, "Failed to load "
"attribute list attribute.");
goto unm_err_out;
}
} else /* if (!a->non_resident) */ {
if ((u8*)a + le16_to_cpu(a->data.resident.value_offset)
+ le32_to_cpu(
a->data.resident.value_length) >
(u8*)ctx->mrec + vol->mft_record_size) {
ntfs_error(vi->i_sb, "Corrupt attribute list "
"in inode.");
goto unm_err_out;
}
/* Now copy the attribute list. */
memcpy(ni->attr_list, (u8*)a + le16_to_cpu(
a->data.resident.value_offset),
le32_to_cpu(
a->data.resident.value_length));
}
}
skip_attr_list_load:
/*
* If an attribute list is present we now have the attribute list value
* in ntfs_ino->attr_list and it is ntfs_ino->attr_list_size bytes.
*/
if (S_ISDIR(vi->i_mode)) {
loff_t bvi_size;
ntfs_inode *bni;
INDEX_ROOT *ir;
u8 *ir_end, *index_end;
/* It is a directory, find index root attribute. */
ntfs_attr_reinit_search_ctx(ctx);
err = ntfs_attr_lookup(AT_INDEX_ROOT, I30, 4, CASE_SENSITIVE,
0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT) {
// FIXME: File is corrupt! Hot-fix with empty
// index root attribute if recovery option is
// set.
ntfs_error(vi->i_sb, "$INDEX_ROOT attribute "
"is missing.");
}
goto unm_err_out;
}
a = ctx->attr;
/* Set up the state. */
if (unlikely(a->non_resident)) {
ntfs_error(vol->sb, "$INDEX_ROOT attribute is not "
"resident.");
goto unm_err_out;
}
/* Ensure the attribute name is placed before the value. */
if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
le16_to_cpu(a->data.resident.value_offset)))) {
ntfs_error(vol->sb, "$INDEX_ROOT attribute name is "
"placed after the attribute value.");
goto unm_err_out;
}
/*
* Compressed/encrypted index root just means that the newly
* created files in that directory should be created compressed/
* encrypted. However index root cannot be both compressed and
* encrypted.
*/
if (a->flags & ATTR_COMPRESSION_MASK)
NInoSetCompressed(ni);
if (a->flags & ATTR_IS_ENCRYPTED) {
if (a->flags & ATTR_COMPRESSION_MASK) {
ntfs_error(vi->i_sb, "Found encrypted and "
"compressed attribute.");
goto unm_err_out;
}
NInoSetEncrypted(ni);
}
if (a->flags & ATTR_IS_SPARSE)
NInoSetSparse(ni);
ir = (INDEX_ROOT*)((u8*)a +
le16_to_cpu(a->data.resident.value_offset));
ir_end = (u8*)ir + le32_to_cpu(a->data.resident.value_length);
if (ir_end > (u8*)ctx->mrec + vol->mft_record_size) {
ntfs_error(vi->i_sb, "$INDEX_ROOT attribute is "
"corrupt.");
goto unm_err_out;
}
index_end = (u8*)&ir->index +
le32_to_cpu(ir->index.index_length);
if (index_end > ir_end) {
ntfs_error(vi->i_sb, "Directory index is corrupt.");
goto unm_err_out;
}
if (ir->type != AT_FILE_NAME) {
ntfs_error(vi->i_sb, "Indexed attribute is not "
"$FILE_NAME.");
goto unm_err_out;
}
if (ir->collation_rule != COLLATION_FILE_NAME) {
ntfs_error(vi->i_sb, "Index collation rule is not "
"COLLATION_FILE_NAME.");
goto unm_err_out;
}
ni->itype.index.collation_rule = ir->collation_rule;
ni->itype.index.block_size = le32_to_cpu(ir->index_block_size);
if (ni->itype.index.block_size &
(ni->itype.index.block_size - 1)) {
ntfs_error(vi->i_sb, "Index block size (%u) is not a "
"power of two.",
ni->itype.index.block_size);
goto unm_err_out;
}
if (ni->itype.index.block_size > PAGE_SIZE) {
ntfs_error(vi->i_sb, "Index block size (%u) > "
"PAGE_SIZE (%ld) is not "
"supported. Sorry.",
ni->itype.index.block_size,
PAGE_SIZE);
err = -EOPNOTSUPP;
goto unm_err_out;
}
if (ni->itype.index.block_size < NTFS_BLOCK_SIZE) {
ntfs_error(vi->i_sb, "Index block size (%u) < "
"NTFS_BLOCK_SIZE (%i) is not "
"supported. Sorry.",
ni->itype.index.block_size,
NTFS_BLOCK_SIZE);
err = -EOPNOTSUPP;
goto unm_err_out;
}
ni->itype.index.block_size_bits =
ffs(ni->itype.index.block_size) - 1;
/* Determine the size of a vcn in the directory index. */
if (vol->cluster_size <= ni->itype.index.block_size) {
ni->itype.index.vcn_size = vol->cluster_size;
ni->itype.index.vcn_size_bits = vol->cluster_size_bits;
} else {
ni->itype.index.vcn_size = vol->sector_size;
ni->itype.index.vcn_size_bits = vol->sector_size_bits;
}
/* Setup the index allocation attribute, even if not present. */
NInoSetMstProtected(ni);
ni->type = AT_INDEX_ALLOCATION;
ni->name = I30;
ni->name_len = 4;
if (!(ir->index.flags & LARGE_INDEX)) {
/* No index allocation. */
vi->i_size = ni->initialized_size =
ni->allocated_size = 0;
/* We are done with the mft record, so we release it. */
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ni);
m = NULL;
ctx = NULL;
goto skip_large_dir_stuff;
} /* LARGE_INDEX: Index allocation present. Setup state. */
NInoSetIndexAllocPresent(ni);
/* Find index allocation attribute. */
ntfs_attr_reinit_search_ctx(ctx);
err = ntfs_attr_lookup(AT_INDEX_ALLOCATION, I30, 4,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION "
"attribute is not present but "
"$INDEX_ROOT indicated it is.");
else
ntfs_error(vi->i_sb, "Failed to lookup "
"$INDEX_ALLOCATION "
"attribute.");
goto unm_err_out;
}
a = ctx->attr;
if (!a->non_resident) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
"is resident.");
goto unm_err_out;
}
/*
* Ensure the attribute name is placed before the mapping pairs
* array.
*/
if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
le16_to_cpu(
a->data.non_resident.mapping_pairs_offset)))) {
ntfs_error(vol->sb, "$INDEX_ALLOCATION attribute name "
"is placed after the mapping pairs "
"array.");
goto unm_err_out;
}
if (a->flags & ATTR_IS_ENCRYPTED) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
"is encrypted.");
goto unm_err_out;
}
if (a->flags & ATTR_IS_SPARSE) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
"is sparse.");
goto unm_err_out;
}
if (a->flags & ATTR_COMPRESSION_MASK) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute "
"is compressed.");
goto unm_err_out;
}
if (a->data.non_resident.lowest_vcn) {
ntfs_error(vi->i_sb, "First extent of "
"$INDEX_ALLOCATION attribute has non "
"zero lowest_vcn.");
goto unm_err_out;
}
vi->i_size = sle64_to_cpu(a->data.non_resident.data_size);
ni->initialized_size = sle64_to_cpu(
a->data.non_resident.initialized_size);
ni->allocated_size = sle64_to_cpu(
a->data.non_resident.allocated_size);
/*
* We are done with the mft record, so we release it. Otherwise
* we would deadlock in ntfs_attr_iget().
*/
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ni);
m = NULL;
ctx = NULL;
/* Get the index bitmap attribute inode. */
bvi = ntfs_attr_iget(vi, AT_BITMAP, I30, 4);
if (IS_ERR(bvi)) {
ntfs_error(vi->i_sb, "Failed to get bitmap attribute.");
err = PTR_ERR(bvi);
goto unm_err_out;
}
bni = NTFS_I(bvi);
if (NInoCompressed(bni) || NInoEncrypted(bni) ||
NInoSparse(bni)) {
ntfs_error(vi->i_sb, "$BITMAP attribute is compressed "
"and/or encrypted and/or sparse.");
goto iput_unm_err_out;
}
/* Consistency check bitmap size vs. index allocation size. */
bvi_size = i_size_read(bvi);
if ((bvi_size << 3) < (vi->i_size >>
ni->itype.index.block_size_bits)) {
ntfs_error(vi->i_sb, "Index bitmap too small (0x%llx) "
"for index allocation (0x%llx).",
bvi_size << 3, vi->i_size);
goto iput_unm_err_out;
}
/* No longer need the bitmap attribute inode. */
iput(bvi);
skip_large_dir_stuff:
/* Setup the operations for this inode. */
vi->i_op = &ntfs_dir_inode_ops;
vi->i_fop = &ntfs_dir_ops;
vi->i_mapping->a_ops = &ntfs_mst_aops;
} else {
/* It is a file. */
ntfs_attr_reinit_search_ctx(ctx);
/* Setup the data attribute, even if not present. */
ni->type = AT_DATA;
ni->name = NULL;
ni->name_len = 0;
/* Find first extent of the unnamed data attribute. */
err = ntfs_attr_lookup(AT_DATA, NULL, 0, 0, 0, NULL, 0, ctx);
if (unlikely(err)) {
vi->i_size = ni->initialized_size =
ni->allocated_size = 0;
if (err != -ENOENT) {
ntfs_error(vi->i_sb, "Failed to lookup $DATA "
"attribute.");
goto unm_err_out;
}
/*
* FILE_Secure does not have an unnamed $DATA
* attribute, so we special case it here.
*/
if (vi->i_ino == FILE_Secure)
goto no_data_attr_special_case;
/*
* Most if not all the system files in the $Extend
* system directory do not have unnamed data
* attributes so we need to check if the parent
* directory of the file is FILE_Extend and if it is
* ignore this error. To do this we need to get the
* name of this inode from the mft record as the name
* contains the back reference to the parent directory.
*/
if (ntfs_is_extended_system_file(ctx) > 0)
goto no_data_attr_special_case;
// FIXME: File is corrupt! Hot-fix with empty data
// attribute if recovery option is set.
ntfs_error(vi->i_sb, "$DATA attribute is missing.");
goto unm_err_out;
}
a = ctx->attr;
/* Setup the state. */
if (a->flags & (ATTR_COMPRESSION_MASK | ATTR_IS_SPARSE)) {
if (a->flags & ATTR_COMPRESSION_MASK) {
NInoSetCompressed(ni);
if (vol->cluster_size > 4096) {
ntfs_error(vi->i_sb, "Found "
"compressed data but "
"compression is "
"disabled due to "
"cluster size (%i) > "
"4kiB.",
vol->cluster_size);
goto unm_err_out;
}
if ((a->flags & ATTR_COMPRESSION_MASK)
!= ATTR_IS_COMPRESSED) {
ntfs_error(vi->i_sb, "Found unknown "
"compression method "
"or corrupt file.");
goto unm_err_out;
}
}
if (a->flags & ATTR_IS_SPARSE)
NInoSetSparse(ni);
}
if (a->flags & ATTR_IS_ENCRYPTED) {
if (NInoCompressed(ni)) {
ntfs_error(vi->i_sb, "Found encrypted and "
"compressed data.");
goto unm_err_out;
}
NInoSetEncrypted(ni);
}
if (a->non_resident) {
NInoSetNonResident(ni);
if (NInoCompressed(ni) || NInoSparse(ni)) {
if (NInoCompressed(ni) && a->data.non_resident.
compression_unit != 4) {
ntfs_error(vi->i_sb, "Found "
"non-standard "
"compression unit (%u "
"instead of 4). "
"Cannot handle this.",
a->data.non_resident.
compression_unit);
err = -EOPNOTSUPP;
goto unm_err_out;
}
if (a->data.non_resident.compression_unit) {
ni->itype.compressed.block_size = 1U <<
(a->data.non_resident.
compression_unit +
vol->cluster_size_bits);
ni->itype.compressed.block_size_bits =
ffs(ni->itype.
compressed.
block_size) - 1;
ni->itype.compressed.block_clusters =
1U << a->data.
non_resident.
compression_unit;
} else {
ni->itype.compressed.block_size = 0;
ni->itype.compressed.block_size_bits =
0;
ni->itype.compressed.block_clusters =
0;
}
ni->itype.compressed.size = sle64_to_cpu(
a->data.non_resident.
compressed_size);
}
if (a->data.non_resident.lowest_vcn) {
ntfs_error(vi->i_sb, "First extent of $DATA "
"attribute has non zero "
"lowest_vcn.");
goto unm_err_out;
}
vi->i_size = sle64_to_cpu(
a->data.non_resident.data_size);
ni->initialized_size = sle64_to_cpu(
a->data.non_resident.initialized_size);
ni->allocated_size = sle64_to_cpu(
a->data.non_resident.allocated_size);
} else { /* Resident attribute. */
vi->i_size = ni->initialized_size = le32_to_cpu(
a->data.resident.value_length);
ni->allocated_size = le32_to_cpu(a->length) -
le16_to_cpu(
a->data.resident.value_offset);
if (vi->i_size > ni->allocated_size) {
ntfs_error(vi->i_sb, "Resident data attribute "
"is corrupt (size exceeds "
"allocation).");
goto unm_err_out;
}
}
no_data_attr_special_case:
/* We are done with the mft record, so we release it. */
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ni);
m = NULL;
ctx = NULL;
/* Setup the operations for this inode. */
vi->i_op = &ntfs_file_inode_ops;
vi->i_fop = &ntfs_file_ops;
vi->i_mapping->a_ops = &ntfs_normal_aops;
if (NInoMstProtected(ni))
vi->i_mapping->a_ops = &ntfs_mst_aops;
else if (NInoCompressed(ni))
vi->i_mapping->a_ops = &ntfs_compressed_aops;
}
/*
* The number of 512-byte blocks used on disk (for stat). This is in so
* far inaccurate as it doesn't account for any named streams or other
* special non-resident attributes, but that is how Windows works, too,
* so we are at least consistent with Windows, if not entirely
* consistent with the Linux Way. Doing it the Linux Way would cause a
* significant slowdown as it would involve iterating over all
* attributes in the mft record and adding the allocated/compressed
* sizes of all non-resident attributes present to give us the Linux
* correct size that should go into i_blocks (after division by 512).
*/
if (S_ISREG(vi->i_mode) && (NInoCompressed(ni) || NInoSparse(ni)))
vi->i_blocks = ni->itype.compressed.size >> 9;
else
vi->i_blocks = ni->allocated_size >> 9;
ntfs_debug("Done.");
return 0;
iput_unm_err_out:
iput(bvi);
unm_err_out:
if (!err)
err = -EIO;
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(ni);
err_out:
ntfs_error(vol->sb, "Failed with error code %i. Marking corrupt "
"inode 0x%lx as bad. Run chkdsk.", err, vi->i_ino);
make_bad_inode(vi);
if (err != -EOPNOTSUPP && err != -ENOMEM)
NVolSetErrors(vol);
return err;
}
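/*
 * Worked example for the i_blocks computation above (illustrative numbers):
 * a regular uncompressed file with allocated_size 0x5000 (20480 bytes) gets
 * i_blocks = 20480 >> 9 = 40, while a compressed file whose compressed size
 * is 0x2000 (8192 bytes) gets i_blocks = 16, consistent with the Windows
 * behaviour described in the comment.
 */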
/**
* ntfs_read_locked_attr_inode - read an attribute inode from its base inode
* @base_vi: base inode
* @vi: attribute inode to read
*
* ntfs_read_locked_attr_inode() is called from ntfs_attr_iget() to read the
* attribute inode described by @vi into memory from the base mft record
* described by @base_ni.
*
* ntfs_read_locked_attr_inode() maps, pins and locks the base inode for
* reading and looks up the attribute described by @vi before setting up the
* necessary fields in @vi as well as initializing the ntfs inode.
*
* Q: What locks are held when the function is called?
* A: i_state has I_NEW set, hence the inode is locked, also
* i_count is set to 1, so it is not going to go away
*
* Return 0 on success and -errno on error. In the error case, the inode will
* have had make_bad_inode() executed on it.
*
* Note this cannot be called for AT_INDEX_ALLOCATION.
*/
static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
{
ntfs_volume *vol = NTFS_SB(vi->i_sb);
ntfs_inode *ni, *base_ni;
MFT_RECORD *m;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx;
int err = 0;
ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
ntfs_init_big_inode(vi);
ni = NTFS_I(vi);
base_ni = NTFS_I(base_vi);
/* Just mirror the values from the base inode. */
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
vi->i_mtime = base_vi->i_mtime;
inode_set_ctime_to_ts(vi, inode_get_ctime(base_vi));
vi->i_atime = base_vi->i_atime;
vi->i_generation = ni->seq_no = base_ni->seq_no;
/* Set inode type to zero but preserve permissions. */
vi->i_mode = base_vi->i_mode & ~S_IFMT;
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (!ctx) {
err = -ENOMEM;
goto unm_err_out;
}
/* Find the attribute. */
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err))
goto unm_err_out;
a = ctx->attr;
if (a->flags & (ATTR_COMPRESSION_MASK | ATTR_IS_SPARSE)) {
if (a->flags & ATTR_COMPRESSION_MASK) {
NInoSetCompressed(ni);
if ((ni->type != AT_DATA) || (ni->type == AT_DATA &&
ni->name_len)) {
ntfs_error(vi->i_sb, "Found compressed "
"non-data or named data "
"attribute. Please report "
"you saw this message to "
"linux-ntfs-dev@lists."
"sourceforge.net");
goto unm_err_out;
}
if (vol->cluster_size > 4096) {
ntfs_error(vi->i_sb, "Found compressed "
"attribute but compression is "
"disabled due to cluster size "
"(%i) > 4kiB.",
vol->cluster_size);
goto unm_err_out;
}
if ((a->flags & ATTR_COMPRESSION_MASK) !=
ATTR_IS_COMPRESSED) {
ntfs_error(vi->i_sb, "Found unknown "
"compression method.");
goto unm_err_out;
}
}
/*
* The compressed/sparse flag set in an index root just means
* to compress all files.
*/
if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) {
ntfs_error(vi->i_sb, "Found mst protected attribute "
"but the attribute is %s. Please "
"report you saw this message to "
"[email protected]",
NInoCompressed(ni) ? "compressed" :
"sparse");
goto unm_err_out;
}
if (a->flags & ATTR_IS_SPARSE)
NInoSetSparse(ni);
}
if (a->flags & ATTR_IS_ENCRYPTED) {
if (NInoCompressed(ni)) {
ntfs_error(vi->i_sb, "Found encrypted and compressed "
"data.");
goto unm_err_out;
}
/*
* The encryption flag set in an index root just means to
* encrypt all files.
*/
if (NInoMstProtected(ni) && ni->type != AT_INDEX_ROOT) {
ntfs_error(vi->i_sb, "Found mst protected attribute "
"but the attribute is encrypted. "
"Please report you saw this message "
"to [email protected]."
"net");
goto unm_err_out;
}
if (ni->type != AT_DATA) {
ntfs_error(vi->i_sb, "Found encrypted non-data "
"attribute.");
goto unm_err_out;
}
NInoSetEncrypted(ni);
}
if (!a->non_resident) {
/* Ensure the attribute name is placed before the value. */
if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
le16_to_cpu(a->data.resident.value_offset)))) {
ntfs_error(vol->sb, "Attribute name is placed after "
"the attribute value.");
goto unm_err_out;
}
if (NInoMstProtected(ni)) {
ntfs_error(vi->i_sb, "Found mst protected attribute "
"but the attribute is resident. "
"Please report you saw this message to "
"[email protected]");
goto unm_err_out;
}
vi->i_size = ni->initialized_size = le32_to_cpu(
a->data.resident.value_length);
ni->allocated_size = le32_to_cpu(a->length) -
le16_to_cpu(a->data.resident.value_offset);
if (vi->i_size > ni->allocated_size) {
ntfs_error(vi->i_sb, "Resident attribute is corrupt "
"(size exceeds allocation).");
goto unm_err_out;
}
} else {
NInoSetNonResident(ni);
/*
* Ensure the attribute name is placed before the mapping pairs
* array.
*/
if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
le16_to_cpu(
a->data.non_resident.mapping_pairs_offset)))) {
ntfs_error(vol->sb, "Attribute name is placed after "
"the mapping pairs array.");
goto unm_err_out;
}
if (NInoCompressed(ni) || NInoSparse(ni)) {
if (NInoCompressed(ni) && a->data.non_resident.
compression_unit != 4) {
ntfs_error(vi->i_sb, "Found non-standard "
"compression unit (%u instead "
"of 4). Cannot handle this.",
a->data.non_resident.
compression_unit);
err = -EOPNOTSUPP;
goto unm_err_out;
}
if (a->data.non_resident.compression_unit) {
ni->itype.compressed.block_size = 1U <<
(a->data.non_resident.
compression_unit +
vol->cluster_size_bits);
ni->itype.compressed.block_size_bits =
ffs(ni->itype.compressed.
block_size) - 1;
ni->itype.compressed.block_clusters = 1U <<
a->data.non_resident.
compression_unit;
} else {
ni->itype.compressed.block_size = 0;
ni->itype.compressed.block_size_bits = 0;
ni->itype.compressed.block_clusters = 0;
}
ni->itype.compressed.size = sle64_to_cpu(
a->data.non_resident.compressed_size);
}
if (a->data.non_resident.lowest_vcn) {
ntfs_error(vi->i_sb, "First extent of attribute has "
"non-zero lowest_vcn.");
goto unm_err_out;
}
vi->i_size = sle64_to_cpu(a->data.non_resident.data_size);
ni->initialized_size = sle64_to_cpu(
a->data.non_resident.initialized_size);
ni->allocated_size = sle64_to_cpu(
a->data.non_resident.allocated_size);
}
vi->i_mapping->a_ops = &ntfs_normal_aops;
if (NInoMstProtected(ni))
vi->i_mapping->a_ops = &ntfs_mst_aops;
else if (NInoCompressed(ni))
vi->i_mapping->a_ops = &ntfs_compressed_aops;
if ((NInoCompressed(ni) || NInoSparse(ni)) && ni->type != AT_INDEX_ROOT)
vi->i_blocks = ni->itype.compressed.size >> 9;
else
vi->i_blocks = ni->allocated_size >> 9;
/*
* Make sure the base inode does not go away and attach it to the
* attribute inode.
*/
igrab(base_vi);
ni->ext.base_ntfs_ino = base_ni;
ni->nr_extents = -1;
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
ntfs_debug("Done.");
return 0;
unm_err_out:
if (!err)
err = -EIO;
if (ctx)
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
err_out:
ntfs_error(vol->sb, "Failed with error code %i while reading attribute "
"inode (mft_no 0x%lx, type 0x%x, name_len %i). "
"Marking corrupt inode and base inode 0x%lx as bad. "
"Run chkdsk.", err, vi->i_ino, ni->type, ni->name_len,
base_vi->i_ino);
make_bad_inode(vi);
if (err != -ENOMEM)
NVolSetErrors(vol);
return err;
}
/**
* ntfs_read_locked_index_inode - read an index inode from its base inode
* @base_vi: base inode
* @vi: index inode to read
*
* ntfs_read_locked_index_inode() is called from ntfs_index_iget() to read the
* index inode described by @vi into memory from the base mft record described
* by @base_ni.
*
* ntfs_read_locked_index_inode() maps, pins and locks the base inode for
* reading and looks up the attributes relating to the index described by @vi
* before setting up the necessary fields in @vi as well as initializing the
* ntfs inode.
*
* Note, index inodes are essentially attribute inodes (NInoAttr() is true)
* with the attribute type set to AT_INDEX_ALLOCATION. Apart from that, they
 * are set up like directory inodes since directories are a special case of
 * indices so they need to be treated in much the same way. Most importantly,
* for small indices the index allocation attribute might not actually exist.
* However, the index root attribute always exists but this does not need to
* have an inode associated with it and this is why we define a new inode type
* index. Also, like for directories, we need to have an attribute inode for
* the bitmap attribute corresponding to the index allocation attribute and we
* can store this in the appropriate field of the inode, just like we do for
* normal directory inodes.
*
* Q: What locks are held when the function is called?
* A: i_state has I_NEW set, hence the inode is locked, also
* i_count is set to 1, so it is not going to go away
*
* Return 0 on success and -errno on error. In the error case, the inode will
* have had make_bad_inode() executed on it.
*/
static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
{
loff_t bvi_size;
ntfs_volume *vol = NTFS_SB(vi->i_sb);
ntfs_inode *ni, *base_ni, *bni;
struct inode *bvi;
MFT_RECORD *m;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx;
INDEX_ROOT *ir;
u8 *ir_end, *index_end;
int err = 0;
ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
ntfs_init_big_inode(vi);
ni = NTFS_I(vi);
base_ni = NTFS_I(base_vi);
/* Just mirror the values from the base inode. */
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
vi->i_mtime = base_vi->i_mtime;
inode_set_ctime_to_ts(vi, inode_get_ctime(base_vi));
vi->i_atime = base_vi->i_atime;
vi->i_generation = ni->seq_no = base_ni->seq_no;
/* Set inode type to zero but preserve permissions. */
vi->i_mode = base_vi->i_mode & ~S_IFMT;
/* Map the mft record for the base inode. */
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (!ctx) {
err = -ENOMEM;
goto unm_err_out;
}
/* Find the index root attribute. */
err = ntfs_attr_lookup(AT_INDEX_ROOT, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
ntfs_error(vi->i_sb, "$INDEX_ROOT attribute is "
"missing.");
goto unm_err_out;
}
a = ctx->attr;
/* Set up the state. */
if (unlikely(a->non_resident)) {
ntfs_error(vol->sb, "$INDEX_ROOT attribute is not resident.");
goto unm_err_out;
}
/* Ensure the attribute name is placed before the value. */
if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
le16_to_cpu(a->data.resident.value_offset)))) {
ntfs_error(vol->sb, "$INDEX_ROOT attribute name is placed "
"after the attribute value.");
goto unm_err_out;
}
/*
* Compressed/encrypted/sparse index root is not allowed, except for
* directories of course but those are not dealt with here.
*/
if (a->flags & (ATTR_COMPRESSION_MASK | ATTR_IS_ENCRYPTED |
ATTR_IS_SPARSE)) {
ntfs_error(vi->i_sb, "Found compressed/encrypted/sparse index "
"root attribute.");
goto unm_err_out;
}
ir = (INDEX_ROOT*)((u8*)a + le16_to_cpu(a->data.resident.value_offset));
ir_end = (u8*)ir + le32_to_cpu(a->data.resident.value_length);
if (ir_end > (u8*)ctx->mrec + vol->mft_record_size) {
ntfs_error(vi->i_sb, "$INDEX_ROOT attribute is corrupt.");
goto unm_err_out;
}
index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
if (index_end > ir_end) {
ntfs_error(vi->i_sb, "Index is corrupt.");
goto unm_err_out;
}
if (ir->type) {
ntfs_error(vi->i_sb, "Index type is not 0 (type is 0x%x).",
le32_to_cpu(ir->type));
goto unm_err_out;
}
ni->itype.index.collation_rule = ir->collation_rule;
ntfs_debug("Index collation rule is 0x%x.",
le32_to_cpu(ir->collation_rule));
ni->itype.index.block_size = le32_to_cpu(ir->index_block_size);
if (!is_power_of_2(ni->itype.index.block_size)) {
ntfs_error(vi->i_sb, "Index block size (%u) is not a power of "
"two.", ni->itype.index.block_size);
goto unm_err_out;
}
if (ni->itype.index.block_size > PAGE_SIZE) {
ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE "
"(%ld) is not supported. Sorry.",
ni->itype.index.block_size, PAGE_SIZE);
err = -EOPNOTSUPP;
goto unm_err_out;
}
if (ni->itype.index.block_size < NTFS_BLOCK_SIZE) {
ntfs_error(vi->i_sb, "Index block size (%u) < NTFS_BLOCK_SIZE "
"(%i) is not supported. Sorry.",
ni->itype.index.block_size, NTFS_BLOCK_SIZE);
err = -EOPNOTSUPP;
goto unm_err_out;
}
ni->itype.index.block_size_bits = ffs(ni->itype.index.block_size) - 1;
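	/*
	 * Example (illustrative): for a block_size of 4096 == 1 << 12,
	 * ffs(4096) returns 13, so block_size_bits becomes 12.
	 */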
/* Determine the size of a vcn in the index. */
if (vol->cluster_size <= ni->itype.index.block_size) {
ni->itype.index.vcn_size = vol->cluster_size;
ni->itype.index.vcn_size_bits = vol->cluster_size_bits;
} else {
ni->itype.index.vcn_size = vol->sector_size;
ni->itype.index.vcn_size_bits = vol->sector_size_bits;
}
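	/*
	 * Illustrative numbers (assumed, not from the source): with a 4 KiB
	 * cluster and a 4 KiB index block, a whole cluster fits in a block
	 * and vcn_size = cluster_size = 4096. With an 8 KiB cluster and a
	 * 4 KiB index block the cluster no longer fits, so vcn_size falls
	 * back to the sector size, e.g. 512.
	 */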
/* Check for presence of index allocation attribute. */
if (!(ir->index.flags & LARGE_INDEX)) {
/* No index allocation. */
vi->i_size = ni->initialized_size = ni->allocated_size = 0;
/* We are done with the mft record, so we release it. */
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
m = NULL;
ctx = NULL;
goto skip_large_index_stuff;
} /* LARGE_INDEX: Index allocation present. Setup state. */
NInoSetIndexAllocPresent(ni);
/* Find index allocation attribute. */
ntfs_attr_reinit_search_ctx(ctx);
err = ntfs_attr_lookup(AT_INDEX_ALLOCATION, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
"not present but $INDEX_ROOT "
"indicated it is.");
else
ntfs_error(vi->i_sb, "Failed to lookup "
"$INDEX_ALLOCATION attribute.");
goto unm_err_out;
}
a = ctx->attr;
if (!a->non_resident) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
"resident.");
goto unm_err_out;
}
/*
* Ensure the attribute name is placed before the mapping pairs array.
*/
if (unlikely(a->name_length && (le16_to_cpu(a->name_offset) >=
le16_to_cpu(
a->data.non_resident.mapping_pairs_offset)))) {
ntfs_error(vol->sb, "$INDEX_ALLOCATION attribute name is "
"placed after the mapping pairs array.");
goto unm_err_out;
}
if (a->flags & ATTR_IS_ENCRYPTED) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
"encrypted.");
goto unm_err_out;
}
if (a->flags & ATTR_IS_SPARSE) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is sparse.");
goto unm_err_out;
}
if (a->flags & ATTR_COMPRESSION_MASK) {
ntfs_error(vi->i_sb, "$INDEX_ALLOCATION attribute is "
"compressed.");
goto unm_err_out;
}
if (a->data.non_resident.lowest_vcn) {
ntfs_error(vi->i_sb, "First extent of $INDEX_ALLOCATION "
"attribute has non zero lowest_vcn.");
goto unm_err_out;
}
vi->i_size = sle64_to_cpu(a->data.non_resident.data_size);
ni->initialized_size = sle64_to_cpu(
a->data.non_resident.initialized_size);
ni->allocated_size = sle64_to_cpu(a->data.non_resident.allocated_size);
/*
* We are done with the mft record, so we release it. Otherwise
* we would deadlock in ntfs_attr_iget().
*/
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
m = NULL;
ctx = NULL;
/* Get the index bitmap attribute inode. */
bvi = ntfs_attr_iget(base_vi, AT_BITMAP, ni->name, ni->name_len);
if (IS_ERR(bvi)) {
ntfs_error(vi->i_sb, "Failed to get bitmap attribute.");
err = PTR_ERR(bvi);
goto unm_err_out;
}
bni = NTFS_I(bvi);
if (NInoCompressed(bni) || NInoEncrypted(bni) ||
NInoSparse(bni)) {
ntfs_error(vi->i_sb, "$BITMAP attribute is compressed and/or "
"encrypted and/or sparse.");
goto iput_unm_err_out;
}
/* Consistency check bitmap size vs. index allocation size. */
bvi_size = i_size_read(bvi);
if ((bvi_size << 3) < (vi->i_size >> ni->itype.index.block_size_bits)) {
ntfs_error(vi->i_sb, "Index bitmap too small (0x%llx) for "
"index allocation (0x%llx).", bvi_size << 3,
vi->i_size);
goto iput_unm_err_out;
}
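	/*
	 * Worked example for the check above (illustrative values): each bit
	 * in $BITMAP covers one index block, so bvi_size << 3 is the number
	 * of blocks the bitmap can describe. With vi->i_size == 0x10000 and
	 * block_size_bits == 12 there are 16 index blocks, so a two-byte
	 * (16-bit) bitmap is the smallest that passes.
	 */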
iput(bvi);
skip_large_index_stuff:
/* Setup the operations for this index inode. */
vi->i_mapping->a_ops = &ntfs_mst_aops;
vi->i_blocks = ni->allocated_size >> 9;
/*
* Make sure the base inode doesn't go away and attach it to the
* index inode.
*/
igrab(base_vi);
ni->ext.base_ntfs_ino = base_ni;
ni->nr_extents = -1;
ntfs_debug("Done.");
return 0;
iput_unm_err_out:
iput(bvi);
unm_err_out:
if (!err)
err = -EIO;
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
err_out:
ntfs_error(vi->i_sb, "Failed with error code %i while reading index "
"inode (mft_no 0x%lx, name_len %i.", err, vi->i_ino,
ni->name_len);
make_bad_inode(vi);
if (err != -EOPNOTSUPP && err != -ENOMEM)
NVolSetErrors(vol);
return err;
}
/*
* The MFT inode has special locking, so teach the lock validator
* about this by splitting off the locking rules of the MFT from
* the locking rules of other inodes. The MFT inode can never be
* accessed from the VFS side (or even internally), only by the
* map_mft functions.
*/
static struct lock_class_key mft_ni_runlist_lock_key, mft_ni_mrec_lock_key;
/**
* ntfs_read_inode_mount - special read_inode for mount time use only
* @vi: inode to read
*
* Read inode FILE_MFT at mount time, only called with super_block lock
* held from within the read_super() code path.
*
* This function exists because when it is called the page cache for $MFT/$DATA
* is not initialized and hence we cannot get at the contents of mft records
* by calling map_mft_record*().
*
 * Further, it needs to cope with the circular references problem, i.e. it
 * cannot load any attributes other than $ATTRIBUTE_LIST until $DATA is
 * loaded, because we do not know where the other extent mft records are yet
 * and, again, because we cannot call map_mft_record*() yet. Obviously this
 * applies only when an attribute list is actually present in the $MFT inode.
*
* We solve these problems by starting with the $DATA attribute before anything
 * else and iterating using ntfs_attr_lookup($DATA) over all extents. As each
 * extent is found, we call ntfs_mapping_pairs_decompress() on it, which
 * includes the implied ntfs_runlists_merge(). Each step necessarily provides
* sufficient information for the next step to complete.
*
 * This should work but there are two possible pitfalls (see inline comments
* below), but only time will tell if they are real pits or just smoke...
*/
int ntfs_read_inode_mount(struct inode *vi)
{
VCN next_vcn, last_vcn, highest_vcn;
s64 block;
struct super_block *sb = vi->i_sb;
ntfs_volume *vol = NTFS_SB(sb);
struct buffer_head *bh;
ntfs_inode *ni;
MFT_RECORD *m = NULL;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx;
unsigned int i, nr_blocks;
int err;
ntfs_debug("Entering.");
/* Initialize the ntfs specific part of @vi. */
ntfs_init_big_inode(vi);
ni = NTFS_I(vi);
/* Setup the data attribute. It is special as it is mst protected. */
NInoSetNonResident(ni);
NInoSetMstProtected(ni);
NInoSetSparseDisabled(ni);
ni->type = AT_DATA;
ni->name = NULL;
ni->name_len = 0;
/*
* This sets up our little cheat allowing us to reuse the async read io
* completion handler for directories.
*/
ni->itype.index.block_size = vol->mft_record_size;
ni->itype.index.block_size_bits = vol->mft_record_size_bits;
/* Very important! Needed to be able to call map_mft_record*(). */
vol->mft_ino = vi;
/* Allocate enough memory to read the first mft record. */
if (vol->mft_record_size > 64 * 1024) {
ntfs_error(sb, "Unsupported mft record size %i (max 64kiB).",
vol->mft_record_size);
goto err_out;
}
i = vol->mft_record_size;
if (i < sb->s_blocksize)
i = sb->s_blocksize;
m = (MFT_RECORD*)ntfs_malloc_nofs(i);
if (!m) {
ntfs_error(sb, "Failed to allocate buffer for $MFT record 0.");
goto err_out;
}
/* Determine the first block of the $MFT/$DATA attribute. */
block = vol->mft_lcn << vol->cluster_size_bits >>
sb->s_blocksize_bits;
nr_blocks = vol->mft_record_size >> sb->s_blocksize_bits;
if (!nr_blocks)
nr_blocks = 1;
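	/*
	 * Example (assumed geometry): a 1024-byte mft record on a device with
	 * a 512-byte block size needs nr_blocks = 1024 >> 9 = 2 reads; with a
	 * 4096-byte block size the shift yields 0, which is clamped to a
	 * single sb_bread() that covers the whole record.
	 */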
/* Load $MFT/$DATA's first mft record. */
for (i = 0; i < nr_blocks; i++) {
bh = sb_bread(sb, block++);
if (!bh) {
ntfs_error(sb, "Device read failed.");
goto err_out;
}
memcpy((char*)m + (i << sb->s_blocksize_bits), bh->b_data,
sb->s_blocksize);
brelse(bh);
}
if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) {
ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.",
le32_to_cpu(m->bytes_allocated), vol->mft_record_size);
goto err_out;
}
/* Apply the mst fixups. */
if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) {
/* FIXME: Try to use the $MFTMirr now. */
ntfs_error(sb, "MST fixup failed. $MFT is corrupt.");
goto err_out;
}
/* Sanity check offset to the first attribute */
if (le16_to_cpu(m->attrs_offset) >= le32_to_cpu(m->bytes_allocated)) {
ntfs_error(sb, "Incorrect mft offset to the first attribute %u in superblock.",
le16_to_cpu(m->attrs_offset));
goto err_out;
}
/* Need this to sanity check attribute list references to $MFT. */
vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
/* Provides read_folio() for map_mft_record(). */
vi->i_mapping->a_ops = &ntfs_mst_aops;
ctx = ntfs_attr_get_search_ctx(ni, m);
if (!ctx) {
err = -ENOMEM;
goto err_out;
}
/* Find the attribute list attribute if present. */
err = ntfs_attr_lookup(AT_ATTRIBUTE_LIST, NULL, 0, 0, 0, NULL, 0, ctx);
if (err) {
if (unlikely(err != -ENOENT)) {
ntfs_error(sb, "Failed to lookup attribute list "
"attribute. You should run chkdsk.");
goto put_err_out;
}
} else /* if (!err) */ {
ATTR_LIST_ENTRY *al_entry, *next_al_entry;
u8 *al_end;
static const char *es = " Not allowed. $MFT is corrupt. "
"You should run chkdsk.";
ntfs_debug("Attribute list attribute found in $MFT.");
NInoSetAttrList(ni);
a = ctx->attr;
if (a->flags & ATTR_COMPRESSION_MASK) {
ntfs_error(sb, "Attribute list attribute is "
"compressed.%s", es);
goto put_err_out;
}
if (a->flags & ATTR_IS_ENCRYPTED ||
a->flags & ATTR_IS_SPARSE) {
if (a->non_resident) {
ntfs_error(sb, "Non-resident attribute list "
"attribute is encrypted/"
"sparse.%s", es);
goto put_err_out;
}
ntfs_warning(sb, "Resident attribute list attribute "
"in $MFT system file is marked "
"encrypted/sparse which is not true. "
"However, Windows allows this and "
"chkdsk does not detect or correct it "
"so we will just ignore the invalid "
"flags and pretend they are not set.");
}
/* Now allocate memory for the attribute list. */
ni->attr_list_size = (u32)ntfs_attr_size(a);
if (!ni->attr_list_size) {
ntfs_error(sb, "Attr_list_size is zero");
goto put_err_out;
}
ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
if (!ni->attr_list) {
ntfs_error(sb, "Not enough memory to allocate buffer "
"for attribute list.");
goto put_err_out;
}
if (a->non_resident) {
NInoSetAttrListNonResident(ni);
if (a->data.non_resident.lowest_vcn) {
ntfs_error(sb, "Attribute list has non zero "
"lowest_vcn. $MFT is corrupt. "
"You should run chkdsk.");
goto put_err_out;
}
/* Setup the runlist. */
ni->attr_list_rl.rl = ntfs_mapping_pairs_decompress(vol,
a, NULL);
if (IS_ERR(ni->attr_list_rl.rl)) {
err = PTR_ERR(ni->attr_list_rl.rl);
ni->attr_list_rl.rl = NULL;
ntfs_error(sb, "Mapping pairs decompression "
"failed with error code %i.",
-err);
goto put_err_out;
}
/* Now load the attribute list. */
if ((err = load_attribute_list(vol, &ni->attr_list_rl,
ni->attr_list, ni->attr_list_size,
sle64_to_cpu(a->data.
non_resident.initialized_size)))) {
ntfs_error(sb, "Failed to load attribute list "
"attribute with error code %i.",
-err);
goto put_err_out;
}
		} else /* if (!ctx->attr->non_resident) */ {
if ((u8*)a + le16_to_cpu(
a->data.resident.value_offset) +
le32_to_cpu(
a->data.resident.value_length) >
(u8*)ctx->mrec + vol->mft_record_size) {
ntfs_error(sb, "Corrupt attribute list "
"attribute.");
goto put_err_out;
}
/* Now copy the attribute list. */
memcpy(ni->attr_list, (u8*)a + le16_to_cpu(
a->data.resident.value_offset),
le32_to_cpu(
a->data.resident.value_length));
}
/* The attribute list is now setup in memory. */
/*
* FIXME: I don't know if this case is actually possible.
* According to logic it is not possible but I have seen too
* many weird things in MS software to rely on logic... Thus we
* perform a manual search and make sure the first $MFT/$DATA
* extent is in the base inode. If it is not we abort with an
* error and if we ever see a report of this error we will need
* to do some magic in order to have the necessary mft record
* loaded and in the right place in the page cache. But
* hopefully logic will prevail and this never happens...
*/
al_entry = (ATTR_LIST_ENTRY*)ni->attr_list;
al_end = (u8*)al_entry + ni->attr_list_size;
for (;; al_entry = next_al_entry) {
/* Out of bounds check. */
if ((u8*)al_entry < ni->attr_list ||
(u8*)al_entry > al_end)
goto em_put_err_out;
/* Catch the end of the attribute list. */
if ((u8*)al_entry == al_end)
goto em_put_err_out;
if (!al_entry->length)
goto em_put_err_out;
if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
le16_to_cpu(al_entry->length) > al_end)
goto em_put_err_out;
next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
le16_to_cpu(al_entry->length));
if (le32_to_cpu(al_entry->type) > le32_to_cpu(AT_DATA))
goto em_put_err_out;
if (AT_DATA != al_entry->type)
continue;
/* We want an unnamed attribute. */
if (al_entry->name_length)
goto em_put_err_out;
/* Want the first entry, i.e. lowest_vcn == 0. */
if (al_entry->lowest_vcn)
goto em_put_err_out;
/* First entry has to be in the base mft record. */
if (MREF_LE(al_entry->mft_reference) != vi->i_ino) {
/* MFT references do not match, logic fails. */
ntfs_error(sb, "BUG: The first $DATA extent "
"of $MFT is not in the base "
"mft record. Please report "
"you saw this message to "
"linux-ntfs-dev@lists."
"sourceforge.net");
goto put_err_out;
} else {
/* Sequence numbers must match. */
if (MSEQNO_LE(al_entry->mft_reference) !=
ni->seq_no)
goto em_put_err_out;
/* Got it. All is ok. We can stop now. */
break;
}
}
}
ntfs_attr_reinit_search_ctx(ctx);
/* Now load all attribute extents. */
a = NULL;
next_vcn = last_vcn = highest_vcn = 0;
while (!(err = ntfs_attr_lookup(AT_DATA, NULL, 0, 0, next_vcn, NULL, 0,
ctx))) {
runlist_element *nrl;
/* Cache the current attribute. */
a = ctx->attr;
/* $MFT must be non-resident. */
if (!a->non_resident) {
ntfs_error(sb, "$MFT must be non-resident but a "
"resident extent was found. $MFT is "
"corrupt. Run chkdsk.");
goto put_err_out;
}
/* $MFT must be uncompressed and unencrypted. */
if (a->flags & ATTR_COMPRESSION_MASK ||
a->flags & ATTR_IS_ENCRYPTED ||
a->flags & ATTR_IS_SPARSE) {
ntfs_error(sb, "$MFT must be uncompressed, "
"non-sparse, and unencrypted but a "
"compressed/sparse/encrypted extent "
"was found. $MFT is corrupt. Run "
"chkdsk.");
goto put_err_out;
}
/*
* Decompress the mapping pairs array of this extent and merge
* the result into the existing runlist. No need for locking
* as we have exclusive access to the inode at this time and we
* are a mount in progress task, too.
*/
nrl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
if (IS_ERR(nrl)) {
ntfs_error(sb, "ntfs_mapping_pairs_decompress() "
"failed with error code %ld. $MFT is "
"corrupt.", PTR_ERR(nrl));
goto put_err_out;
}
ni->runlist.rl = nrl;
/* Are we in the first extent? */
if (!next_vcn) {
if (a->data.non_resident.lowest_vcn) {
ntfs_error(sb, "First extent of $DATA "
"attribute has non zero "
"lowest_vcn. $MFT is corrupt. "
"You should run chkdsk.");
goto put_err_out;
}
/* Get the last vcn in the $DATA attribute. */
last_vcn = sle64_to_cpu(
a->data.non_resident.allocated_size)
>> vol->cluster_size_bits;
/* Fill in the inode size. */
vi->i_size = sle64_to_cpu(
a->data.non_resident.data_size);
ni->initialized_size = sle64_to_cpu(
a->data.non_resident.initialized_size);
ni->allocated_size = sle64_to_cpu(
a->data.non_resident.allocated_size);
/*
* Verify the number of mft records does not exceed
* 2^32 - 1.
*/
if ((vi->i_size >> vol->mft_record_size_bits) >=
(1ULL << 32)) {
ntfs_error(sb, "$MFT is too big! Aborting.");
goto put_err_out;
}
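			/*
			 * For scale (illustrative): with 1024-byte mft
			 * records (mft_record_size_bits == 10) this limit
			 * corresponds to an $MFT data size of 2^42 bytes,
			 * i.e. 4 TiB.
			 */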
/*
* We have got the first extent of the runlist for
* $MFT which means it is now relatively safe to call
* the normal ntfs_read_inode() function.
* Complete reading the inode, this will actually
* re-read the mft record for $MFT, this time entering
* it into the page cache with which we complete the
* kick start of the volume. It should be safe to do
* this now as the first extent of $MFT/$DATA is
* already known and we would hope that we don't need
* further extents in order to find the other
* attributes belonging to $MFT. Only time will tell if
* this is really the case. If not we will have to play
			 * magic at this point, possibly duplicating a lot of
			 * ntfs_read_inode(). We will need to ensure we do
			 * enough of its work to be able to call
			 * ntfs_read_inode() on extents of $MFT/$DATA. But let's
* hope this never happens...
*/
ntfs_read_locked_inode(vi);
if (is_bad_inode(vi)) {
ntfs_error(sb, "ntfs_read_inode() of $MFT "
"failed. BUG or corrupt $MFT. "
"Run chkdsk and if no errors "
"are found, please report you "
"saw this message to "
"linux-ntfs-dev@lists."
"sourceforge.net");
ntfs_attr_put_search_ctx(ctx);
/* Revert to the safe super operations. */
ntfs_free(m);
return -1;
}
/*
* Re-initialize some specifics about $MFT's inode as
* ntfs_read_inode() will have set up the default ones.
*/
/* Set uid and gid to root. */
vi->i_uid = GLOBAL_ROOT_UID;
vi->i_gid = GLOBAL_ROOT_GID;
/* Regular file. No access for anyone. */
vi->i_mode = S_IFREG;
/* No VFS initiated operations allowed for $MFT. */
vi->i_op = &ntfs_empty_inode_ops;
vi->i_fop = &ntfs_empty_file_ops;
}
/* Get the lowest vcn for the next extent. */
highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
next_vcn = highest_vcn + 1;
/* Only one extent or error, which we catch below. */
if (next_vcn <= 0)
break;
/* Avoid endless loops due to corruption. */
if (next_vcn < sle64_to_cpu(
a->data.non_resident.lowest_vcn)) {
ntfs_error(sb, "$MFT has corrupt attribute list "
"attribute. Run chkdsk.");
goto put_err_out;
}
}
if (err != -ENOENT) {
ntfs_error(sb, "Failed to lookup $MFT/$DATA attribute extent. "
"$MFT is corrupt. Run chkdsk.");
goto put_err_out;
}
if (!a) {
ntfs_error(sb, "$MFT/$DATA attribute not found. $MFT is "
"corrupt. Run chkdsk.");
goto put_err_out;
}
if (highest_vcn && highest_vcn != last_vcn - 1) {
ntfs_error(sb, "Failed to load the complete runlist for "
"$MFT/$DATA. Driver bug or corrupt $MFT. "
"Run chkdsk.");
ntfs_debug("highest_vcn = 0x%llx, last_vcn - 1 = 0x%llx",
(unsigned long long)highest_vcn,
(unsigned long long)last_vcn - 1);
goto put_err_out;
}
ntfs_attr_put_search_ctx(ctx);
ntfs_debug("Done.");
ntfs_free(m);
/*
* Split the locking rules of the MFT inode from the
* locking rules of other inodes:
*/
lockdep_set_class(&ni->runlist.lock, &mft_ni_runlist_lock_key);
lockdep_set_class(&ni->mrec_lock, &mft_ni_mrec_lock_key);
return 0;
em_put_err_out:
ntfs_error(sb, "Couldn't find first extent of $DATA attribute in "
"attribute list. $MFT is corrupt. Run chkdsk.");
put_err_out:
ntfs_attr_put_search_ctx(ctx);
err_out:
ntfs_error(sb, "Failed. Marking inode as bad.");
make_bad_inode(vi);
ntfs_free(m);
return -1;
}
static void __ntfs_clear_inode(ntfs_inode *ni)
{
	/* Free all allocated memory. */
down_write(&ni->runlist.lock);
if (ni->runlist.rl) {
ntfs_free(ni->runlist.rl);
ni->runlist.rl = NULL;
}
up_write(&ni->runlist.lock);
if (ni->attr_list) {
ntfs_free(ni->attr_list);
ni->attr_list = NULL;
}
down_write(&ni->attr_list_rl.lock);
if (ni->attr_list_rl.rl) {
ntfs_free(ni->attr_list_rl.rl);
ni->attr_list_rl.rl = NULL;
}
up_write(&ni->attr_list_rl.lock);
if (ni->name_len && ni->name != I30) {
/* Catch bugs... */
BUG_ON(!ni->name);
kfree(ni->name);
}
}
void ntfs_clear_extent_inode(ntfs_inode *ni)
{
ntfs_debug("Entering for inode 0x%lx.", ni->mft_no);
BUG_ON(NInoAttr(ni));
BUG_ON(ni->nr_extents != -1);
#ifdef NTFS_RW
if (NInoDirty(ni)) {
if (!is_bad_inode(VFS_I(ni->ext.base_ntfs_ino)))
ntfs_error(ni->vol->sb, "Clearing dirty extent inode! "
"Losing data! This is a BUG!!!");
// FIXME: Do something!!!
}
#endif /* NTFS_RW */
__ntfs_clear_inode(ni);
/* Bye, bye... */
ntfs_destroy_extent_inode(ni);
}
/**
* ntfs_evict_big_inode - clean up the ntfs specific part of an inode
* @vi: vfs inode pending annihilation
*
* When the VFS is going to remove an inode from memory, ntfs_clear_big_inode()
* is called, which deallocates all memory belonging to the NTFS specific part
* of the inode and returns.
*
* If the MFT record is dirty, we commit it before doing anything else.
*/
void ntfs_evict_big_inode(struct inode *vi)
{
ntfs_inode *ni = NTFS_I(vi);
truncate_inode_pages_final(&vi->i_data);
clear_inode(vi);
#ifdef NTFS_RW
if (NInoDirty(ni)) {
bool was_bad = (is_bad_inode(vi));
/* Committing the inode also commits all extent inodes. */
ntfs_commit_inode(vi);
if (!was_bad && (is_bad_inode(vi) || NInoDirty(ni))) {
ntfs_error(vi->i_sb, "Failed to commit dirty inode "
"0x%lx. Losing data!", vi->i_ino);
// FIXME: Do something!!!
}
}
#endif /* NTFS_RW */
/* No need to lock at this stage as no one else has a reference. */
if (ni->nr_extents > 0) {
int i;
for (i = 0; i < ni->nr_extents; i++)
ntfs_clear_extent_inode(ni->ext.extent_ntfs_inos[i]);
kfree(ni->ext.extent_ntfs_inos);
}
__ntfs_clear_inode(ni);
if (NInoAttr(ni)) {
/* Release the base inode if we are holding it. */
if (ni->nr_extents == -1) {
iput(VFS_I(ni->ext.base_ntfs_ino));
ni->nr_extents = 0;
ni->ext.base_ntfs_ino = NULL;
}
}
BUG_ON(ni->page);
if (!atomic_dec_and_test(&ni->count))
BUG();
return;
}
/**
* ntfs_show_options - show mount options in /proc/mounts
* @sf: seq_file in which to write our mount options
* @root: root of the mounted tree whose mount options to display
*
* Called by the VFS once for each mounted ntfs volume when someone reads
* /proc/mounts in order to display the NTFS specific mount options of each
 * mount. The mount options of the fs specified by @root are written to the seq file
* @sf and success is returned.
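 *
 * For illustration only (the exact options shown depend on the mount), a
 * line might look like:
 *
 *	,uid=0,gid=0,umask=022,nls=utf8,errors=continue,mft_zone_multiplier=1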
*/
int ntfs_show_options(struct seq_file *sf, struct dentry *root)
{
ntfs_volume *vol = NTFS_SB(root->d_sb);
int i;
seq_printf(sf, ",uid=%i", from_kuid_munged(&init_user_ns, vol->uid));
seq_printf(sf, ",gid=%i", from_kgid_munged(&init_user_ns, vol->gid));
if (vol->fmask == vol->dmask)
seq_printf(sf, ",umask=0%o", vol->fmask);
else {
seq_printf(sf, ",fmask=0%o", vol->fmask);
seq_printf(sf, ",dmask=0%o", vol->dmask);
}
seq_printf(sf, ",nls=%s", vol->nls_map->charset);
if (NVolCaseSensitive(vol))
seq_printf(sf, ",case_sensitive");
if (NVolShowSystemFiles(vol))
seq_printf(sf, ",show_sys_files");
if (!NVolSparseEnabled(vol))
seq_printf(sf, ",disable_sparse");
for (i = 0; on_errors_arr[i].val; i++) {
if (on_errors_arr[i].val & vol->on_errors)
seq_printf(sf, ",errors=%s", on_errors_arr[i].str);
}
seq_printf(sf, ",mft_zone_multiplier=%i", vol->mft_zone_multiplier);
return 0;
}
#ifdef NTFS_RW
static const char *es = " Leaving inconsistent metadata. Unmount and run "
"chkdsk.";
/**
* ntfs_truncate - called when the i_size of an ntfs inode is changed
* @vi: inode for which the i_size was changed
*
* We only support i_size changes for normal files at present, i.e. not
* compressed and not encrypted. This is enforced in ntfs_setattr(), see
* below.
*
* The kernel guarantees that @vi is a regular file (S_ISREG() is true) and
* that the change is allowed.
*
* This implies for us that @vi is a file inode rather than a directory, index,
* or attribute inode as well as that @vi is a base inode.
*
* Returns 0 on success or -errno on error.
*
* Called with ->i_mutex held.
*/
int ntfs_truncate(struct inode *vi)
{
s64 new_size, old_size, nr_freed, new_alloc_size, old_alloc_size;
VCN highest_vcn;
unsigned long flags;
ntfs_inode *base_ni, *ni = NTFS_I(vi);
ntfs_volume *vol = ni->vol;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *m;
ATTR_RECORD *a;
const char *te = " Leaving file length out of sync with i_size.";
int err, mp_size, size_change, alloc_change;
ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
BUG_ON(NInoAttr(ni));
BUG_ON(S_ISDIR(vi->i_mode));
BUG_ON(NInoMstProtected(ni));
BUG_ON(ni->nr_extents < 0);
retry_truncate:
/*
* Lock the runlist for writing and map the mft record to ensure it is
* safe to mess with the attribute runlist and sizes.
*/
down_write(&ni->runlist.lock);
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
ntfs_error(vi->i_sb, "Failed to map mft record for inode 0x%lx "
"(error code %d).%s", vi->i_ino, err, te);
ctx = NULL;
m = NULL;
goto old_bad_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
ntfs_error(vi->i_sb, "Failed to allocate a search context for "
"inode 0x%lx (not enough memory).%s",
vi->i_ino, te);
err = -ENOMEM;
goto old_bad_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT) {
ntfs_error(vi->i_sb, "Open attribute is missing from "
"mft record. Inode 0x%lx is corrupt. "
"Run chkdsk.%s", vi->i_ino, te);
err = -EIO;
} else
ntfs_error(vi->i_sb, "Failed to lookup attribute in "
"inode 0x%lx (error code %d).%s",
vi->i_ino, err, te);
goto old_bad_out;
}
m = ctx->mrec;
a = ctx->attr;
/*
* The i_size of the vfs inode is the new size for the attribute value.
*/
new_size = i_size_read(vi);
/* The current size of the attribute value is the old size. */
old_size = ntfs_attr_size(a);
/* Calculate the new allocated size. */
if (NInoNonResident(ni))
new_alloc_size = (new_size + vol->cluster_size - 1) &
~(s64)vol->cluster_size_mask;
else
new_alloc_size = (new_size + 7) & ~7;
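	/*
	 * Worked examples (illustrative): a resident attribute resized to 13
	 * bytes is allocated (13 + 7) & ~7 = 16 bytes, while a non-resident
	 * attribute truncated to 5000 bytes on a volume with 4096-byte
	 * clusters rounds up to 8192 bytes, i.e. two whole clusters.
	 */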
/* The current allocated size is the old allocated size. */
read_lock_irqsave(&ni->size_lock, flags);
old_alloc_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
/*
* The change in the file size. This will be 0 if no change, >0 if the
* size is growing, and <0 if the size is shrinking.
*/
size_change = -1;
if (new_size - old_size >= 0) {
size_change = 1;
if (new_size == old_size)
size_change = 0;
}
/* As above for the allocated size. */
alloc_change = -1;
if (new_alloc_size - old_alloc_size >= 0) {
alloc_change = 1;
if (new_alloc_size == old_alloc_size)
alloc_change = 0;
}
/*
* If neither the size nor the allocation are being changed there is
* nothing to do.
*/
if (!size_change && !alloc_change)
goto unm_done;
/* If the size is changing, check if new size is allowed in $AttrDef. */
if (size_change) {
err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
if (unlikely(err)) {
if (err == -ERANGE) {
ntfs_error(vol->sb, "Truncate would cause the "
"inode 0x%lx to %simum size "
"for its attribute type "
"(0x%x). Aborting truncate.",
vi->i_ino,
new_size > old_size ? "exceed "
"the max" : "go under the min",
le32_to_cpu(ni->type));
err = -EFBIG;
} else {
ntfs_error(vol->sb, "Inode 0x%lx has unknown "
"attribute type 0x%x. "
"Aborting truncate.",
vi->i_ino,
le32_to_cpu(ni->type));
err = -EIO;
}
/* Reset the vfs inode size to the old size. */
i_size_write(vi, old_size);
goto err_out;
}
}
if (NInoCompressed(ni) || NInoEncrypted(ni)) {
ntfs_warning(vi->i_sb, "Changes in inode size are not "
"supported yet for %s files, ignoring.",
NInoCompressed(ni) ? "compressed" :
"encrypted");
err = -EOPNOTSUPP;
goto bad_out;
}
if (a->non_resident)
goto do_non_resident_truncate;
BUG_ON(NInoNonResident(ni));
/* Resize the attribute record to best fit the new attribute size. */
if (new_size < vol->mft_record_size &&
!ntfs_resident_attr_value_resize(m, a, new_size)) {
/* The resize succeeded! */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
write_lock_irqsave(&ni->size_lock, flags);
/* Update the sizes in the ntfs inode and all is done. */
ni->allocated_size = le32_to_cpu(a->length) -
le16_to_cpu(a->data.resident.value_offset);
/*
* Note ntfs_resident_attr_value_resize() has already done any
* necessary data clearing in the attribute record. When the
* file is being shrunk vmtruncate() will already have cleared
* the top part of the last partial page, i.e. since this is
* the resident case this is the page with index 0. However,
* when the file is being expanded, the page cache page data
* between the old data_size, i.e. old_size, and the new_size
* has not been zeroed. Fortunately, we do not need to zero it
* either since on one hand it will either already be zero due
* to both read_folio and writepage clearing partial page data
* beyond i_size in which case there is nothing to do or in the
* case of the file being mmap()ped at the same time, POSIX
* specifies that the behaviour is unspecified thus we do not
* have to do anything. This means that in our implementation
* in the rare case that the file is mmap()ped and a write
* occurred into the mmap()ped region just beyond the file size
* and writepage has not yet been called to write out the page
* (which would clear the area beyond the file size) and we now
* extend the file size to incorporate this dirty region
* outside the file size, a write of the page would result in
* this data being written to disk instead of being cleared.
* Given both POSIX and the Linux mmap(2) man page specify that
* this corner case is undefined, we choose to leave it like
* that as this is much simpler for us as we cannot lock the
* relevant page now since we are holding too many ntfs locks
* which would result in a lock reversal deadlock.
*/
ni->initialized_size = new_size;
write_unlock_irqrestore(&ni->size_lock, flags);
goto unm_done;
}
/* If the above resize failed, this must be an attribute extension. */
BUG_ON(size_change < 0);
/*
* We have to drop all the locks so we can call
* ntfs_attr_make_non_resident(). This could be optimised by try-
* locking the first page cache page and only if that fails dropping
* the locks, locking the page, and redoing all the locking and
* lookups. While this would be a huge optimisation, it is not worth
* it as this is definitely a slow code path as it only ever can happen
* once for any given file.
*/
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
/*
* Not enough space in the mft record, try to make the attribute
* non-resident and if successful restart the truncation process.
*/
err = ntfs_attr_make_non_resident(ni, old_size);
if (likely(!err))
goto retry_truncate;
/*
* Could not make non-resident. If this is due to this not being
* permitted for this attribute type or there not being enough space,
* try to make other attributes non-resident. Otherwise fail.
*/
if (unlikely(err != -EPERM && err != -ENOSPC)) {
ntfs_error(vol->sb, "Cannot truncate inode 0x%lx, attribute "
"type 0x%x, because the conversion from "
"resident to non-resident attribute failed "
"with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err != -ENOMEM)
err = -EIO;
goto conv_err_out;
}
/* TODO: Not implemented from here, abort. */
if (err == -ENOSPC)
ntfs_error(vol->sb, "Not enough space in the mft record/on "
"disk for the non-resident attribute value. "
"This case is not implemented yet.");
else /* if (err == -EPERM) */
ntfs_error(vol->sb, "This attribute type may not be "
"non-resident. This case is not implemented "
"yet.");
err = -EOPNOTSUPP;
goto conv_err_out;
#if 0
// TODO: Attempt to make other attributes non-resident.
if (!err)
goto do_resident_extend;
/*
* Both the attribute list attribute and the standard information
* attribute must remain in the base inode. Thus, if this is one of
* these attributes, we have to try to move other attributes out into
* extent mft records instead.
*/
if (ni->type == AT_ATTRIBUTE_LIST ||
ni->type == AT_STANDARD_INFORMATION) {
// TODO: Attempt to move other attributes into extent mft
// records.
err = -EOPNOTSUPP;
if (!err)
goto do_resident_extend;
goto err_out;
}
// TODO: Attempt to move this attribute to an extent mft record, but
// only if it is not already the only attribute in an mft record in
// which case there would be nothing to gain.
err = -EOPNOTSUPP;
if (!err)
goto do_resident_extend;
/* There is nothing we can do to make enough space. )-: */
goto err_out;
#endif
do_non_resident_truncate:
BUG_ON(!NInoNonResident(ni));
if (alloc_change < 0) {
highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
if (highest_vcn > 0 &&
old_alloc_size >> vol->cluster_size_bits >
highest_vcn + 1) {
/*
* This attribute has multiple extents. Not yet
* supported.
*/
ntfs_error(vol->sb, "Cannot truncate inode 0x%lx, "
"attribute type 0x%x, because the "
"attribute is highly fragmented (it "
"consists of multiple extents) and "
"this case is not implemented yet.",
vi->i_ino,
(unsigned)le32_to_cpu(ni->type));
err = -EOPNOTSUPP;
goto bad_out;
}
}
/*
* If the size is shrinking, need to reduce the initialized_size and
* the data_size before reducing the allocation.
*/
if (size_change < 0) {
/*
* Make the valid size smaller (i_size is already up-to-date).
*/
write_lock_irqsave(&ni->size_lock, flags);
if (new_size < ni->initialized_size) {
ni->initialized_size = new_size;
a->data.non_resident.initialized_size =
cpu_to_sle64(new_size);
}
a->data.non_resident.data_size = cpu_to_sle64(new_size);
write_unlock_irqrestore(&ni->size_lock, flags);
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
/* If the allocated size is not changing, we are done. */
if (!alloc_change)
goto unm_done;
/*
* If the size is shrinking it makes no sense for the
* allocation to be growing.
*/
BUG_ON(alloc_change > 0);
} else /* if (size_change >= 0) */ {
/*
* The file size is growing or staying the same but the
* allocation can be shrinking, growing or staying the same.
*/
if (alloc_change > 0) {
/*
* We need to extend the allocation and possibly update
* the data size. If we are updating the data size,
* since we are not touching the initialized_size we do
* not need to worry about the actual data on disk.
* And as far as the page cache is concerned, there
* will be no pages beyond the old data size and any
* partial region in the last page between the old and
* new data size (or the end of the page if the new
* data size is outside the page) does not need to be
* modified as explained above for the resident
* attribute truncate case. To do this, we simply drop
* the locks we hold and leave all the work to our
* friendly helper ntfs_attr_extend_allocation().
*/
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
err = ntfs_attr_extend_allocation(ni, new_size,
size_change > 0 ? new_size : -1, -1);
/*
* ntfs_attr_extend_allocation() will have done error
* output already.
*/
goto done;
}
if (!alloc_change)
goto alloc_done;
}
/* alloc_change < 0 */
/* Free the clusters. */
nr_freed = ntfs_cluster_free(ni, new_alloc_size >>
vol->cluster_size_bits, -1, ctx);
m = ctx->mrec;
a = ctx->attr;
if (unlikely(nr_freed < 0)) {
ntfs_error(vol->sb, "Failed to release cluster(s) (error code "
"%lli). Unmount and run chkdsk to recover "
"the lost cluster(s).", (long long)nr_freed);
NVolSetErrors(vol);
nr_freed = 0;
}
/* Truncate the runlist. */
err = ntfs_rl_truncate_nolock(vol, &ni->runlist,
new_alloc_size >> vol->cluster_size_bits);
/*
* If the runlist truncation failed and/or the search context is no
* longer valid, we cannot resize the attribute record or build the
* mapping pairs array thus we mark the inode bad so that no access to
* the freed clusters can happen.
*/
if (unlikely(err || IS_ERR(m))) {
ntfs_error(vol->sb, "Failed to %s (error code %li).%s",
IS_ERR(m) ?
"restore attribute search context" :
"truncate attribute runlist",
IS_ERR(m) ? PTR_ERR(m) : err, es);
err = -EIO;
goto bad_out;
}
/* Get the size for the shrunk mapping pairs array for the runlist. */
mp_size = ntfs_get_size_for_mapping_pairs(vol, ni->runlist.rl, 0, -1);
if (unlikely(mp_size <= 0)) {
ntfs_error(vol->sb, "Cannot shrink allocation of inode 0x%lx, "
"attribute type 0x%x, because determining the "
"size for the mapping pairs failed with error "
"code %i.%s", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), mp_size, es);
err = -EIO;
goto bad_out;
}
/*
* Shrink the attribute record for the new mapping pairs array. Note,
* this cannot fail since we are making the attribute smaller thus by
* definition there is enough space to do so.
*/
err = ntfs_attr_record_resize(m, a, mp_size +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
BUG_ON(err);
/*
* Generate the mapping pairs array directly into the attribute record.
*/
err = ntfs_mapping_pairs_build(vol, (u8*)a +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
mp_size, ni->runlist.rl, 0, -1, NULL);
if (unlikely(err)) {
ntfs_error(vol->sb, "Cannot shrink allocation of inode 0x%lx, "
"attribute type 0x%x, because building the "
"mapping pairs failed with error code %i.%s",
vi->i_ino, (unsigned)le32_to_cpu(ni->type),
err, es);
err = -EIO;
goto bad_out;
}
/* Update the allocated/compressed size as well as the highest vcn. */
a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
vol->cluster_size_bits) - 1);
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = new_alloc_size;
a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
if (NInoSparse(ni) || NInoCompressed(ni)) {
if (nr_freed) {
ni->itype.compressed.size -= nr_freed <<
vol->cluster_size_bits;
BUG_ON(ni->itype.compressed.size < 0);
a->data.non_resident.compressed_size = cpu_to_sle64(
ni->itype.compressed.size);
vi->i_blocks = ni->itype.compressed.size >> 9;
}
} else
vi->i_blocks = new_alloc_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
/*
* We have shrunk the allocation. If this is a shrinking truncate we
* have already dealt with the initialized_size and the data_size above
* and we are done. If the truncate is only changing the allocation
* and not the data_size, we are also done. If this is an extending
* truncate, need to extend the data_size now which is ensured by the
* fact that @size_change is positive.
*/
alloc_done:
/*
* If the size is growing, need to update it now. If it is shrinking,
* we have already updated it above (before the allocation change).
*/
if (size_change > 0)
a->data.non_resident.data_size = cpu_to_sle64(new_size);
/* Ensure the modified mft record is written out. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
unm_done:
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
done:
/* Update the mtime and ctime on the base inode. */
	/*
	 * Normally ->truncate should not update ctime or mtime, but ntfs did
	 * before, so it got a copy & paste version of file_update_time().
	 * One day someone should fix this for real.
	 */
if (!IS_NOCMTIME(VFS_I(base_ni)) && !IS_RDONLY(VFS_I(base_ni))) {
struct timespec64 now = current_time(VFS_I(base_ni));
struct timespec64 ctime = inode_get_ctime(VFS_I(base_ni));
int sync_it = 0;
if (!timespec64_equal(&VFS_I(base_ni)->i_mtime, &now) ||
!timespec64_equal(&ctime, &now))
sync_it = 1;
inode_set_ctime_to_ts(VFS_I(base_ni), now);
VFS_I(base_ni)->i_mtime = now;
if (sync_it)
mark_inode_dirty_sync(VFS_I(base_ni));
}
if (likely(!err)) {
NInoClearTruncateFailed(ni);
ntfs_debug("Done.");
}
return err;
old_bad_out:
old_size = -1;
bad_out:
if (err != -ENOMEM && err != -EOPNOTSUPP)
NVolSetErrors(vol);
if (err != -EOPNOTSUPP)
NInoSetTruncateFailed(ni);
else if (old_size >= 0)
i_size_write(vi, old_size);
err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
out:
ntfs_debug("Failed. Returning error code %i.", err);
return err;
conv_err_out:
if (err != -ENOMEM && err != -EOPNOTSUPP)
NVolSetErrors(vol);
if (err != -EOPNOTSUPP)
NInoSetTruncateFailed(ni);
else
i_size_write(vi, old_size);
goto out;
}
/**
* ntfs_truncate_vfs - wrapper for ntfs_truncate() that has no return value
* @vi: inode for which the i_size was changed
*
* Wrapper for ntfs_truncate() that has no return value.
*
* See ntfs_truncate() description above for details.
*/
#ifdef NTFS_RW
void ntfs_truncate_vfs(struct inode *vi) {
ntfs_truncate(vi);
}
#endif
/**
* ntfs_setattr - called from notify_change() when an attribute is being changed
* @idmap: idmap of the mount the inode was found from
* @dentry: dentry whose attributes to change
* @attr: structure describing the attributes and the changes
*
* We have to trap VFS attempts to truncate the file described by @dentry as
* soon as possible, because we do not implement changes in i_size yet. So we
* abort all i_size changes here.
*
* We also abort all changes of user, group, and mode as we do not implement
* the NTFS ACLs yet.
*
* Called with ->i_mutex held.
*/
int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *vi = d_inode(dentry);
int err;
unsigned int ia_valid = attr->ia_valid;
err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (err)
goto out;
/* We do not support NTFS ACLs yet. */
if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE)) {
ntfs_warning(vi->i_sb, "Changes in user/group/mode are not "
"supported yet, ignoring.");
err = -EOPNOTSUPP;
goto out;
}
if (ia_valid & ATTR_SIZE) {
if (attr->ia_size != i_size_read(vi)) {
ntfs_inode *ni = NTFS_I(vi);
/*
* FIXME: For now we do not support resizing of
* compressed or encrypted files yet.
*/
if (NInoCompressed(ni) || NInoEncrypted(ni)) {
ntfs_warning(vi->i_sb, "Changes in inode size "
"are not supported yet for "
"%s files, ignoring.",
NInoCompressed(ni) ?
"compressed" : "encrypted");
err = -EOPNOTSUPP;
} else {
truncate_setsize(vi, attr->ia_size);
ntfs_truncate_vfs(vi);
}
if (err || ia_valid == ATTR_SIZE)
goto out;
} else {
/*
* We skipped the truncate but must still update
* timestamps.
*/
ia_valid |= ATTR_MTIME | ATTR_CTIME;
}
}
if (ia_valid & ATTR_ATIME)
vi->i_atime = attr->ia_atime;
if (ia_valid & ATTR_MTIME)
vi->i_mtime = attr->ia_mtime;
if (ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(vi, attr->ia_ctime);
mark_inode_dirty(vi);
out:
return err;
}
/**
* __ntfs_write_inode - write out a dirty inode
* @vi: inode to write out
* @sync: if true, write out synchronously
*
* Write out a dirty inode to disk including any extent inodes if present.
*
* If @sync is true, commit the inode to disk and wait for io completion. This
* is done using write_mft_record().
*
* If @sync is false, just schedule the write to happen but do not wait for i/o
* completion. In 2.6 kernels, scheduling usually happens just by virtue of
* marking the page (and in this case mft record) dirty but we do not implement
* this yet as write_mft_record() largely ignores the @sync parameter and
* always performs synchronous writes.
*
* Return 0 on success and -errno on error.
*/
int __ntfs_write_inode(struct inode *vi, int sync)
{
sle64 nt;
ntfs_inode *ni = NTFS_I(vi);
ntfs_attr_search_ctx *ctx;
MFT_RECORD *m;
STANDARD_INFORMATION *si;
int err = 0;
bool modified = false;
ntfs_debug("Entering for %sinode 0x%lx.", NInoAttr(ni) ? "attr " : "",
vi->i_ino);
/*
* Dirty attribute inodes are written via their real inodes so just
 * clean them here. Access time updates are taken care of when the
* real inode is written.
*/
if (NInoAttr(ni)) {
NInoClearDirty(ni);
ntfs_debug("Done.");
return 0;
}
/* Map, pin, and lock the mft record belonging to the inode. */
m = map_mft_record(ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
goto err_out;
}
/* Update the access times in the standard information attribute. */
ctx = ntfs_attr_get_search_ctx(ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto unm_err_out;
}
err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
ntfs_attr_put_search_ctx(ctx);
goto unm_err_out;
}
si = (STANDARD_INFORMATION*)((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset));
/* Update the access times if they have changed. */
nt = utc2ntfs(vi->i_mtime);
if (si->last_data_change_time != nt) {
ntfs_debug("Updating mtime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino, (long long)
sle64_to_cpu(si->last_data_change_time),
(long long)sle64_to_cpu(nt));
si->last_data_change_time = nt;
modified = true;
}
nt = utc2ntfs(inode_get_ctime(vi));
if (si->last_mft_change_time != nt) {
ntfs_debug("Updating ctime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino, (long long)
sle64_to_cpu(si->last_mft_change_time),
(long long)sle64_to_cpu(nt));
si->last_mft_change_time = nt;
modified = true;
}
nt = utc2ntfs(vi->i_atime);
if (si->last_access_time != nt) {
ntfs_debug("Updating atime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino,
(long long)sle64_to_cpu(si->last_access_time),
(long long)sle64_to_cpu(nt));
si->last_access_time = nt;
modified = true;
}
/*
* If we just modified the standard information attribute we need to
* mark the mft record it is in dirty. We do this manually so that
* mark_inode_dirty() is not called which would redirty the inode and
* hence result in an infinite loop of trying to write the inode.
* There is no need to mark the base inode nor the base mft record
* dirty, since we are going to write this mft record below in any case
* and the base mft record may actually not have been modified so it
* might not need to be written out.
* NOTE: It is not a problem when the inode for $MFT itself is being
* written out as mark_ntfs_record_dirty() will only set I_DIRTY_PAGES
* on the $MFT inode and hence __ntfs_write_inode() will not be
* re-invoked because of it which in turn is ok since the dirtied mft
* record will be cleaned and written out to disk below, i.e. before
* this function returns.
*/
if (modified) {
flush_dcache_mft_record_page(ctx->ntfs_ino);
if (!NInoTestSetDirty(ctx->ntfs_ino))
mark_ntfs_record_dirty(ctx->ntfs_ino->page,
ctx->ntfs_ino->page_ofs);
}
ntfs_attr_put_search_ctx(ctx);
/* Now the access times are updated, write the base mft record. */
if (NInoDirty(ni))
err = write_mft_record(ni, m, sync);
/* Write all attached extent mft records. */
mutex_lock(&ni->extent_lock);
if (ni->nr_extents > 0) {
ntfs_inode **extent_nis = ni->ext.extent_ntfs_inos;
int i;
ntfs_debug("Writing %i extent inodes.", ni->nr_extents);
for (i = 0; i < ni->nr_extents; i++) {
ntfs_inode *tni = extent_nis[i];
if (NInoDirty(tni)) {
MFT_RECORD *tm = map_mft_record(tni);
int ret;
if (IS_ERR(tm)) {
if (!err || err == -ENOMEM)
err = PTR_ERR(tm);
continue;
}
ret = write_mft_record(tni, tm, sync);
unmap_mft_record(tni);
if (unlikely(ret)) {
if (!err || err == -ENOMEM)
err = ret;
}
}
}
}
mutex_unlock(&ni->extent_lock);
unmap_mft_record(ni);
if (unlikely(err))
goto err_out;
ntfs_debug("Done.");
return 0;
unm_err_out:
unmap_mft_record(ni);
err_out:
if (err == -ENOMEM) {
ntfs_warning(vi->i_sb, "Not enough memory to write inode. "
"Marking the inode dirty again, so the VFS "
"retries later.");
mark_inode_dirty(vi);
} else {
ntfs_error(vi->i_sb, "Failed (error %i): Run chkdsk.", -err);
NVolSetErrors(ni->vol);
}
return err;
}
#endif /* NTFS_RW */
| linux-master | fs/ntfs/inode.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* index.c - NTFS kernel index handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2004-2005 Anton Altaparmakov
*/
#include <linux/slab.h>
#include "aops.h"
#include "collate.h"
#include "debug.h"
#include "index.h"
#include "ntfs.h"
/**
* ntfs_index_ctx_get - allocate and initialize a new index context
* @idx_ni: ntfs index inode with which to initialize the context
*
* Allocate a new index context, initialize it with @idx_ni and return it.
* Return NULL if allocation failed.
*
* Locking: Caller must hold i_mutex on the index inode.
*/
ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
{
ntfs_index_context *ictx;
ictx = kmem_cache_alloc(ntfs_index_ctx_cache, GFP_NOFS);
if (ictx)
*ictx = (ntfs_index_context){ .idx_ni = idx_ni };
return ictx;
}
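/*
 * Minimal usage sketch (illustrative only, following the contracts documented
 * on ntfs_index_lookup() below; "my_key" and its type are hypothetical):
 *
 *	ntfs_index_context *ictx = ntfs_index_ctx_get(idx_ni);
 *	if (!ictx)
 *		return -ENOMEM;
 *	err = ntfs_index_lookup(&my_key, sizeof(my_key), ictx);
 *	if (!err) {
 *		... use ictx->entry, ictx->data and ictx->data_len ...
 *	}
 *	ntfs_index_ctx_put(ictx);
 */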
/**
* ntfs_index_ctx_put - release an index context
* @ictx: index context to free
*
* Release the index context @ictx, releasing all associated resources.
*
* Locking: Caller must hold i_mutex on the index inode.
*/
void ntfs_index_ctx_put(ntfs_index_context *ictx)
{
if (ictx->entry) {
if (ictx->is_in_root) {
if (ictx->actx)
ntfs_attr_put_search_ctx(ictx->actx);
if (ictx->base_ni)
unmap_mft_record(ictx->base_ni);
} else {
struct page *page = ictx->page;
if (page) {
BUG_ON(!PageLocked(page));
unlock_page(page);
ntfs_unmap_page(page);
}
}
}
kmem_cache_free(ntfs_index_ctx_cache, ictx);
return;
}
/**
* ntfs_index_lookup - find a key in an index and return its index entry
* @key: [IN] key for which to search in the index
* @key_len: [IN] length of @key in bytes
* @ictx: [IN/OUT] context describing the index and the returned entry
*
* Before calling ntfs_index_lookup(), @ictx must have been obtained from a
* call to ntfs_index_ctx_get().
*
* Look for the @key in the index specified by the index lookup context @ictx.
* ntfs_index_lookup() walks the contents of the index looking for the @key.
*
* If the @key is found in the index, 0 is returned and @ictx is setup to
* describe the index entry containing the matching @key. @ictx->entry is the
* index entry and @ictx->data and @ictx->data_len are the index entry data and
* its length in bytes, respectively.
*
* If the @key is not found in the index, -ENOENT is returned and @ictx is
* setup to describe the index entry whose key collates immediately after the
* search @key, i.e. this is the position in the index at which an index entry
* with a key of @key would need to be inserted.
*
* If an error occurs return the negative error code and @ictx is left
* untouched.
*
* When finished with the entry and its data, call ntfs_index_ctx_put() to free
* the context and other associated resources.
*
* If the index entry was modified, call flush_dcache_index_entry_page()
* immediately after the modification and either ntfs_index_entry_mark_dirty()
* or ntfs_index_entry_write() before the call to ntfs_index_ctx_put() to
* ensure that the changes are written to disk.
*
* Locking: - Caller must hold i_mutex on the index inode.
* - Each page cache page in the index allocation mapping must be
 *	      locked whilst being accessed, otherwise we may find a corrupt
 *	      page due to it being under ->writepage at the moment, which
 *	      applies the mst protection fixups before writing out, then
 *	      removes them again after the write is complete, after which it
 *	      unlocks the page.
*/
int ntfs_index_lookup(const void *key, const int key_len,
ntfs_index_context *ictx)
{
VCN vcn, old_vcn;
ntfs_inode *idx_ni = ictx->idx_ni;
ntfs_volume *vol = idx_ni->vol;
struct super_block *sb = vol->sb;
ntfs_inode *base_ni = idx_ni->ext.base_ntfs_ino;
MFT_RECORD *m;
INDEX_ROOT *ir;
INDEX_ENTRY *ie;
INDEX_ALLOCATION *ia;
u8 *index_end, *kaddr;
ntfs_attr_search_ctx *actx;
struct address_space *ia_mapping;
struct page *page;
int rc, err = 0;
ntfs_debug("Entering.");
BUG_ON(!NInoAttr(idx_ni));
BUG_ON(idx_ni->type != AT_INDEX_ALLOCATION);
BUG_ON(idx_ni->nr_extents != -1);
BUG_ON(!base_ni);
BUG_ON(!key);
BUG_ON(key_len <= 0);
if (!ntfs_is_collation_rule_supported(
idx_ni->itype.index.collation_rule)) {
ntfs_error(sb, "Index uses unsupported collation rule 0x%x. "
"Aborting lookup.", le32_to_cpu(
idx_ni->itype.index.collation_rule));
return -EOPNOTSUPP;
}
/* Get hold of the mft record for the index inode. */
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
ntfs_error(sb, "map_mft_record() failed with error code %ld.",
-PTR_ERR(m));
return PTR_ERR(m);
}
actx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!actx)) {
err = -ENOMEM;
goto err_out;
}
/* Find the index root attribute in the mft record. */
err = ntfs_attr_lookup(AT_INDEX_ROOT, idx_ni->name, idx_ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, actx);
if (unlikely(err)) {
if (err == -ENOENT) {
ntfs_error(sb, "Index root attribute missing in inode "
"0x%lx.", idx_ni->mft_no);
err = -EIO;
}
goto err_out;
}
/* Get to the index root value (it has been verified in read_inode). */
ir = (INDEX_ROOT*)((u8*)actx->attr +
le16_to_cpu(actx->attr->data.resident.value_offset));
index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
/* The first index entry. */
ie = (INDEX_ENTRY*)((u8*)&ir->index +
le32_to_cpu(ir->index.entries_offset));
/*
* Loop until we exceed valid memory (corruption case) or until we
* reach the last entry.
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
/* Bounds checks. */
if ((u8*)ie < (u8*)actx->mrec || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
(u8*)ie + le16_to_cpu(ie->length) > index_end)
goto idx_err_out;
/*
* The last entry cannot contain a key. It can however contain
* a pointer to a child node in the B+tree so we just break out.
*/
if (ie->flags & INDEX_ENTRY_END)
break;
/* Further bounds checks. */
if ((u32)sizeof(INDEX_ENTRY_HEADER) +
le16_to_cpu(ie->key_length) >
le16_to_cpu(ie->data.vi.data_offset) ||
(u32)le16_to_cpu(ie->data.vi.data_offset) +
le16_to_cpu(ie->data.vi.data_length) >
le16_to_cpu(ie->length))
goto idx_err_out;
/* If the keys match perfectly, we setup @ictx and return 0. */
if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
&ie->key, key_len)) {
ir_done:
ictx->is_in_root = true;
ictx->ir = ir;
ictx->actx = actx;
ictx->base_ni = base_ni;
ictx->ia = NULL;
ictx->page = NULL;
done:
ictx->entry = ie;
ictx->data = (u8*)ie +
le16_to_cpu(ie->data.vi.data_offset);
ictx->data_len = le16_to_cpu(ie->data.vi.data_length);
ntfs_debug("Done.");
return err;
}
/*
* Not a perfect match, need to do full blown collation so we
* know which way in the B+tree we have to go.
*/
rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
key_len, &ie->key, le16_to_cpu(ie->key_length));
/*
* If @key collates before the key of the current entry, there
* is definitely no such key in this index but we might need to
* descend into the B+tree so we just break out of the loop.
*/
if (rc == -1)
break;
/*
* A match should never happen as the memcmp() call should have
		 * caught it, but we still treat it correctly.
*/
if (!rc)
goto ir_done;
/* The keys are not equal, continue the search. */
}
/*
* We have finished with this index without success. Check for the
* presence of a child node and if not present setup @ictx and return
* -ENOENT.
*/
if (!(ie->flags & INDEX_ENTRY_NODE)) {
ntfs_debug("Entry not found.");
err = -ENOENT;
goto ir_done;
} /* Child node present, descend into it. */
/* Consistency check: Verify that an index allocation exists. */
if (!NInoIndexAllocPresent(idx_ni)) {
ntfs_error(sb, "No index allocation attribute but index entry "
"requires one. Inode 0x%lx is corrupt or "
"driver bug.", idx_ni->mft_no);
goto err_out;
}
/* Get the starting vcn of the index_block holding the child node. */
vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
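	/*
	 * Illustration (consistent with the arithmetic above): for an
	 * INDEX_ENTRY_NODE entry of length 0x58, the child node VCN is the
	 * sle64 occupying its last eight bytes, i.e. at offset 0x50 from
	 * the start of the entry.
	 */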
ia_mapping = VFS_I(idx_ni)->i_mapping;
/*
* We are done with the index root and the mft record. Release them,
* otherwise we deadlock with ntfs_map_page().
*/
ntfs_attr_put_search_ctx(actx);
unmap_mft_record(base_ni);
m = NULL;
actx = NULL;
descend_into_child_node:
/*
* Convert vcn to index into the index allocation attribute in units
* of PAGE_SIZE and map the page cache page, reading it from
* disk if necessary.
*/
page = ntfs_map_page(ia_mapping, vcn <<
idx_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
if (IS_ERR(page)) {
ntfs_error(sb, "Failed to map index page, error %ld.",
-PTR_ERR(page));
err = PTR_ERR(page);
goto err_out;
}
lock_page(page);
kaddr = (u8*)page_address(page);
fast_descend_into_child_node:
/* Get to the index allocation block. */
ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
idx_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
/* Bounds checks. */
if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Out of bounds check failed. Corrupt inode "
"0x%lx or driver bug.", idx_ni->mft_no);
goto unm_err_out;
}
/* Catch multi sector transfer fixup errors. */
if (unlikely(!ntfs_is_indx_record(ia->magic))) {
ntfs_error(sb, "Index record with vcn 0x%llx is corrupt. "
"Corrupt inode 0x%lx. Run chkdsk.",
(long long)vcn, idx_ni->mft_no);
goto unm_err_out;
}
if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
"different from expected VCN (0x%llx). Inode "
"0x%lx is corrupt or driver bug.",
(unsigned long long)
sle64_to_cpu(ia->index_block_vcn),
(unsigned long long)vcn, idx_ni->mft_no);
goto unm_err_out;
}
if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
idx_ni->itype.index.block_size) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx has "
"a size (%u) differing from the index "
"specified size (%u). Inode is corrupt or "
"driver bug.", (unsigned long long)vcn,
idx_ni->mft_no,
le32_to_cpu(ia->index.allocated_size) + 0x18,
idx_ni->itype.index.block_size);
goto unm_err_out;
}
index_end = (u8*)ia + idx_ni->itype.index.block_size;
if (index_end > kaddr + PAGE_SIZE) {
ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
"crosses page boundary. Impossible! Cannot "
"access! This is probably a bug in the "
"driver.", (unsigned long long)vcn,
idx_ni->mft_no);
goto unm_err_out;
}
index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
if (index_end > (u8*)ia + idx_ni->itype.index.block_size) {
ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of inode "
"0x%lx exceeds maximum size.",
(unsigned long long)vcn, idx_ni->mft_no);
goto unm_err_out;
}
/* The first index entry. */
ie = (INDEX_ENTRY*)((u8*)&ia->index +
le32_to_cpu(ia->index.entries_offset));
/*
* Iterate similarly to the big loop above, but applied to the index
* buffer: loop until we exceed valid memory (corruption case) or until
* we reach the last entry.
*/
for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
/* Bounds checks. */
if ((u8*)ie < (u8*)ia || (u8*)ie +
sizeof(INDEX_ENTRY_HEADER) > index_end ||
(u8*)ie + le16_to_cpu(ie->length) > index_end) {
ntfs_error(sb, "Index entry out of bounds in inode "
"0x%lx.", idx_ni->mft_no);
goto unm_err_out;
}
/*
* The last entry cannot contain a key. It can, however, contain
* a pointer to a child node in the B+tree so we just break out.
*/
if (ie->flags & INDEX_ENTRY_END)
break;
/* Further bounds checks. */
if ((u32)sizeof(INDEX_ENTRY_HEADER) +
le16_to_cpu(ie->key_length) >
le16_to_cpu(ie->data.vi.data_offset) ||
(u32)le16_to_cpu(ie->data.vi.data_offset) +
le16_to_cpu(ie->data.vi.data_length) >
le16_to_cpu(ie->length)) {
ntfs_error(sb, "Index entry out of bounds in inode "
"0x%lx.", idx_ni->mft_no);
goto unm_err_out;
}
/* If the keys match perfectly, we set up @ictx and return 0. */
if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
&ie->key, key_len)) {
ia_done:
ictx->is_in_root = false;
ictx->actx = NULL;
ictx->base_ni = NULL;
ictx->ia = ia;
ictx->page = page;
goto done;
}
/*
* Not a perfect match, need to do full-blown collation so we
* know which way in the B+tree we have to go.
*/
rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
key_len, &ie->key, le16_to_cpu(ie->key_length));
/*
* If @key collates before the key of the current entry, there
* is definitely no such key in this index but we might need to
* descend into the B+tree so we just break out of the loop.
*/
if (rc == -1)
break;
/*
* A match should never happen as the memcmp() call should have
* caught it, but we still treat it correctly.
*/
if (!rc)
goto ia_done;
/* The keys are not equal, continue the search. */
}
/*
* We have finished with this index buffer without success. Check for
* the presence of a child node and if not present return -ENOENT.
*/
if (!(ie->flags & INDEX_ENTRY_NODE)) {
ntfs_debug("Entry not found.");
err = -ENOENT;
goto ia_done;
}
if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
ntfs_error(sb, "Index entry with child node found in a leaf "
"node in inode 0x%lx.", idx_ni->mft_no);
goto unm_err_out;
}
/* Child node present, descend into it. */
old_vcn = vcn;
vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
if (vcn >= 0) {
/*
* If vcn is in the same page cache page as old_vcn we recycle
* the mapped page.
*/
if (old_vcn << vol->cluster_size_bits >>
PAGE_SHIFT == vcn <<
vol->cluster_size_bits >>
PAGE_SHIFT)
goto fast_descend_into_child_node;
unlock_page(page);
ntfs_unmap_page(page);
goto descend_into_child_node;
}
ntfs_error(sb, "Negative child node vcn in inode 0x%lx.",
idx_ni->mft_no);
unm_err_out:
unlock_page(page);
ntfs_unmap_page(page);
err_out:
if (!err)
err = -EIO;
if (actx)
ntfs_attr_put_search_ctx(actx);
if (m)
unmap_mft_record(base_ni);
return err;
idx_err_out:
ntfs_error(sb, "Corrupt index. Aborting lookup.");
goto err_out;
}
| linux-master | fs/ntfs/index.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* runlist.c - NTFS runlist handling code. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (c) 2002-2005 Richard Russon
*/
#include "debug.h"
#include "dir.h"
#include "endian.h"
#include "malloc.h"
#include "ntfs.h"
/**
* ntfs_rl_mm - runlist memmove
*
* It is up to the caller to serialize access to the runlist @base.
*/
static inline void ntfs_rl_mm(runlist_element *base, int dst, int src,
int size)
{
if (likely((dst != src) && (size > 0)))
memmove(base + dst, base + src, size * sizeof(*base));
}
/**
* ntfs_rl_mc - runlist memory copy
*
* It is up to the caller to serialize access to the runlists @dstbase and
* @srcbase.
*/
static inline void ntfs_rl_mc(runlist_element *dstbase, int dst,
runlist_element *srcbase, int src, int size)
{
if (likely(size > 0))
memcpy(dstbase + dst, srcbase + src, size * sizeof(*dstbase));
}
/**
* ntfs_rl_realloc - Reallocate memory for runlists
* @rl: original runlist
* @old_size: number of runlist elements in the original runlist @rl
* @new_size: number of runlist elements we need space for
*
* As the runlists grow, more memory will be required. To prevent the
* kernel from having to allocate and reallocate large numbers of small bits of
* memory, this function returns an entire page of memory.
*
* It is up to the caller to serialize access to the runlist @rl.
*
* N.B. If the new allocation doesn't require a different number of pages in
* memory, the function will return the original pointer.
*
* On success, return a pointer to the newly allocated, or recycled, memory.
* On error, return -errno. The following error codes are defined:
* -ENOMEM - Not enough memory to allocate runlist array.
* -EINVAL - Invalid parameters were passed in.
*/
static inline runlist_element *ntfs_rl_realloc(runlist_element *rl,
int old_size, int new_size)
{
runlist_element *new_rl;
old_size = PAGE_ALIGN(old_size * sizeof(*rl));
new_size = PAGE_ALIGN(new_size * sizeof(*rl));
if (old_size == new_size)
return rl;
new_rl = ntfs_malloc_nofs(new_size);
if (unlikely(!new_rl))
return ERR_PTR(-ENOMEM);
if (likely(rl != NULL)) {
if (unlikely(old_size > new_size))
old_size = new_size;
memcpy(new_rl, rl, old_size);
ntfs_free(rl);
}
return new_rl;
}
/**
* ntfs_rl_realloc_nofail - Reallocate memory for runlists
* @rl: original runlist
* @old_size: number of runlist elements in the original runlist @rl
* @new_size: number of runlist elements we need space for
*
* As the runlists grow, more memory will be required. To prevent the
* kernel from having to allocate and reallocate large numbers of small bits of
* memory, this function returns an entire page of memory.
*
* This function guarantees that the allocation will succeed. It will sleep
* for as long as it takes to complete the allocation.
*
* It is up to the caller to serialize access to the runlist @rl.
*
* N.B. If the new allocation doesn't require a different number of pages in
* memory, the function will return the original pointer.
*
* On success, return a pointer to the newly allocated, or recycled, memory.
* On error, return -errno. The following error codes are defined:
* -ENOMEM - Not enough memory to allocate runlist array.
* -EINVAL - Invalid parameters were passed in.
*/
static inline runlist_element *ntfs_rl_realloc_nofail(runlist_element *rl,
int old_size, int new_size)
{
runlist_element *new_rl;
old_size = PAGE_ALIGN(old_size * sizeof(*rl));
new_size = PAGE_ALIGN(new_size * sizeof(*rl));
if (old_size == new_size)
return rl;
new_rl = ntfs_malloc_nofs_nofail(new_size);
BUG_ON(!new_rl);
if (likely(rl != NULL)) {
if (unlikely(old_size > new_size))
old_size = new_size;
memcpy(new_rl, rl, old_size);
ntfs_free(rl);
}
return new_rl;
}
/**
* ntfs_are_rl_mergeable - test if two runlists can be joined together
* @dst: original runlist
* @src: new runlist to test for mergeability with @dst
*
* Test if two runlists can be joined together. For this, their VCNs and LCNs
* must be adjacent.
*
* It is up to the caller to serialize access to the runlists @dst and @src.
*
* Return: true Success, the runlists can be merged.
* false Failure, the runlists cannot be merged.
*/
static inline bool ntfs_are_rl_mergeable(runlist_element *dst,
runlist_element *src)
{
BUG_ON(!dst);
BUG_ON(!src);
/* We can merge unmapped regions even if they are misaligned. */
if ((dst->lcn == LCN_RL_NOT_MAPPED) && (src->lcn == LCN_RL_NOT_MAPPED))
return true;
/* If the runs are misaligned, we cannot merge them. */
if ((dst->vcn + dst->length) != src->vcn)
return false;
/* If both runs are non-sparse and contiguous, we can merge them. */
if ((dst->lcn >= 0) && (src->lcn >= 0) &&
((dst->lcn + dst->length) == src->lcn))
return true;
/* If we are merging two holes, we can merge them. */
if ((dst->lcn == LCN_HOLE) && (src->lcn == LCN_HOLE))
return true;
/* Cannot merge. */
return false;
}
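/*
* Purely illustrative example (values invented): the runs
* (vcn 0, lcn 100, length 10) and (vcn 10, lcn 110, length 5) are
* mergeable as both their vcns and their lcns are adjacent, whereas
* (vcn 10, lcn 200, length 5) is not, as the lcns are not contiguous.
* Two vcn-adjacent holes always merge, and two unmapped regions merge
* even if their vcns are misaligned.
*/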
/**
* __ntfs_rl_merge - merge two runlists without testing if they can be merged
* @dst: original, destination runlist
* @src: new runlist to merge with @dst
*
* Merge the two runlists, writing into the destination runlist @dst. The
* caller must make sure the runlists can be merged or this will corrupt the
* destination runlist.
*
* It is up to the caller to serialize access to the runlists @dst and @src.
*/
static inline void __ntfs_rl_merge(runlist_element *dst, runlist_element *src)
{
dst->length += src->length;
}
/**
* ntfs_rl_append - append a runlist after a given element
* @dst: original runlist to be worked on
* @dsize: number of elements in @dst (including end marker)
* @src: runlist to be inserted into @dst
* @ssize: number of elements in @src (excluding end marker)
* @loc: append the new runlist @src after this element in @dst
*
* Append the runlist @src after element @loc in @dst. Merge the right end of
* the new runlist, if necessary. Adjust the size of the hole before the
* appended runlist.
*
* It is up to the caller to serialize access to the runlists @dst and @src.
*
* On success, return a pointer to the new, combined, runlist. Note, both
* runlists @dst and @src are deallocated before returning so you cannot use
* the pointers for anything any more. (Strictly speaking the returned runlist
* may be the same as @dst but this is irrelevant.)
*
* On error, return -errno. Both runlists are left unmodified. The following
* error codes are defined:
* -ENOMEM - Not enough memory to allocate runlist array.
* -EINVAL - Invalid parameters were passed in.
*/
static inline runlist_element *ntfs_rl_append(runlist_element *dst,
int dsize, runlist_element *src, int ssize, int loc)
{
bool right = false; /* Right end of @src needs merging. */
int marker; /* End of the inserted runs. */
BUG_ON(!dst);
BUG_ON(!src);
/* First, check if the right hand end needs merging. */
if ((loc + 1) < dsize)
right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
/* Space required: @dst size + @src size, less one if we merged. */
dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - right);
if (IS_ERR(dst))
return dst;
/*
* We are guaranteed to succeed from here so we can start modifying the
* original runlists.
*/
/* First, merge the right hand end, if necessary. */
if (right)
__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
/* First run after the @src runs that have been inserted. */
marker = loc + ssize + 1;
/* Move the tail of @dst out of the way, then copy in @src. */
ntfs_rl_mm(dst, marker, loc + 1 + right, dsize - (loc + 1 + right));
ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
/* Adjust the size of the preceding hole. */
dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
/* We may have changed the length of the file, so fix the end marker */
if (dst[marker].lcn == LCN_ENOENT)
dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
return dst;
}
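/*
* Purely illustrative example (values invented): appending
* src = [ (vcn 5, lcn 500, length 5) ] after element 0 of
* dst = [ (vcn 0, LCN_RL_NOT_MAPPED, length 20),
* (vcn 20, LCN_ENOENT, length 0) ] shrinks the preceding unmapped run
* to length 5 and, as the end marker now directly follows the inserted
* run, pulls it back to vcn 10:
*
*	(vcn 0, LCN_RL_NOT_MAPPED, length 5)
*	(vcn 5, lcn 500, length 5)
*	(vcn 10, LCN_ENOENT, length 0)
*/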
/**
* ntfs_rl_insert - insert a runlist into another
* @dst: original runlist to be worked on
* @dsize: number of elements in @dst (including end marker)
* @src: new runlist to be inserted
* @ssize: number of elements in @src (excluding end marker)
* @loc: insert the new runlist @src before this element in @dst
*
* Insert the runlist @src before element @loc in the runlist @dst. Merge the
* left end of the new runlist, if necessary. Adjust the size of the hole
* after the inserted runlist.
*
* It is up to the caller to serialize access to the runlists @dst and @src.
*
* On success, return a pointer to the new, combined, runlist. Note, both
* runlists @dst and @src are deallocated before returning so you cannot use
* the pointers for anything any more. (Strictly speaking the returned runlist
* may be the same as @dst but this is irrelevant.)
*
* On error, return -errno. Both runlists are left unmodified. The following
* error codes are defined:
* -ENOMEM - Not enough memory to allocate runlist array.
* -EINVAL - Invalid parameters were passed in.
*/
static inline runlist_element *ntfs_rl_insert(runlist_element *dst,
int dsize, runlist_element *src, int ssize, int loc)
{
bool left = false; /* Left end of @src needs merging. */
bool disc = false; /* Discontinuity between @dst and @src. */
int marker; /* End of the inserted runs. */
BUG_ON(!dst);
BUG_ON(!src);
/*
* disc => Discontinuity between the end of @dst and the start of @src.
* This means we might need to insert a "not mapped" run.
*/
if (loc == 0)
disc = (src[0].vcn > 0);
else {
s64 merged_length;
left = ntfs_are_rl_mergeable(dst + loc - 1, src);
merged_length = dst[loc - 1].length;
if (left)
merged_length += src->length;
disc = (src[0].vcn > dst[loc - 1].vcn + merged_length);
}
/*
* Space required: @dst size + @src size, less one if we merged, plus
* one if there was a discontinuity.
*/
dst = ntfs_rl_realloc(dst, dsize, dsize + ssize - left + disc);
if (IS_ERR(dst))
return dst;
/*
* We are guaranteed to succeed from here so we can start modifying the
* original runlist.
*/
if (left)
__ntfs_rl_merge(dst + loc - 1, src);
/*
* First run after the @src runs that have been inserted.
* Nominally, @marker equals @loc + @ssize, i.e. location + number of
* runs in @src. However, if @left, then the first run in @src has
* been merged with one in @dst. And if @disc, then @dst and @src do
* not meet and we need an extra run to fill the gap.
*/
marker = loc + ssize - left + disc;
/* Move the tail of @dst out of the way, then copy in @src. */
ntfs_rl_mm(dst, marker, loc, dsize - loc);
ntfs_rl_mc(dst, loc + disc, src, left, ssize - left);
/* Adjust the VCN of the first run after the insertion... */
dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
/* ... and the length. */
if (dst[marker].lcn == LCN_HOLE || dst[marker].lcn == LCN_RL_NOT_MAPPED)
dst[marker].length = dst[marker + 1].vcn - dst[marker].vcn;
/* Writing beyond the end of the file and there is a discontinuity. */
if (disc) {
if (loc > 0) {
dst[loc].vcn = dst[loc - 1].vcn + dst[loc - 1].length;
dst[loc].length = dst[loc + 1].vcn - dst[loc].vcn;
} else {
dst[loc].vcn = 0;
dst[loc].length = dst[loc + 1].vcn;
}
dst[loc].lcn = LCN_RL_NOT_MAPPED;
}
return dst;
}
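/*
* Purely illustrative example (values invented): inserting
* src = [ (vcn 10, lcn 100, length 5) ] before element 1 of
* dst = [ (vcn 0, LCN_RL_NOT_MAPPED, length 10),
* (vcn 10, LCN_ENOENT, length 0) ] yields
*
*	(vcn 0, LCN_RL_NOT_MAPPED, length 10)
*	(vcn 10, lcn 100, length 5)
*	(vcn 15, LCN_ENOENT, length 0)
*
* No merge happens (an unmapped run and a real run never merge) and
* there is no discontinuity, so only the end marker is pushed out.
*/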
/**
* ntfs_rl_replace - overwrite a runlist element with another runlist
* @dst: original runlist to be worked on
* @dsize: number of elements in @dst (including end marker)
* @src: new runlist to be inserted
* @ssize: number of elements in @src (excluding end marker)
* @loc: index in runlist @dst to overwrite with @src
*
* Replace the runlist element @dst at @loc with @src. Merge the left and
* right ends of the inserted runlist, if necessary.
*
* It is up to the caller to serialize access to the runlists @dst and @src.
*
* On success, return a pointer to the new, combined, runlist. Note, both
* runlists @dst and @src are deallocated before returning so you cannot use
* the pointers for anything any more. (Strictly speaking the returned runlist
* may be the same as @dst but this is irrelevant.)
*
* On error, return -errno. Both runlists are left unmodified. The following
* error codes are defined:
* -ENOMEM - Not enough memory to allocate runlist array.
* -EINVAL - Invalid parameters were passed in.
*/
static inline runlist_element *ntfs_rl_replace(runlist_element *dst,
int dsize, runlist_element *src, int ssize, int loc)
{
signed delta;
bool left = false; /* Left end of @src needs merging. */
bool right = false; /* Right end of @src needs merging. */
int tail; /* Start of tail of @dst. */
int marker; /* End of the inserted runs. */
BUG_ON(!dst);
BUG_ON(!src);
/* First, see if the left and right ends need merging. */
if ((loc + 1) < dsize)
right = ntfs_are_rl_mergeable(src + ssize - 1, dst + loc + 1);
if (loc > 0)
left = ntfs_are_rl_mergeable(dst + loc - 1, src);
/*
* Allocate some space. We will need less if the left, right, or both
* ends get merged. The -1 accounts for the run being replaced.
*/
delta = ssize - 1 - left - right;
if (delta > 0) {
dst = ntfs_rl_realloc(dst, dsize, dsize + delta);
if (IS_ERR(dst))
return dst;
}
/*
* We are guaranteed to succeed from here so we can start modifying the
* original runlists.
*/
/* First, merge the left and right ends, if necessary. */
if (right)
__ntfs_rl_merge(src + ssize - 1, dst + loc + 1);
if (left)
__ntfs_rl_merge(dst + loc - 1, src);
/*
* Offset of the tail of @dst. This needs to be moved out of the way
* to make space for the runs to be copied from @src, i.e. the first
* run of the tail of @dst.
* Nominally, @tail equals @loc + 1, i.e. location, skipping the
* replaced run. However, if @right, then one of @dst's runs is
* already merged into @src.
*/
tail = loc + right + 1;
/*
* First run after the @src runs that have been inserted, i.e. where
* the tail of @dst needs to be moved to.
* Nominally, @marker equals @loc + @ssize, i.e. location + number of
* runs in @src. However, if @left, then the first run in @src has
* been merged with one in @dst.
*/
marker = loc + ssize - left;
/* Move the tail of @dst out of the way, then copy in @src. */
ntfs_rl_mm(dst, marker, tail, dsize - tail);
ntfs_rl_mc(dst, loc, src, left, ssize - left);
/* We may have changed the length of the file, so fix the end marker. */
if (dsize - tail > 0 && dst[marker].lcn == LCN_ENOENT)
dst[marker].vcn = dst[marker - 1].vcn + dst[marker - 1].length;
return dst;
}
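/*
* Purely illustrative example (values invented): replacing element 0 of
* dst = [ (vcn 0, LCN_HOLE, length 10), (vcn 10, LCN_ENOENT, length 0) ]
* with src = [ (vcn 0, lcn 100, length 10) ] yields
*
*	(vcn 0, lcn 100, length 10)
*	(vcn 10, LCN_ENOENT, length 0)
*
* Neither end merges and @src is the same size as the run it replaces,
* so no reallocation is needed.
*/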
/**
* ntfs_rl_split - insert a runlist into the centre of a hole
* @dst: original runlist to be worked on
* @dsize: number of elements in @dst (including end marker)
* @src: new runlist to be inserted
* @ssize: number of elements in @src (excluding end marker)
* @loc: index in runlist @dst at which to split and insert @src
*
* Split the runlist @dst at @loc into two and insert @src in between the two
* fragments. No merging of runlists is necessary. Adjust the size of the
* holes either side.
*
* It is up to the caller to serialize access to the runlists @dst and @src.
*
* On success, return a pointer to the new, combined, runlist. Note, both
* runlists @dst and @src are deallocated before returning so you cannot use
* the pointers for anything any more. (Strictly speaking the returned runlist
* may be the same as @dst but this is irrelevant.)
*
* On error, return -errno. Both runlists are left unmodified. The following
* error codes are defined:
* -ENOMEM - Not enough memory to allocate runlist array.
* -EINVAL - Invalid parameters were passed in.
*/
static inline runlist_element *ntfs_rl_split(runlist_element *dst, int dsize,
runlist_element *src, int ssize, int loc)
{
BUG_ON(!dst);
BUG_ON(!src);
/* Space required: @dst size + @src size + one new hole. */
dst = ntfs_rl_realloc(dst, dsize, dsize + ssize + 1);
if (IS_ERR(dst))
return dst;
/*
* We are guaranteed to succeed from here so we can start modifying the
* original runlists.
*/
/* Move the tail of @dst out of the way, then copy in @src. */
ntfs_rl_mm(dst, loc + 1 + ssize, loc, dsize - loc);
ntfs_rl_mc(dst, loc + 1, src, 0, ssize);
/* Adjust the size of the holes either side of @src. */
dst[loc].length = dst[loc+1].vcn - dst[loc].vcn;
dst[loc+ssize+1].vcn = dst[loc+ssize].vcn + dst[loc+ssize].length;
dst[loc+ssize+1].length = dst[loc+ssize+2].vcn - dst[loc+ssize+1].vcn;
return dst;
}
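/*
* Purely illustrative example (values invented): splitting the hole in
* dst = [ (vcn 0, LCN_HOLE, length 100), (vcn 100, LCN_ENOENT, length 0) ]
* at element 0 with src = [ (vcn 40, lcn 500, length 10) ] yields
*
*	(vcn 0, LCN_HOLE, length 40)
*	(vcn 40, lcn 500, length 10)
*	(vcn 50, LCN_HOLE, length 50)
*	(vcn 100, LCN_ENOENT, length 0)
*/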
/**
* ntfs_runlists_merge - merge two runlists into one
* @drl: original runlist to be worked on
* @srl: new runlist to be merged into @drl
*
* First we sanity check the two runlists @srl and @drl to make sure that they
* are sensible and can be merged. The runlist @srl must be either after the
* runlist @drl or completely within a hole (or unmapped region) in @drl.
*
* It is up to the caller to serialize access to the runlists @drl and @srl.
*
* Merging of runlists is necessary in two cases:
* 1. When attribute lists are used and a further extent is being mapped.
* 2. When new clusters are allocated to fill a hole or extend a file.
*
* There are four possible ways @srl can be merged. It can:
* - be inserted at the beginning of a hole,
* - split the hole in two and be inserted between the two fragments,
* - be appended at the end of a hole, or it can
* - replace the whole hole.
* It can also be appended to the end of the runlist, which is just a variant
* of the insert case.
*
* On success, return a pointer to the new, combined, runlist. Note, both
* runlists @drl and @srl are deallocated before returning so you cannot use
* the pointers for anything any more. (Strictly speaking the returned runlist
* may be the same as @drl but this is irrelevant.)
*
* On error, return -errno. Both runlists are left unmodified. The following
* error codes are defined:
* -ENOMEM - Not enough memory to allocate runlist array.
* -EINVAL - Invalid parameters were passed in.
* -ERANGE - The runlists overlap and cannot be merged.
*/
runlist_element *ntfs_runlists_merge(runlist_element *drl,
runlist_element *srl)
{
int di, si; /* Current index into @[ds]rl. */
int sstart; /* First index with lcn > LCN_RL_NOT_MAPPED. */
int dins; /* Index into @drl at which to insert @srl. */
int dend, send; /* Last index into @[ds]rl. */
int dfinal, sfinal; /* The last index into @[ds]rl with
lcn >= LCN_HOLE. */
int marker = 0;
VCN marker_vcn = 0;
#ifdef DEBUG
ntfs_debug("dst:");
ntfs_debug_dump_runlist(drl);
ntfs_debug("src:");
ntfs_debug_dump_runlist(srl);
#endif
/* Check for silly calling... */
if (unlikely(!srl))
return drl;
if (IS_ERR(srl) || IS_ERR(drl))
return ERR_PTR(-EINVAL);
/* Check for the case where the first mapping is being done now. */
if (unlikely(!drl)) {
drl = srl;
/* Complete the source runlist if necessary. */
if (unlikely(drl[0].vcn)) {
/* Scan to the end of the source runlist. */
for (dend = 0; likely(drl[dend].length); dend++)
;
dend++;
drl = ntfs_rl_realloc(drl, dend, dend + 1);
if (IS_ERR(drl))
return drl;
/* Insert start element at the front of the runlist. */
ntfs_rl_mm(drl, 1, 0, dend);
drl[0].vcn = 0;
drl[0].lcn = LCN_RL_NOT_MAPPED;
drl[0].length = drl[1].vcn;
}
goto finished;
}
si = di = 0;
/* Skip any unmapped start element(s) in the source runlist. */
while (srl[si].length && srl[si].lcn < LCN_HOLE)
si++;
/* Can't have an entirely unmapped source runlist. */
BUG_ON(!srl[si].length);
/* Record the starting points. */
sstart = si;
/*
* Skip forward in @drl until we reach the position where @srl needs to
* be inserted. If we reach the end of @drl, @srl just needs to be
* appended to @drl.
*/
for (; drl[di].length; di++) {
if (drl[di].vcn + drl[di].length > srl[sstart].vcn)
break;
}
dins = di;
/* Sanity check for illegal overlaps. */
if ((drl[di].vcn == srl[si].vcn) && (drl[di].lcn >= 0) &&
(srl[si].lcn >= 0)) {
ntfs_error(NULL, "Run lists overlap. Cannot merge!");
return ERR_PTR(-ERANGE);
}
/* Scan to the end of both runlists in order to know their sizes. */
for (send = si; srl[send].length; send++)
;
for (dend = di; drl[dend].length; dend++)
;
if (srl[send].lcn == LCN_ENOENT)
marker_vcn = srl[marker = send].vcn;
/* Scan to the last element with lcn >= LCN_HOLE. */
for (sfinal = send; sfinal >= 0 && srl[sfinal].lcn < LCN_HOLE; sfinal--)
;
for (dfinal = dend; dfinal >= 0 && drl[dfinal].lcn < LCN_HOLE; dfinal--)
;
{
bool start;
bool finish;
int ds = dend + 1; /* Number of elements in @drl. */
int ss = sfinal - sstart + 1; /* Number of elements of @srl to use. */
start = ((drl[dins].lcn < LCN_RL_NOT_MAPPED) || /* End of file */
(drl[dins].vcn == srl[sstart].vcn)); /* Start of hole */
finish = ((drl[dins].lcn >= LCN_RL_NOT_MAPPED) && /* End of file */
((drl[dins].vcn + drl[dins].length) <= /* End of hole */
(srl[send - 1].vcn + srl[send - 1].length)));
/* Or we will lose an end marker. */
if (finish && !drl[dins].length)
ss++;
if (marker && (drl[dins].vcn + drl[dins].length > srl[send - 1].vcn))
finish = false;
#if 0
ntfs_debug("dfinal = %i, dend = %i", dfinal, dend);
ntfs_debug("sstart = %i, sfinal = %i, send = %i", sstart, sfinal, send);
ntfs_debug("start = %i, finish = %i", start, finish);
ntfs_debug("ds = %i, ss = %i, dins = %i", ds, ss, dins);
#endif
if (start) {
if (finish)
drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins);
else
drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins);
} else {
if (finish)
drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins);
else
drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins);
}
if (IS_ERR(drl)) {
ntfs_error(NULL, "Merge failed.");
return drl;
}
ntfs_free(srl);
if (marker) {
ntfs_debug("Triggering marker code.");
for (ds = dend; drl[ds].length; ds++)
;
/* We only need to care if @srl ended after @drl. */
if (drl[ds].vcn <= marker_vcn) {
int slots = 0;
if (drl[ds].vcn == marker_vcn) {
ntfs_debug("Old marker = 0x%llx, replacing "
"with LCN_ENOENT.",
(unsigned long long)
drl[ds].lcn);
drl[ds].lcn = LCN_ENOENT;
goto finished;
}
/*
* We need to create an unmapped runlist element in
* @drl or extend an existing one before adding the
* ENOENT terminator.
*/
if (drl[ds].lcn == LCN_ENOENT) {
ds--;
slots = 1;
}
if (drl[ds].lcn != LCN_RL_NOT_MAPPED) {
/* Add an unmapped runlist element. */
if (!slots) {
drl = ntfs_rl_realloc_nofail(drl, ds,
ds + 2);
slots = 2;
}
ds++;
/* Need to set vcn if it isn't set already. */
if (slots != 1)
drl[ds].vcn = drl[ds - 1].vcn +
drl[ds - 1].length;
drl[ds].lcn = LCN_RL_NOT_MAPPED;
/* We now used up a slot. */
slots--;
}
drl[ds].length = marker_vcn - drl[ds].vcn;
/* Finally add the ENOENT terminator. */
ds++;
if (!slots)
drl = ntfs_rl_realloc_nofail(drl, ds, ds + 1);
drl[ds].vcn = marker_vcn;
drl[ds].lcn = LCN_ENOENT;
drl[ds].length = (s64)0;
}
}
}
finished:
/* The merge was completed successfully. */
ntfs_debug("Merged runlist:");
ntfs_debug_dump_runlist(drl);
return drl;
}
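/*
* Purely illustrative example (values invented): merging
* srl = [ (vcn 40, lcn 500, length 10), (vcn 50, LCN_ENOENT, length 0) ]
* into drl = [ (vcn 0, lcn 100, length 10),
* (vcn 10, LCN_RL_NOT_MAPPED, length 90),
* (vcn 100, LCN_ENOENT, length 0) ] lands @srl in the middle of the
* unmapped region, so ntfs_rl_split() is chosen and the result is
*
*	(vcn 0, lcn 100, length 10)
*	(vcn 10, LCN_RL_NOT_MAPPED, length 30)
*	(vcn 40, lcn 500, length 10)
*	(vcn 50, LCN_RL_NOT_MAPPED, length 50)
*	(vcn 100, LCN_ENOENT, length 0)
*/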
/**
* ntfs_mapping_pairs_decompress - convert mapping pairs array to runlist
* @vol: ntfs volume on which the attribute resides
* @attr: attribute record whose mapping pairs array to decompress
* @old_rl: optional runlist in which to insert @attr's runlist
*
* It is up to the caller to serialize access to the runlist @old_rl.
*
* Decompress the attribute @attr's mapping pairs array into a runlist. On
* success, return the decompressed runlist.
*
* If @old_rl is not NULL, decompressed runlist is inserted into the
* appropriate place in @old_rl and the resultant, combined runlist is
* returned. The original @old_rl is deallocated.
*
* On error, return -errno. @old_rl is left unmodified in that case.
*
* The following error codes are defined:
* -ENOMEM - Not enough memory to allocate runlist array.
* -EIO - Corrupt runlist.
* -EINVAL - Invalid parameters were passed in.
* -ERANGE - The two runlists overlap.
*
* FIXME: For now we take the conceptually simplest approach of creating
* the new runlist disregarding the already existing one and then
* splicing the two into one, if that is possible (we check for overlap
* and discard the new runlist if an overlap is present before returning
* ERR_PTR(-ERANGE)).
*/
runlist_element *ntfs_mapping_pairs_decompress(const ntfs_volume *vol,
const ATTR_RECORD *attr, runlist_element *old_rl)
{
VCN vcn; /* Current vcn. */
LCN lcn; /* Current lcn. */
s64 deltaxcn; /* Change in [vl]cn. */
runlist_element *rl; /* The output runlist. */
u8 *buf; /* Current position in mapping pairs array. */
u8 *attr_end; /* End of attribute. */
int rlsize; /* Size of runlist buffer. */
u16 rlpos; /* Current runlist position in units of
runlist_elements. */
u8 b; /* Current byte offset in buf. */
#ifdef DEBUG
/* Make sure attr exists and is non-resident. */
if (!attr || !attr->non_resident || sle64_to_cpu(
attr->data.non_resident.lowest_vcn) < (VCN)0) {
ntfs_error(vol->sb, "Invalid arguments.");
return ERR_PTR(-EINVAL);
}
#endif
/* Start at vcn = lowest_vcn and lcn 0. */
vcn = sle64_to_cpu(attr->data.non_resident.lowest_vcn);
lcn = 0;
/* Get start of the mapping pairs array. */
buf = (u8*)attr + le16_to_cpu(
attr->data.non_resident.mapping_pairs_offset);
attr_end = (u8*)attr + le32_to_cpu(attr->length);
if (unlikely(buf < (u8*)attr || buf > attr_end)) {
ntfs_error(vol->sb, "Corrupt attribute.");
return ERR_PTR(-EIO);
}
/* If the mapping pairs array is valid but empty, nothing to do. */
if (!vcn && !*buf)
return old_rl;
/* Current position in runlist array. */
rlpos = 0;
/* Allocate first page and set current runlist size to one page. */
rl = ntfs_malloc_nofs(rlsize = PAGE_SIZE);
if (unlikely(!rl))
return ERR_PTR(-ENOMEM);
/* Insert unmapped starting element if necessary. */
if (vcn) {
rl->vcn = 0;
rl->lcn = LCN_RL_NOT_MAPPED;
rl->length = vcn;
rlpos++;
}
while (buf < attr_end && *buf) {
/*
* Allocate more memory if needed, including space for the
* not-mapped and terminator elements. ntfs_malloc_nofs()
* operates on whole pages only.
*/
if (((rlpos + 3) * sizeof(*old_rl)) > rlsize) {
runlist_element *rl2;
rl2 = ntfs_malloc_nofs(rlsize + (int)PAGE_SIZE);
if (unlikely(!rl2)) {
ntfs_free(rl);
return ERR_PTR(-ENOMEM);
}
memcpy(rl2, rl, rlsize);
ntfs_free(rl);
rl = rl2;
rlsize += PAGE_SIZE;
}
/* Enter the current vcn into the current runlist element. */
rl[rlpos].vcn = vcn;
/*
* Get the change in vcn, i.e. the run length in clusters.
* Doing it this way ensures that we sign-extend negative values.
* A negative run length doesn't make any sense, but hey, I
* didn't make up the NTFS specs and Windows NT4 treats the run
* length as a signed value so that's how it is...
*/
b = *buf & 0xf;
if (b) {
if (unlikely(buf + b > attr_end))
goto io_error;
for (deltaxcn = (s8)buf[b--]; b; b--)
deltaxcn = (deltaxcn << 8) + buf[b];
} else { /* The length entry is compulsory. */
ntfs_error(vol->sb, "Missing length entry in mapping "
"pairs array.");
deltaxcn = (s64)-1;
}
/*
* Assume a negative length to indicate data corruption and
* hence clean-up and return NULL.
*/
if (unlikely(deltaxcn < 0)) {
ntfs_error(vol->sb, "Invalid length in mapping pairs "
"array.");
goto err_out;
}
/*
* Enter the current run length into the current runlist
* element.
*/
rl[rlpos].length = deltaxcn;
/* Increment the current vcn by the current run length. */
vcn += deltaxcn;
/*
* There might be no lcn change at all, as is the case for
* sparse clusters on NTFS 3.0+, in which case we set the lcn
* to LCN_HOLE.
*/
if (!(*buf & 0xf0))
rl[rlpos].lcn = LCN_HOLE;
else {
/* Get the lcn change which really can be negative. */
u8 b2 = *buf & 0xf;
b = b2 + ((*buf >> 4) & 0xf);
if (buf + b > attr_end)
goto io_error;
for (deltaxcn = (s8)buf[b--]; b > b2; b--)
deltaxcn = (deltaxcn << 8) + buf[b];
/* Change the current lcn to its new value. */
lcn += deltaxcn;
#ifdef DEBUG
/*
* On NTFS 1.2-, an lcn of -1 apparently can indicate a
* hole. But we haven't verified ourselves whether it is
* really the lcn or the deltaxcn that is -1. So if
* either is found, log a message so we can investigate
* it further!
*/
if (vol->major_ver < 3) {
if (unlikely(deltaxcn == (LCN)-1))
ntfs_error(vol->sb, "lcn delta == -1");
if (unlikely(lcn == (LCN)-1))
ntfs_error(vol->sb, "lcn == -1");
}
#endif
/* Check lcn is not below -1. */
if (unlikely(lcn < (LCN)-1)) {
ntfs_error(vol->sb, "Invalid LCN < -1 in "
"mapping pairs array.");
goto err_out;
}
/* Enter the current lcn into the runlist element. */
rl[rlpos].lcn = lcn;
}
/* Get to the next runlist element. */
rlpos++;
/* Increment the buffer position to the next mapping pair. */
buf += (*buf & 0xf) + ((*buf >> 4) & 0xf) + 1;
}
if (unlikely(buf >= attr_end))
goto io_error;
/*
* If there is a highest_vcn specified, it must be equal to the final
* vcn in the runlist - 1, or something has gone badly wrong.
*/
deltaxcn = sle64_to_cpu(attr->data.non_resident.highest_vcn);
if (unlikely(deltaxcn && vcn - 1 != deltaxcn)) {
mpa_err:
ntfs_error(vol->sb, "Corrupt mapping pairs array in "
"non-resident attribute.");
goto err_out;
}
/* Setup not mapped runlist element if this is the base extent. */
if (!attr->data.non_resident.lowest_vcn) {
VCN max_cluster;
max_cluster = ((sle64_to_cpu(
attr->data.non_resident.allocated_size) +
vol->cluster_size - 1) >>
vol->cluster_size_bits) - 1;
/*
* A highest_vcn of zero means this is a single extent
* attribute so simply terminate the runlist with LCN_ENOENT.
*/
if (deltaxcn) {
/*
* If there is a difference between the highest_vcn and
* the highest cluster, the runlist is either corrupt
* or, more likely, there are more extents following
* this one.
*/
if (deltaxcn < max_cluster) {
ntfs_debug("More extents to follow; deltaxcn "
"= 0x%llx, max_cluster = "
"0x%llx",
(unsigned long long)deltaxcn,
(unsigned long long)
max_cluster);
rl[rlpos].vcn = vcn;
vcn += rl[rlpos].length = max_cluster -
deltaxcn;
rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
rlpos++;
} else if (unlikely(deltaxcn > max_cluster)) {
ntfs_error(vol->sb, "Corrupt attribute. "
"deltaxcn = 0x%llx, "
"max_cluster = 0x%llx",
(unsigned long long)deltaxcn,
(unsigned long long)
max_cluster);
goto mpa_err;
}
}
rl[rlpos].lcn = LCN_ENOENT;
} else /* Not the base extent. There may be more extents to follow. */
rl[rlpos].lcn = LCN_RL_NOT_MAPPED;
/* Setup terminating runlist element. */
rl[rlpos].vcn = vcn;
rl[rlpos].length = (s64)0;
/* If no existing runlist was specified, we are done. */
if (!old_rl) {
ntfs_debug("Mapping pairs array successfully decompressed:");
ntfs_debug_dump_runlist(rl);
return rl;
}
/* Now combine the new and old runlists checking for overlaps. */
old_rl = ntfs_runlists_merge(old_rl, rl);
if (!IS_ERR(old_rl))
return old_rl;
ntfs_free(rl);
ntfs_error(vol->sb, "Failed to merge runlists.");
return old_rl;
io_error:
ntfs_error(vol->sb, "Corrupt attribute.");
err_out:
ntfs_free(rl);
return ERR_PTR(-EIO);
}
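/*
* Purely illustrative example (bytes invented, not from a real volume):
* consider the mapping pairs array
*
*	0x21 0x14 0x00 0x01 0x11 0x08 0xf0 0x00
*
* The header byte 0x21 announces a 1-byte run length and a 2-byte lcn
* change: length 0x14 = 20 clusters, little-endian sign-extended lcn
* change 0x0100 = 256, so lcn 0 + 256 = 256. The header byte 0x11
* announces a 1-byte length (0x08 = 8 clusters) and a 1-byte lcn change
* (0xf0 = -16, so lcn 256 - 16 = 240). The final 0x00 terminates the
* array. Assuming this is the base and only extent (lowest_vcn 0), this
* decompresses to the runlist
*
*	(vcn 0, lcn 256, length 20)
*	(vcn 20, lcn 240, length 8)
*	(vcn 28, LCN_ENOENT, length 0)
*/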
/**
* ntfs_rl_vcn_to_lcn - convert a vcn into a lcn given a runlist
* @rl: runlist to use for conversion
* @vcn: vcn to convert
*
* Convert the virtual cluster number @vcn of an attribute into a logical
* cluster number (lcn) of a device using the runlist @rl to map vcns to their
* corresponding lcns.
*
* It is up to the caller to serialize access to the runlist @rl.
*
* Since lcns must be >= 0, we use negative return codes with special meaning:
*
* Return code Meaning / Description
* ==================================================
* LCN_HOLE Hole / not allocated on disk.
* LCN_RL_NOT_MAPPED This is part of the runlist which has not been
* inserted into the runlist yet.
* LCN_ENOENT There is no such vcn in the attribute.
*
* Locking: - The caller must have locked the runlist (for reading or writing).
* - This function does not touch the lock, nor does it modify the
* runlist.
*/
LCN ntfs_rl_vcn_to_lcn(const runlist_element *rl, const VCN vcn)
{
int i;
BUG_ON(vcn < 0);
/*
* If rl is NULL, assume that we have found an unmapped runlist. The
* caller can then attempt to map it and fail appropriately if
* necessary.
*/
if (unlikely(!rl))
return LCN_RL_NOT_MAPPED;
/* Catch out of lower bounds vcn. */
if (unlikely(vcn < rl[0].vcn))
return LCN_ENOENT;
for (i = 0; likely(rl[i].length); i++) {
if (unlikely(vcn < rl[i+1].vcn)) {
if (likely(rl[i].lcn >= (LCN)0))
return rl[i].lcn + (vcn - rl[i].vcn);
return rl[i].lcn;
}
}
/*
* The terminator element is set up to the correct value, i.e. one of
* LCN_HOLE, LCN_RL_NOT_MAPPED, or LCN_ENOENT.
*/
if (likely(rl[i].lcn < (LCN)0))
return rl[i].lcn;
/* Just in case... We could replace this with BUG() some day. */
return LCN_ENOENT;
}
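/*
* Purely illustrative example (values invented): given the runlist
*
*	(vcn 0, lcn 1000, length 10)
*	(vcn 10, LCN_HOLE, length 5)
*	(vcn 15, LCN_ENOENT, length 0)
*
* vcn 3 maps to lcn 1003, vcn 12 returns LCN_HOLE, and vcn 20 returns
* LCN_ENOENT.
*/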
#ifdef NTFS_RW
/**
* ntfs_rl_find_vcn_nolock - find a vcn in a runlist
* @rl: runlist to search
* @vcn: vcn to find
*
* Find the virtual cluster number @vcn in the runlist @rl and return the
* address of the runlist element containing the @vcn on success.
*
* Return NULL if @rl is NULL or @vcn is in an unmapped part/out of bounds of
* the runlist.
*
* Locking: The runlist must be locked on entry.
*/
runlist_element *ntfs_rl_find_vcn_nolock(runlist_element *rl, const VCN vcn)
{
BUG_ON(vcn < 0);
if (unlikely(!rl || vcn < rl[0].vcn))
return NULL;
while (likely(rl->length)) {
if (unlikely(vcn < rl[1].vcn)) {
if (likely(rl->lcn >= LCN_HOLE))
return rl;
return NULL;
}
rl++;
}
if (likely(rl->lcn == LCN_ENOENT))
return rl;
return NULL;
}
/**
* ntfs_get_nr_significant_bytes - get number of bytes needed to store a number
* @n: number for which to get the number of bytes
*
* Return the number of bytes required to store @n unambiguously as
* a signed number.
*
* This is used in the context of the mapping pairs array to determine how
* many bytes will be needed in the array to store a given logical cluster
* number (lcn) or a specific run length.
*
* Return the number of bytes required. This function cannot fail.
*/
static inline int ntfs_get_nr_significant_bytes(const s64 n)
{
s64 l = n;
int i;
s8 j;
i = 0;
do {
l >>= 8;
i++;
} while (l != 0 && l != -1);
j = (n >> 8 * (i - 1)) & 0xff;
/* If the sign bit is wrong, we need an extra byte. */
if ((n < 0 && j >= 0) || (n > 0 && j < 0))
i++;
return i;
}
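/*
* Purely illustrative examples: 127 (0x7f) fits in one byte, but 128
* (0x80) needs two bytes as a single 0x80 would be read back as the
* negative value -128; similarly -2 (0xfe) fits in one byte, but -129
* needs two bytes as a single 0x7f would be read back as +127.
*/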
/**
* ntfs_get_size_for_mapping_pairs - get bytes needed for mapping pairs array
* @vol: ntfs volume (needed for the ntfs version)
* @rl: locked runlist to determine the size of the mapping pairs of
* @first_vcn: first vcn to include in the mapping pairs array
* @last_vcn: last vcn to include in the mapping pairs array
*
* Walk the locked runlist @rl and calculate the size in bytes of the mapping
* pairs array corresponding to the runlist @rl, starting at vcn @first_vcn and
* finishing with vcn @last_vcn.
*
* A @last_vcn of -1 means end of runlist and in that case the size of the
* mapping pairs array corresponding to the runlist starting at vcn @first_vcn
* and finishing at the end of the runlist is determined.
*
* This for example allows us to allocate a buffer of the right size when
* building the mapping pairs array.
*
* If @rl is NULL, just return 1 (for the single terminator byte).
*
* Return the calculated size in bytes on success. On error, return -errno.
* The following error codes are defined:
* -EINVAL - Run list contains unmapped elements. Make sure to only pass
* fully mapped runlists to this function.
* -EIO - The runlist is corrupt.
*
* Locking: @rl must be locked on entry (either for reading or writing), it
* remains locked throughout, and is left locked upon return.
*/
int ntfs_get_size_for_mapping_pairs(const ntfs_volume *vol,
const runlist_element *rl, const VCN first_vcn,
const VCN last_vcn)
{
LCN prev_lcn;
int rls;
bool the_end = false;
BUG_ON(first_vcn < 0);
BUG_ON(last_vcn < -1);
BUG_ON(last_vcn >= 0 && first_vcn > last_vcn);
if (!rl) {
BUG_ON(first_vcn);
BUG_ON(last_vcn > 0);
return 1;
}
/* Skip to runlist element containing @first_vcn. */
while (rl->length && first_vcn >= rl[1].vcn)
rl++;
if (unlikely((!rl->length && first_vcn > rl->vcn) ||
first_vcn < rl->vcn))
return -EINVAL;
prev_lcn = 0;
/* Always need the terminating zero byte. */
rls = 1;
/* Do the first partial run if present. */
if (first_vcn > rl->vcn) {
s64 delta, length = rl->length;
/* We know rl->length != 0 already. */
if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
goto err_out;
/*
* If @last_vcn is given and finishes inside this run, cap the
* run length.
*/
if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
s64 s1 = last_vcn + 1;
if (unlikely(rl[1].vcn > s1))
length = s1 - rl->vcn;
the_end = true;
}
delta = first_vcn - rl->vcn;
/* Header byte + length. */
rls += 1 + ntfs_get_nr_significant_bytes(length - delta);
/*
* If the logical cluster number (lcn) denotes a hole and we
* are on NTFS 3.0+, we don't store it at all, i.e. we need
* zero space. On earlier NTFS versions we just store the lcn.
* Note: this assumes that on NTFS 1.2-, holes are stored with
* an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
*/
if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
prev_lcn = rl->lcn;
if (likely(rl->lcn >= 0))
prev_lcn += delta;
/* Change in lcn. */
rls += ntfs_get_nr_significant_bytes(prev_lcn);
}
/* Go to next runlist element. */
rl++;
}
/* Do the full runs. */
for (; rl->length && !the_end; rl++) {
s64 length = rl->length;
if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
goto err_out;
/*
* If @last_vcn is given and finishes inside this run, cap the
* run length.
*/
if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
s64 s1 = last_vcn + 1;
if (unlikely(rl[1].vcn > s1))
length = s1 - rl->vcn;
the_end = true;
}
/* Header byte + length. */
rls += 1 + ntfs_get_nr_significant_bytes(length);
/*
* If the logical cluster number (lcn) denotes a hole and we
* are on NTFS 3.0+, we don't store it at all, i.e. we need
* zero space. On earlier NTFS versions we just store the lcn.
* Note: this assumes that on NTFS 1.2-, holes are stored with
* an lcn of -1 and not a delta_lcn of -1 (unless both are -1).
*/
if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
/* Change in lcn. */
rls += ntfs_get_nr_significant_bytes(rl->lcn -
prev_lcn);
prev_lcn = rl->lcn;
}
}
return rls;
err_out:
if (rl->lcn == LCN_RL_NOT_MAPPED)
rls = -EINVAL;
else
rls = -EIO;
return rls;
}
/**
* ntfs_write_significant_bytes - write the significant bytes of a number
* @dst: destination buffer to write to
* @dst_max: pointer to last byte of destination buffer for bounds checking
* @n: number whose significant bytes to write
*
* Store in @dst the minimum number of bytes of the number @n which are
* required to identify @n unambiguously as a signed number, taking care
* not to exceed @dst_max, the maximum position within @dst to which we
* are allowed to write.
*
* This is used when building the mapping pairs array of a runlist to compress
* a given logical cluster number (lcn) or a specific run length to the minimum
* size possible.
*
* Return the number of bytes written on success. On error, i.e. the
* destination buffer @dst is too small, return -ENOSPC.
*/
static inline int ntfs_write_significant_bytes(s8 *dst, const s8 *dst_max,
const s64 n)
{
s64 l = n;
int i;
s8 j;
i = 0;
do {
if (unlikely(dst > dst_max))
goto err_out;
*dst++ = l & 0xffll;
l >>= 8;
i++;
} while (l != 0 && l != -1);
j = (n >> 8 * (i - 1)) & 0xff;
/* If the sign bit is wrong, we need an extra byte. */
if (n < 0 && j >= 0) {
if (unlikely(dst > dst_max))
goto err_out;
i++;
*dst = (s8)-1;
} else if (n > 0 && j < 0) {
if (unlikely(dst > dst_max))
goto err_out;
i++;
*dst = (s8)0;
}
return i;
err_out:
return -ENOSPC;
}
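/*
* Purely illustrative examples: n = 300 (0x12c) is written as the two
* little-endian bytes 0x2c 0x01 and 2 is returned; n = -16 is written
* as the single byte 0xf0 and 1 is returned.
*/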
/**
* ntfs_mapping_pairs_build - build the mapping pairs array from a runlist
* @vol: ntfs volume (needed for the ntfs version)
* @dst: destination buffer to which to write the mapping pairs array
* @dst_len: size of destination buffer @dst in bytes
* @rl: locked runlist for which to build the mapping pairs array
* @first_vcn: first vcn to include in the mapping pairs array
* @last_vcn: last vcn to include in the mapping pairs array
* @stop_vcn: first vcn outside destination buffer on success or -ENOSPC
*
* Create the mapping pairs array from the locked runlist @rl, starting at vcn
* @first_vcn and finishing with vcn @last_vcn and save the array in @dst.
* @dst_len is the size of @dst in bytes and it should be at least equal to the
* value obtained by calling ntfs_get_size_for_mapping_pairs().
*
* A @last_vcn of -1 means end of runlist and in that case the mapping pairs
* array corresponding to the runlist starting at vcn @first_vcn and finishing
* at the end of the runlist is created.
*
* If @rl is NULL, just write a single terminator byte to @dst.
*
* On success or -ENOSPC error, if @stop_vcn is not NULL, *@stop_vcn is set to
* the first vcn outside the destination buffer. Note that on error, @dst has
* been filled with all the mapping pairs that will fit, thus it can be treated
* as partial success, in that a new attribute extent needs to be created or
* the next extent has to be used and the mapping pairs build has to be
* continued with @first_vcn set to *@stop_vcn.
*
* Return 0 on success and -errno on error. The following error codes are
* defined:
* -EINVAL - Run list contains unmapped elements. Make sure to only pass
* fully mapped runlists to this function.
* -EIO - The runlist is corrupt.
* -ENOSPC - The destination buffer is too small.
*
* Locking: @rl must be locked on entry (either for reading or writing), it
* remains locked throughout, and is left locked upon return.
*/
int ntfs_mapping_pairs_build(const ntfs_volume *vol, s8 *dst,
const int dst_len, const runlist_element *rl,
const VCN first_vcn, const VCN last_vcn, VCN *const stop_vcn)
{
LCN prev_lcn;
s8 *dst_max, *dst_next;
int err = -ENOSPC;
bool the_end = false;
s8 len_len, lcn_len;
BUG_ON(first_vcn < 0);
BUG_ON(last_vcn < -1);
BUG_ON(last_vcn >= 0 && first_vcn > last_vcn);
BUG_ON(dst_len < 1);
if (!rl) {
BUG_ON(first_vcn);
BUG_ON(last_vcn > 0);
if (stop_vcn)
*stop_vcn = 0;
/* Terminator byte. */
*dst = 0;
return 0;
}
/* Skip to runlist element containing @first_vcn. */
while (rl->length && first_vcn >= rl[1].vcn)
rl++;
if (unlikely((!rl->length && first_vcn > rl->vcn) ||
first_vcn < rl->vcn))
return -EINVAL;
/*
* @dst_max is used for bounds checking in
* ntfs_write_significant_bytes().
*/
dst_max = dst + dst_len - 1;
prev_lcn = 0;
/* Do the first partial run if present. */
if (first_vcn > rl->vcn) {
s64 delta, length = rl->length;
/* We know rl->length != 0 already. */
if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
goto err_out;
/*
* If @last_vcn is given and finishes inside this run, cap the
* run length.
*/
if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
s64 s1 = last_vcn + 1;
if (unlikely(rl[1].vcn > s1))
length = s1 - rl->vcn;
the_end = true;
}
delta = first_vcn - rl->vcn;
/* Write length. */
len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
length - delta);
if (unlikely(len_len < 0))
goto size_err;
/*
* If the logical cluster number (lcn) denotes a hole and we
* are on NTFS 3.0+, we don't store it at all, i.e. we need
* zero space. On earlier NTFS versions we just write the lcn
* change. FIXME: Do we need to write the lcn change or just
* the lcn in that case? Not sure as I have never seen this
* case on NT4. - We assume that we just need to write the lcn
* change until someone tells us otherwise... (AIA)
*/
if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
prev_lcn = rl->lcn;
if (likely(rl->lcn >= 0))
prev_lcn += delta;
/* Write change in lcn. */
lcn_len = ntfs_write_significant_bytes(dst + 1 +
len_len, dst_max, prev_lcn);
if (unlikely(lcn_len < 0))
goto size_err;
} else
lcn_len = 0;
dst_next = dst + len_len + lcn_len + 1;
if (unlikely(dst_next > dst_max))
goto size_err;
/* Update header byte. */
*dst = lcn_len << 4 | len_len;
/* Position at next mapping pairs array element. */
dst = dst_next;
/* Go to next runlist element. */
rl++;
}
/* Do the full runs. */
for (; rl->length && !the_end; rl++) {
s64 length = rl->length;
if (unlikely(length < 0 || rl->lcn < LCN_HOLE))
goto err_out;
/*
* If @last_vcn is given and finishes inside this run, cap the
* run length.
*/
if (unlikely(last_vcn >= 0 && rl[1].vcn > last_vcn)) {
s64 s1 = last_vcn + 1;
if (unlikely(rl[1].vcn > s1))
length = s1 - rl->vcn;
the_end = true;
}
/* Write length. */
len_len = ntfs_write_significant_bytes(dst + 1, dst_max,
length);
if (unlikely(len_len < 0))
goto size_err;
/*
* If the logical cluster number (lcn) denotes a hole and we
* are on NTFS 3.0+, we don't store it at all, i.e. we need
* zero space. On earlier NTFS versions we just write the lcn
* change. FIXME: Do we need to write the lcn change or just
* the lcn in that case? Not sure as I have never seen this
* case on NT4. - We assume that we just need to write the lcn
* change until someone tells us otherwise... (AIA)
*/
if (likely(rl->lcn >= 0 || vol->major_ver < 3)) {
/* Write change in lcn. */
lcn_len = ntfs_write_significant_bytes(dst + 1 +
len_len, dst_max, rl->lcn - prev_lcn);
if (unlikely(lcn_len < 0))
goto size_err;
prev_lcn = rl->lcn;
} else
lcn_len = 0;
dst_next = dst + len_len + lcn_len + 1;
if (unlikely(dst_next > dst_max))
goto size_err;
/* Update header byte. */
*dst = lcn_len << 4 | len_len;
/* Position at next mapping pairs array element. */
dst = dst_next;
}
/* Success. */
err = 0;
size_err:
/* Set stop vcn. */
if (stop_vcn)
*stop_vcn = rl->vcn;
/* Add terminator byte. */
*dst = 0;
return err;
err_out:
if (rl->lcn == LCN_RL_NOT_MAPPED)
err = -EINVAL;
else
err = -EIO;
return err;
}
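/*
* Purely illustrative example (values invented): building the mapping
* pairs array for the runlist
*
*	(vcn 0, lcn 256, length 20)
*	(vcn 20, lcn 240, length 8)
*	(vcn 28, LCN_ENOENT, length 0)
*
* with @first_vcn 0 and @last_vcn -1 emits, run by run:
*
*	0x21 0x14 0x00 0x01	(header: 2-byte lcn, 1-byte length;
*				 length 20, lcn change 0 -> 256)
*	0x11 0x08 0xf0		(length 8, lcn change 256 -> 240 = -16)
*	0x00			(terminator byte)
*
* This is the exact inverse of the worked example given for
* ntfs_mapping_pairs_decompress() above.
*/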
/**
* ntfs_rl_truncate_nolock - truncate a runlist starting at a specified vcn
* @vol: ntfs volume (needed for error output)
* @runlist: runlist to truncate
* @new_length: the new length of the runlist in VCNs
*
* Truncate the runlist described by @runlist as well as the memory buffer
* holding the runlist elements to a length of @new_length VCNs.
*
* If @new_length lies within the runlist, the runlist elements with VCNs of
* @new_length and above are discarded. As a special case, if @new_length is
* zero, the runlist is discarded and set to NULL.
*
* If @new_length lies beyond the runlist, a sparse runlist element is added to
* the end of the runlist @runlist or, if the last runlist element is already a
* sparse one, that element is extended.
*
* Note, no checking is done for unmapped runlist elements. It is assumed that
* the caller has mapped any elements that need to be mapped already.
*
* Return 0 on success and -errno on error.
*
* Locking: The caller must hold @runlist->lock for writing.
*/
int ntfs_rl_truncate_nolock(const ntfs_volume *vol, runlist *const runlist,
const s64 new_length)
{
runlist_element *rl;
int old_size;
ntfs_debug("Entering for new_length 0x%llx.", (long long)new_length);
BUG_ON(!runlist);
BUG_ON(new_length < 0);
rl = runlist->rl;
if (!new_length) {
ntfs_debug("Freeing runlist.");
runlist->rl = NULL;
if (rl)
ntfs_free(rl);
return 0;
}
if (unlikely(!rl)) {
/*
* Create a runlist consisting of a sparse runlist element of
* length @new_length followed by a terminator runlist element.
*/
rl = ntfs_malloc_nofs(PAGE_SIZE);
if (unlikely(!rl)) {
ntfs_error(vol->sb, "Not enough memory to allocate "
"runlist element buffer.");
return -ENOMEM;
}
runlist->rl = rl;
rl[1].length = rl->vcn = 0;
rl->lcn = LCN_HOLE;
rl[1].vcn = rl->length = new_length;
rl[1].lcn = LCN_ENOENT;
return 0;
}
BUG_ON(new_length < rl->vcn);
/* Find @new_length in the runlist. */
while (likely(rl->length && new_length >= rl[1].vcn))
rl++;
/*
* If not at the end of the runlist we need to shrink it.
* If at the end of the runlist we need to expand it.
*/
if (rl->length) {
runlist_element *trl;
bool is_end;
ntfs_debug("Shrinking runlist.");
/* Determine the runlist size. */
trl = rl + 1;
while (likely(trl->length))
trl++;
old_size = trl - runlist->rl + 1;
/* Truncate the run. */
rl->length = new_length - rl->vcn;
/*
* If a run was partially truncated, make the following runlist
* element a terminator.
*/
is_end = false;
if (rl->length) {
rl++;
if (!rl->length)
is_end = true;
rl->vcn = new_length;
rl->length = 0;
}
rl->lcn = LCN_ENOENT;
/* Reallocate memory if necessary. */
if (!is_end) {
int new_size = rl - runlist->rl + 1;
rl = ntfs_rl_realloc(runlist->rl, old_size, new_size);
if (IS_ERR(rl))
ntfs_warning(vol->sb, "Failed to shrink "
"runlist buffer. This just "
"wastes a bit of memory "
"temporarily so we ignore it "
"and return success.");
else
runlist->rl = rl;
}
} else if (likely(/* !rl->length && */ new_length > rl->vcn)) {
ntfs_debug("Expanding runlist.");
/*
* If there is a previous runlist element and it is a sparse
* one, extend it. Otherwise need to add a new, sparse runlist
* element.
*/
if ((rl > runlist->rl) && ((rl - 1)->lcn == LCN_HOLE))
(rl - 1)->length = new_length - (rl - 1)->vcn;
else {
/* Determine the runlist size. */
old_size = rl - runlist->rl + 1;
/* Reallocate memory if necessary. */
rl = ntfs_rl_realloc(runlist->rl, old_size,
old_size + 1);
if (IS_ERR(rl)) {
ntfs_error(vol->sb, "Failed to expand runlist "
"buffer, aborting.");
return PTR_ERR(rl);
}
runlist->rl = rl;
/*
* Set @rl to the same runlist element in the new
* runlist as before in the old runlist.
*/
rl += old_size - 1;
/* Add a new, sparse runlist element. */
rl->lcn = LCN_HOLE;
rl->length = new_length - rl->vcn;
/* Add a new terminator runlist element. */
rl++;
rl->length = 0;
}
rl->vcn = new_length;
rl->lcn = LCN_ENOENT;
} else /* if (unlikely(!rl->length && new_length == rl->vcn)) */ {
/* Runlist already has same size as requested. */
rl->lcn = LCN_ENOENT;
}
ntfs_debug("Done.");
return 0;
}
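/*
* Purely illustrative examples (values invented): truncating
* [ (vcn 0, lcn 100, length 50), (vcn 50, LCN_ENOENT, length 0) ]
* to a new length of 30 clusters yields
* [ (vcn 0, lcn 100, length 30), (vcn 30, LCN_ENOENT, length 0) ],
* while extending the same runlist to 80 clusters appends a sparse run:
* [ (vcn 0, lcn 100, length 50), (vcn 50, LCN_HOLE, length 30),
* (vcn 80, LCN_ENOENT, length 0) ].
*/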
/**
* ntfs_rl_punch_nolock - punch a hole into a runlist
* @vol: ntfs volume (needed for error output)
* @runlist: runlist to punch a hole into
* @start: starting VCN of the hole to be created
* @length: size of the hole to be created in units of clusters
*
* Punch a hole into the runlist @runlist starting at VCN @start and of size
* @length clusters.
*
* Return 0 on success and -errno on error, in which case @runlist has not been
* modified.
*
* If @start and/or @start + @length are outside the runlist return error code
* -ENOENT.
*
* If the runlist contains unmapped or error elements between @start and @start
* + @length return error code -EINVAL.
*
* Locking: The caller must hold @runlist->lock for writing.
*/
int ntfs_rl_punch_nolock(const ntfs_volume *vol, runlist *const runlist,
const VCN start, const s64 length)
{
const VCN end = start + length;
s64 delta;
runlist_element *rl, *rl_end, *rl_real_end, *trl;
int old_size;
bool lcn_fixup = false;
ntfs_debug("Entering for start 0x%llx, length 0x%llx.",
(long long)start, (long long)length);
BUG_ON(!runlist);
BUG_ON(start < 0);
BUG_ON(length < 0);
BUG_ON(end < 0);
rl = runlist->rl;
if (unlikely(!rl)) {
if (likely(!start && !length))
return 0;
return -ENOENT;
}
/* Find @start in the runlist. */
while (likely(rl->length && start >= rl[1].vcn))
rl++;
rl_end = rl;
/* Find @end in the runlist. */
while (likely(rl_end->length && end >= rl_end[1].vcn)) {
/* Verify there are no unmapped or error elements. */
if (unlikely(rl_end->lcn < LCN_HOLE))
return -EINVAL;
rl_end++;
}
/* Check the last element. */
if (unlikely(rl_end->length && rl_end->lcn < LCN_HOLE))
return -EINVAL;
/* This covers @start being out of bounds, too. */
if (!rl_end->length && end > rl_end->vcn)
return -ENOENT;
if (!length)
return 0;
if (!rl->length)
return -ENOENT;
rl_real_end = rl_end;
/* Determine the runlist size. */
while (likely(rl_real_end->length))
rl_real_end++;
old_size = rl_real_end - runlist->rl + 1;
/* If @start is in a hole simply extend the hole. */
if (rl->lcn == LCN_HOLE) {
/*
* If both @start and @end are in the same sparse run, we are
* done.
*/
if (end <= rl[1].vcn) {
ntfs_debug("Done (requested hole is already sparse).");
return 0;
}
extend_hole:
/* Extend the hole. */
rl->length = end - rl->vcn;
/* If @end is in a hole, merge it with the current one. */
if (rl_end->lcn == LCN_HOLE) {
rl_end++;
rl->length = rl_end->vcn - rl->vcn;
}
/* We have done the hole. Now deal with the remaining tail. */
rl++;
/* Cut out all runlist elements up to @end. */
if (rl < rl_end)
memmove(rl, rl_end, (rl_real_end - rl_end + 1) *
sizeof(*rl));
/* Adjust the beginning of the tail if necessary. */
if (end > rl->vcn) {
delta = end - rl->vcn;
rl->vcn = end;
rl->length -= delta;
/* Only adjust the lcn if it is real. */
if (rl->lcn >= 0)
rl->lcn += delta;
}
shrink_allocation:
/* Reallocate memory if the allocation changed. */
if (rl < rl_end) {
rl = ntfs_rl_realloc(runlist->rl, old_size,
old_size - (rl_end - rl));
if (IS_ERR(rl))
ntfs_warning(vol->sb, "Failed to shrink "
"runlist buffer. This just "
"wastes a bit of memory "
"temporarily so we ignore it "
"and return success.");
else
runlist->rl = rl;
}
ntfs_debug("Done (extend hole).");
return 0;
}
/*
* If @start is at the beginning of a run things are easier as there is
* no need to split the first run.
*/
if (start == rl->vcn) {
/*
* @start is at the beginning of a run.
*
* If the previous run is sparse, extend its hole.
*
* If @end is not in the same run, switch the run to be sparse
* and extend the newly created hole.
*
* Thus both of these cases reduce the problem to the above
* case of "@start is in a hole".
*/
if (rl > runlist->rl && (rl - 1)->lcn == LCN_HOLE) {
rl--;
goto extend_hole;
}
if (end >= rl[1].vcn) {
rl->lcn = LCN_HOLE;
goto extend_hole;
}
/*
* The final case is when @end is in the same run as @start.
	 * For this we need to split the run into two. One run for the
* sparse region between the beginning of the old run, i.e.
* @start, and @end and one for the remaining non-sparse
* region, i.e. between @end and the end of the old run.
*/
trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1);
if (IS_ERR(trl))
goto enomem_out;
old_size++;
if (runlist->rl != trl) {
rl = trl + (rl - runlist->rl);
rl_end = trl + (rl_end - runlist->rl);
rl_real_end = trl + (rl_real_end - runlist->rl);
runlist->rl = trl;
}
split_end:
/* Shift all the runs up by one. */
memmove(rl + 1, rl, (rl_real_end - rl + 1) * sizeof(*rl));
/* Finally, setup the two split runs. */
rl->lcn = LCN_HOLE;
rl->length = length;
rl++;
rl->vcn += length;
/* Only adjust the lcn if it is real. */
if (rl->lcn >= 0 || lcn_fixup)
rl->lcn += length;
rl->length -= length;
ntfs_debug("Done (split one).");
return 0;
}
/*
* @start is neither in a hole nor at the beginning of a run.
*
	 * If @end is in a hole, things are easier: all that is needed is to
	 * truncate the run @start is in to end at @start - 1, delete all
	 * runs after that up to @end, and finally extend the beginning of
	 * the run @end is in back to @start.
*/
if (rl_end->lcn == LCN_HOLE) {
/* Truncate the run containing @start. */
rl->length = start - rl->vcn;
rl++;
/* Cut out all runlist elements up to @end. */
if (rl < rl_end)
memmove(rl, rl_end, (rl_real_end - rl_end + 1) *
sizeof(*rl));
/* Extend the beginning of the run @end is in to be @start. */
rl->vcn = start;
rl->length = rl[1].vcn - start;
goto shrink_allocation;
}
/*
* If @end is not in a hole there are still two cases to distinguish.
* Either @end is or is not in the same run as @start.
*
* The second case is easier as it can be reduced to an already solved
* problem by truncating the run @start is in to end at @start - 1.
	 * Then, if @end is in the next run, we need to split the run into a
	 * sparse run followed by a non-sparse run (already covered above),
	 * and if @end is not in the next run, switching it to be sparse
	 * again reduces the problem to the already covered case of "@start
	 * is in a hole".
*/
if (end >= rl[1].vcn) {
/*
* If @end is not in the next run, reduce the problem to the
* case of "@start is in a hole".
*/
if (rl[1].length && end >= rl[2].vcn) {
/* Truncate the run containing @start. */
rl->length = start - rl->vcn;
rl++;
rl->vcn = start;
rl->lcn = LCN_HOLE;
goto extend_hole;
}
trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 1);
if (IS_ERR(trl))
goto enomem_out;
old_size++;
if (runlist->rl != trl) {
rl = trl + (rl - runlist->rl);
rl_end = trl + (rl_end - runlist->rl);
rl_real_end = trl + (rl_real_end - runlist->rl);
runlist->rl = trl;
}
/* Truncate the run containing @start. */
rl->length = start - rl->vcn;
rl++;
/*
* @end is in the next run, reduce the problem to the case
* where "@start is at the beginning of a run and @end is in
* the same run as @start".
*/
delta = rl->vcn - start;
rl->vcn = start;
if (rl->lcn >= 0) {
rl->lcn -= delta;
/* Need this in case the lcn just became negative. */
lcn_fixup = true;
}
rl->length += delta;
goto split_end;
}
/*
* The first case from above, i.e. @end is in the same run as @start.
* We need to split the run into three. One run for the non-sparse
* region between the beginning of the old run and @start, one for the
* sparse region between @start and @end, and one for the remaining
* non-sparse region, i.e. between @end and the end of the old run.
*/
trl = ntfs_rl_realloc(runlist->rl, old_size, old_size + 2);
if (IS_ERR(trl))
goto enomem_out;
old_size += 2;
if (runlist->rl != trl) {
rl = trl + (rl - runlist->rl);
rl_end = trl + (rl_end - runlist->rl);
rl_real_end = trl + (rl_real_end - runlist->rl);
runlist->rl = trl;
}
/* Shift all the runs up by two. */
memmove(rl + 2, rl, (rl_real_end - rl + 1) * sizeof(*rl));
/* Finally, setup the three split runs. */
rl->length = start - rl->vcn;
rl++;
rl->vcn = start;
rl->lcn = LCN_HOLE;
rl->length = length;
rl++;
delta = end - rl->vcn;
rl->vcn = end;
rl->lcn += delta;
rl->length -= delta;
ntfs_debug("Done (split both).");
return 0;
enomem_out:
ntfs_error(vol->sb, "Not enough memory to extend runlist buffer.");
return -ENOMEM;
}
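/*
 * Illustrative usage sketch (not a call site in this file): per the locking
 * contract documented above, a caller punching @count clusters at @vcn out
 * of an ntfs inode's runlist would wrap the call in the runlist lock, e.g.:
 *
 *	down_write(&ni->runlist.lock);
 *	err = ntfs_rl_punch_nolock(vol, &ni->runlist, vcn, count);
 *	up_write(&ni->runlist.lock);
 *
 * @ni, @vcn, and @count are placeholder names for this sketch.
 */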
#endif /* NTFS_RW */
| linux-master | fs/ntfs/runlist.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* quota.c - NTFS kernel quota ($Quota) handling. Part of the Linux-NTFS
* project.
*
* Copyright (c) 2004 Anton Altaparmakov
*/
#ifdef NTFS_RW
#include "index.h"
#include "quota.h"
#include "debug.h"
#include "ntfs.h"
/**
* ntfs_mark_quotas_out_of_date - mark the quotas out of date on an ntfs volume
* @vol: ntfs volume on which to mark the quotas out of date
*
* Mark the quotas out of date on the ntfs volume @vol and return 'true' on
* success and 'false' on error.
*/
bool ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
{
ntfs_index_context *ictx;
QUOTA_CONTROL_ENTRY *qce;
const le32 qid = QUOTA_DEFAULTS_ID;
int err;
ntfs_debug("Entering.");
if (NVolQuotaOutOfDate(vol))
goto done;
if (!vol->quota_ino || !vol->quota_q_ino) {
ntfs_error(vol->sb, "Quota inodes are not open.");
return false;
}
inode_lock(vol->quota_q_ino);
ictx = ntfs_index_ctx_get(NTFS_I(vol->quota_q_ino));
if (!ictx) {
ntfs_error(vol->sb, "Failed to get index context.");
goto err_out;
}
err = ntfs_index_lookup(&qid, sizeof(qid), ictx);
if (err) {
if (err == -ENOENT)
ntfs_error(vol->sb, "Quota defaults entry is not "
"present.");
else
ntfs_error(vol->sb, "Lookup of quota defaults entry "
"failed.");
goto err_out;
}
if (ictx->data_len < offsetof(QUOTA_CONTROL_ENTRY, sid)) {
ntfs_error(vol->sb, "Quota defaults entry size is invalid. "
"Run chkdsk.");
goto err_out;
}
qce = (QUOTA_CONTROL_ENTRY*)ictx->data;
if (le32_to_cpu(qce->version) != QUOTA_VERSION) {
ntfs_error(vol->sb, "Quota defaults entry version 0x%x is not "
"supported.", le32_to_cpu(qce->version));
goto err_out;
}
ntfs_debug("Quota defaults flags = 0x%x.", le32_to_cpu(qce->flags));
/* If quotas are already marked out of date, no need to do anything. */
if (qce->flags & QUOTA_FLAG_OUT_OF_DATE)
goto set_done;
/*
	 * If quota tracking is neither requested nor enabled and there are no
* pending deletes, no need to mark the quotas out of date.
*/
if (!(qce->flags & (QUOTA_FLAG_TRACKING_ENABLED |
QUOTA_FLAG_TRACKING_REQUESTED |
QUOTA_FLAG_PENDING_DELETES)))
goto set_done;
/*
* Set the QUOTA_FLAG_OUT_OF_DATE bit thus marking quotas out of date.
	 * This is verified on WinXP to be sufficient to cause Windows to
* rescan the volume on boot and update all quota entries.
*/
qce->flags |= QUOTA_FLAG_OUT_OF_DATE;
/* Ensure the modified flags are written to disk. */
ntfs_index_entry_flush_dcache_page(ictx);
ntfs_index_entry_mark_dirty(ictx);
set_done:
ntfs_index_ctx_put(ictx);
inode_unlock(vol->quota_q_ino);
/*
* We set the flag so we do not try to mark the quotas out of date
* again on remount.
*/
NVolSetQuotaOutOfDate(vol);
done:
ntfs_debug("Done.");
return true;
err_out:
if (ictx)
ntfs_index_ctx_put(ictx);
inode_unlock(vol->quota_q_ino);
return false;
}
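/*
 * Hypothetical usage sketch: since the function returns 'true' on success
 * and 'false' on error, mount-time code could, for example, degrade to a
 * read-only mount when marking the quotas out of date fails:
 *
 *	if (!ntfs_mark_quotas_out_of_date(vol))
 *		sb->s_flags |= SB_RDONLY;
 *
 * The exact error policy is up to the caller; this only mirrors the boolean
 * convention documented above.
 */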
#endif /* NTFS_RW */
| linux-master | fs/ntfs/quota.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* logfile.c - NTFS kernel journal handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2002-2007 Anton Altaparmakov
*/
#ifdef NTFS_RW
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/bio.h>
#include "attrib.h"
#include "aops.h"
#include "debug.h"
#include "logfile.h"
#include "malloc.h"
#include "volume.h"
#include "ntfs.h"
/**
* ntfs_check_restart_page_header - check the page header for consistency
* @vi: $LogFile inode to which the restart page header belongs
* @rp: restart page header to check
* @pos: position in @vi at which the restart page header resides
*
* Check the restart page header @rp for consistency and return 'true' if it is
* consistent and 'false' otherwise.
*
* This function only needs NTFS_BLOCK_SIZE bytes in @rp, i.e. it does not
* require the full restart page.
*/
static bool ntfs_check_restart_page_header(struct inode *vi,
RESTART_PAGE_HEADER *rp, s64 pos)
{
u32 logfile_system_page_size, logfile_log_page_size;
u16 ra_ofs, usa_count, usa_ofs, usa_end = 0;
bool have_usa = true;
ntfs_debug("Entering.");
/*
* If the system or log page sizes are smaller than the ntfs block size
* or either is not a power of 2 we cannot handle this log file.
*/
logfile_system_page_size = le32_to_cpu(rp->system_page_size);
logfile_log_page_size = le32_to_cpu(rp->log_page_size);
if (logfile_system_page_size < NTFS_BLOCK_SIZE ||
logfile_log_page_size < NTFS_BLOCK_SIZE ||
logfile_system_page_size &
(logfile_system_page_size - 1) ||
!is_power_of_2(logfile_log_page_size)) {
ntfs_error(vi->i_sb, "$LogFile uses unsupported page size.");
return false;
}
/*
* We must be either at !pos (1st restart page) or at pos = system page
* size (2nd restart page).
*/
if (pos && pos != logfile_system_page_size) {
ntfs_error(vi->i_sb, "Found restart area in incorrect "
"position in $LogFile.");
return false;
}
/* We only know how to handle version 1.1. */
if (sle16_to_cpu(rp->major_ver) != 1 ||
sle16_to_cpu(rp->minor_ver) != 1) {
ntfs_error(vi->i_sb, "$LogFile version %i.%i is not "
"supported. (This driver supports version "
"1.1 only.)", (int)sle16_to_cpu(rp->major_ver),
(int)sle16_to_cpu(rp->minor_ver));
return false;
}
/*
* If chkdsk has been run the restart page may not be protected by an
* update sequence array.
*/
if (ntfs_is_chkd_record(rp->magic) && !le16_to_cpu(rp->usa_count)) {
have_usa = false;
goto skip_usa_checks;
}
/* Verify the size of the update sequence array. */
usa_count = 1 + (logfile_system_page_size >> NTFS_BLOCK_SIZE_BITS);
if (usa_count != le16_to_cpu(rp->usa_count)) {
ntfs_error(vi->i_sb, "$LogFile restart page specifies "
"inconsistent update sequence array count.");
return false;
}
/* Verify the position of the update sequence array. */
usa_ofs = le16_to_cpu(rp->usa_ofs);
usa_end = usa_ofs + usa_count * sizeof(u16);
if (usa_ofs < sizeof(RESTART_PAGE_HEADER) ||
usa_end > NTFS_BLOCK_SIZE - sizeof(u16)) {
ntfs_error(vi->i_sb, "$LogFile restart page specifies "
"inconsistent update sequence array offset.");
return false;
}
skip_usa_checks:
/*
* Verify the position of the restart area. It must be:
* - aligned to 8-byte boundary,
* - after the update sequence array, and
* - within the system page size.
*/
ra_ofs = le16_to_cpu(rp->restart_area_offset);
if (ra_ofs & 7 || (have_usa ? ra_ofs < usa_end :
ra_ofs < sizeof(RESTART_PAGE_HEADER)) ||
ra_ofs > logfile_system_page_size) {
ntfs_error(vi->i_sb, "$LogFile restart page specifies "
"inconsistent restart area offset.");
return false;
}
/*
* Only restart pages modified by chkdsk are allowed to have chkdsk_lsn
* set.
*/
if (!ntfs_is_chkd_record(rp->magic) && sle64_to_cpu(rp->chkdsk_lsn)) {
ntfs_error(vi->i_sb, "$LogFile restart page is not modified "
"by chkdsk but a chkdsk LSN is specified.");
return false;
}
ntfs_debug("Done.");
return true;
}
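/*
 * Reader's note: the open-coded power-of-two test used above for the system
 * page size is equivalent to the is_power_of_2() helper used for the log
 * page size, i.e. for any non-zero u32 x:
 *
 *	(x & (x - 1)) == 0  <=>  is_power_of_2(x)
 *
 * so both page sizes are being checked for the same property.
 */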
/**
* ntfs_check_restart_area - check the restart area for consistency
* @vi: $LogFile inode to which the restart page belongs
* @rp: restart page whose restart area to check
*
* Check the restart area of the restart page @rp for consistency and return
* 'true' if it is consistent and 'false' otherwise.
*
* This function assumes that the restart page header has already been
* consistency checked.
*
* This function only needs NTFS_BLOCK_SIZE bytes in @rp, i.e. it does not
* require the full restart page.
*/
static bool ntfs_check_restart_area(struct inode *vi, RESTART_PAGE_HEADER *rp)
{
u64 file_size;
RESTART_AREA *ra;
u16 ra_ofs, ra_len, ca_ofs;
u8 fs_bits;
ntfs_debug("Entering.");
ra_ofs = le16_to_cpu(rp->restart_area_offset);
ra = (RESTART_AREA*)((u8*)rp + ra_ofs);
/*
* Everything before ra->file_size must be before the first word
* protected by an update sequence number. This ensures that it is
* safe to access ra->client_array_offset.
*/
if (ra_ofs + offsetof(RESTART_AREA, file_size) >
NTFS_BLOCK_SIZE - sizeof(u16)) {
ntfs_error(vi->i_sb, "$LogFile restart area specifies "
"inconsistent file offset.");
return false;
}
/*
* Now that we can access ra->client_array_offset, make sure everything
* up to the log client array is before the first word protected by an
* update sequence number. This ensures we can access all of the
* restart area elements safely. Also, the client array offset must be
* aligned to an 8-byte boundary.
*/
ca_ofs = le16_to_cpu(ra->client_array_offset);
if (((ca_ofs + 7) & ~7) != ca_ofs ||
ra_ofs + ca_ofs > NTFS_BLOCK_SIZE - sizeof(u16)) {
ntfs_error(vi->i_sb, "$LogFile restart area specifies "
"inconsistent client array offset.");
return false;
}
/*
* The restart area must end within the system page size both when
* calculated manually and as specified by ra->restart_area_length.
* Also, the calculated length must not exceed the specified length.
*/
ra_len = ca_ofs + le16_to_cpu(ra->log_clients) *
sizeof(LOG_CLIENT_RECORD);
if (ra_ofs + ra_len > le32_to_cpu(rp->system_page_size) ||
ra_ofs + le16_to_cpu(ra->restart_area_length) >
le32_to_cpu(rp->system_page_size) ||
ra_len > le16_to_cpu(ra->restart_area_length)) {
ntfs_error(vi->i_sb, "$LogFile restart area is out of bounds "
"of the system page size specified by the "
"restart page header and/or the specified "
"restart area length is inconsistent.");
return false;
}
/*
* The ra->client_free_list and ra->client_in_use_list must be either
* LOGFILE_NO_CLIENT or less than ra->log_clients or they are
* overflowing the client array.
*/
if ((ra->client_free_list != LOGFILE_NO_CLIENT &&
le16_to_cpu(ra->client_free_list) >=
le16_to_cpu(ra->log_clients)) ||
(ra->client_in_use_list != LOGFILE_NO_CLIENT &&
le16_to_cpu(ra->client_in_use_list) >=
le16_to_cpu(ra->log_clients))) {
ntfs_error(vi->i_sb, "$LogFile restart area specifies "
"overflowing client free and/or in use lists.");
return false;
}
/*
* Check ra->seq_number_bits against ra->file_size for consistency.
* We cannot just use ffs() because the file size is not a power of 2.
*/
file_size = (u64)sle64_to_cpu(ra->file_size);
fs_bits = 0;
while (file_size) {
file_size >>= 1;
fs_bits++;
}
if (le32_to_cpu(ra->seq_number_bits) != 67 - fs_bits) {
ntfs_error(vi->i_sb, "$LogFile restart area specifies "
"inconsistent sequence number bits.");
return false;
}
/* The log record header length must be a multiple of 8. */
if (((le16_to_cpu(ra->log_record_header_length) + 7) & ~7) !=
le16_to_cpu(ra->log_record_header_length)) {
ntfs_error(vi->i_sb, "$LogFile restart area specifies "
"inconsistent log record header length.");
return false;
}
	/* Ditto for the log page data offset. */
if (((le16_to_cpu(ra->log_page_data_offset) + 7) & ~7) !=
le16_to_cpu(ra->log_page_data_offset)) {
ntfs_error(vi->i_sb, "$LogFile restart area specifies "
"inconsistent log page data offset.");
return false;
}
ntfs_debug("Done.");
return true;
}
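/*
 * Aside (a sketch, not a behavioural change): the bit-counting loop above
 * computes the position of the most significant set bit of ra->file_size,
 * so the same value could be obtained with the kernel helper
 *
 *	fs_bits = fls64(sle64_to_cpu(ra->file_size));
 *
 * followed by the identical "67 - fs_bits" consistency check.
 */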
/**
* ntfs_check_log_client_array - check the log client array for consistency
* @vi: $LogFile inode to which the restart page belongs
* @rp: restart page whose log client array to check
*
* Check the log client array of the restart page @rp for consistency and
* return 'true' if it is consistent and 'false' otherwise.
*
* This function assumes that the restart page header and the restart area have
* already been consistency checked.
*
* Unlike ntfs_check_restart_page_header() and ntfs_check_restart_area(), this
* function needs @rp->system_page_size bytes in @rp, i.e. it requires the full
* restart page and the page must be multi sector transfer deprotected.
*/
static bool ntfs_check_log_client_array(struct inode *vi,
RESTART_PAGE_HEADER *rp)
{
RESTART_AREA *ra;
LOG_CLIENT_RECORD *ca, *cr;
u16 nr_clients, idx;
bool in_free_list, idx_is_first;
ntfs_debug("Entering.");
ra = (RESTART_AREA*)((u8*)rp + le16_to_cpu(rp->restart_area_offset));
ca = (LOG_CLIENT_RECORD*)((u8*)ra +
le16_to_cpu(ra->client_array_offset));
/*
* Check the ra->client_free_list first and then check the
* ra->client_in_use_list. Check each of the log client records in
* each of the lists and check that the array does not overflow the
* ra->log_clients value. Also keep track of the number of records
* visited as there cannot be more than ra->log_clients records and
	 * that way we detect any loops within a list.
*/
nr_clients = le16_to_cpu(ra->log_clients);
idx = le16_to_cpu(ra->client_free_list);
in_free_list = true;
check_list:
for (idx_is_first = true; idx != LOGFILE_NO_CLIENT_CPU; nr_clients--,
idx = le16_to_cpu(cr->next_client)) {
if (!nr_clients || idx >= le16_to_cpu(ra->log_clients))
goto err_out;
/* Set @cr to the current log client record. */
cr = ca + idx;
/* The first log client record must not have a prev_client. */
if (idx_is_first) {
if (cr->prev_client != LOGFILE_NO_CLIENT)
goto err_out;
idx_is_first = false;
}
}
/* Switch to and check the in use list if we just did the free list. */
if (in_free_list) {
in_free_list = false;
idx = le16_to_cpu(ra->client_in_use_list);
goto check_list;
}
ntfs_debug("Done.");
return true;
err_out:
ntfs_error(vi->i_sb, "$LogFile log client array is corrupt.");
return false;
}
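/*
 * Worked example of the loop detection above: with ra->log_clients == 2 and
 * a corrupt free list linking 0 -> 1 -> 0 -> ..., nr_clients is decremented
 * on every iteration and reaches zero on the third pass, at which point the
 * !nr_clients test bails out via err_out instead of walking the cycle
 * forever.
 */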
/**
* ntfs_check_and_load_restart_page - check the restart page for consistency
* @vi: $LogFile inode to which the restart page belongs
* @rp: restart page to check
* @pos: position in @vi at which the restart page resides
* @wrp: [OUT] copy of the multi sector transfer deprotected restart page
* @lsn: [OUT] set to the current logfile lsn on success
*
* Check the restart page @rp for consistency and return 0 if it is consistent
* and -errno otherwise. The restart page may have been modified by chkdsk in
* which case its magic is CHKD instead of RSTR.
*
* This function only needs NTFS_BLOCK_SIZE bytes in @rp, i.e. it does not
* require the full restart page.
*
* If @wrp is not NULL, on success, *@wrp will point to a buffer containing a
* copy of the complete multi sector transfer deprotected page. On failure,
* *@wrp is undefined.
*
 * Similarly, if @lsn is not NULL, on success *@lsn will be set to the current
* logfile lsn according to this restart page. On failure, *@lsn is undefined.
*
* The following error codes are defined:
* -EINVAL - The restart page is inconsistent.
* -ENOMEM - Not enough memory to load the restart page.
 *	-EIO	- Failed to read from $LogFile.
*/
static int ntfs_check_and_load_restart_page(struct inode *vi,
RESTART_PAGE_HEADER *rp, s64 pos, RESTART_PAGE_HEADER **wrp,
LSN *lsn)
{
RESTART_AREA *ra;
RESTART_PAGE_HEADER *trp;
int size, err;
ntfs_debug("Entering.");
/* Check the restart page header for consistency. */
if (!ntfs_check_restart_page_header(vi, rp, pos)) {
/* Error output already done inside the function. */
return -EINVAL;
}
/* Check the restart area for consistency. */
if (!ntfs_check_restart_area(vi, rp)) {
/* Error output already done inside the function. */
return -EINVAL;
}
ra = (RESTART_AREA*)((u8*)rp + le16_to_cpu(rp->restart_area_offset));
/*
* Allocate a buffer to store the whole restart page so we can multi
* sector transfer deprotect it.
*/
trp = ntfs_malloc_nofs(le32_to_cpu(rp->system_page_size));
if (!trp) {
ntfs_error(vi->i_sb, "Failed to allocate memory for $LogFile "
"restart page buffer.");
return -ENOMEM;
}
/*
* Read the whole of the restart page into the buffer. If it fits
* completely inside @rp, just copy it from there. Otherwise map all
* the required pages and copy the data from them.
*/
size = PAGE_SIZE - (pos & ~PAGE_MASK);
if (size >= le32_to_cpu(rp->system_page_size)) {
memcpy(trp, rp, le32_to_cpu(rp->system_page_size));
} else {
pgoff_t idx;
struct page *page;
int have_read, to_read;
/* First copy what we already have in @rp. */
memcpy(trp, rp, size);
/* Copy the remaining data one page at a time. */
have_read = size;
to_read = le32_to_cpu(rp->system_page_size) - size;
idx = (pos + size) >> PAGE_SHIFT;
BUG_ON((pos + size) & ~PAGE_MASK);
do {
page = ntfs_map_page(vi->i_mapping, idx);
if (IS_ERR(page)) {
ntfs_error(vi->i_sb, "Error mapping $LogFile "
"page (index %lu).", idx);
err = PTR_ERR(page);
if (err != -EIO && err != -ENOMEM)
err = -EIO;
goto err_out;
}
size = min_t(int, to_read, PAGE_SIZE);
memcpy((u8*)trp + have_read, page_address(page), size);
ntfs_unmap_page(page);
have_read += size;
to_read -= size;
idx++;
} while (to_read > 0);
}
/*
* Perform the multi sector transfer deprotection on the buffer if the
* restart page is protected.
*/
if ((!ntfs_is_chkd_record(trp->magic) || le16_to_cpu(trp->usa_count))
&& post_read_mst_fixup((NTFS_RECORD*)trp,
le32_to_cpu(rp->system_page_size))) {
/*
		 * A multi sector transfer error was detected. We only need to
* abort if the restart page contents exceed the multi sector
* transfer fixup of the first sector.
*/
if (le16_to_cpu(rp->restart_area_offset) +
le16_to_cpu(ra->restart_area_length) >
NTFS_BLOCK_SIZE - sizeof(u16)) {
ntfs_error(vi->i_sb, "Multi sector transfer error "
"detected in $LogFile restart page.");
err = -EINVAL;
goto err_out;
}
}
/*
* If the restart page is modified by chkdsk or there are no active
* logfile clients, the logfile is consistent. Otherwise, need to
* check the log client records for consistency, too.
*/
err = 0;
if (ntfs_is_rstr_record(rp->magic) &&
ra->client_in_use_list != LOGFILE_NO_CLIENT) {
if (!ntfs_check_log_client_array(vi, trp)) {
err = -EINVAL;
goto err_out;
}
}
if (lsn) {
if (ntfs_is_rstr_record(rp->magic))
*lsn = sle64_to_cpu(ra->current_lsn);
else /* if (ntfs_is_chkd_record(rp->magic)) */
*lsn = sle64_to_cpu(rp->chkdsk_lsn);
}
ntfs_debug("Done.");
if (wrp)
*wrp = trp;
else {
err_out:
ntfs_free(trp);
}
return err;
}
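/*
 * Example of the buffer-fill logic above: with PAGE_SIZE == 4096 and a
 * 4096-byte system page starting on a page boundary, size computes to 4096,
 * the whole restart page is already available in @rp and the single
 * memcpy() path is taken; the page-mapping loop only runs when the system
 * page size exceeds what is left of the current page cache page.
 */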
/**
* ntfs_check_logfile - check the journal for consistency
* @log_vi: struct inode of loaded journal $LogFile to check
* @rp: [OUT] on success this is a copy of the current restart page
*
* Check the $LogFile journal for consistency and return 'true' if it is
* consistent and 'false' if not. On success, the current restart page is
* returned in *@rp. Caller must call ntfs_free(*@rp) when finished with it.
*
* At present we only check the two restart pages and ignore the log record
* pages.
*
* Note that the MstProtected flag is not set on the $LogFile inode and hence
* when reading pages they are not deprotected. This is because we do not know
* if the $LogFile was created on a system with a different page size to ours
* yet and mst deprotection would fail if our page size is smaller.
*/
bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp)
{
s64 size, pos;
LSN rstr1_lsn, rstr2_lsn;
ntfs_volume *vol = NTFS_SB(log_vi->i_sb);
struct address_space *mapping = log_vi->i_mapping;
struct page *page = NULL;
u8 *kaddr = NULL;
RESTART_PAGE_HEADER *rstr1_ph = NULL;
RESTART_PAGE_HEADER *rstr2_ph = NULL;
int log_page_size, err;
bool logfile_is_empty = true;
u8 log_page_bits;
ntfs_debug("Entering.");
/* An empty $LogFile must have been clean before it got emptied. */
if (NVolLogFileEmpty(vol))
goto is_empty;
size = i_size_read(log_vi);
/* Make sure the file doesn't exceed the maximum allowed size. */
if (size > MaxLogFileSize)
size = MaxLogFileSize;
/*
* Truncate size to a multiple of the page cache size or the default
* log page size if the page cache size is between the default log page
* size and twice that.
*/
if (PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <=
DefaultLogPageSize * 2)
log_page_size = DefaultLogPageSize;
else
log_page_size = PAGE_SIZE;
/*
* Use ntfs_ffs() instead of ffs() to enable the compiler to
* optimize log_page_size and log_page_bits into constants.
*/
log_page_bits = ntfs_ffs(log_page_size) - 1;
size &= ~(s64)(log_page_size - 1);
/*
* Ensure the log file is big enough to store at least the two restart
* pages and the minimum number of log record pages.
*/
if (size < log_page_size * 2 || (size - log_page_size * 2) >>
log_page_bits < MinLogRecordPages) {
ntfs_error(vol->sb, "$LogFile is too small.");
return false;
}
/*
* Read through the file looking for a restart page. Since the restart
* page header is at the beginning of a page we only need to search at
* what could be the beginning of a page (for each page size) rather
* than scanning the whole file byte by byte. If all potential places
	 * contain empty and uninitialized records, the log file can be assumed
* to be empty.
*/
for (pos = 0; pos < size; pos <<= 1) {
pgoff_t idx = pos >> PAGE_SHIFT;
if (!page || page->index != idx) {
if (page)
ntfs_unmap_page(page);
page = ntfs_map_page(mapping, idx);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Error mapping $LogFile "
"page (index %lu).", idx);
goto err_out;
}
}
kaddr = (u8*)page_address(page) + (pos & ~PAGE_MASK);
/*
		 * A non-empty block means the logfile is not empty, while an
		 * empty block encountered after a non-empty block means we
		 * are done.
*/
if (!ntfs_is_empty_recordp((le32*)kaddr))
logfile_is_empty = false;
else if (!logfile_is_empty)
break;
/*
* A log record page means there cannot be a restart page after
* this so no need to continue searching.
*/
if (ntfs_is_rcrd_recordp((le32*)kaddr))
break;
/* If not a (modified by chkdsk) restart page, continue. */
if (!ntfs_is_rstr_recordp((le32*)kaddr) &&
!ntfs_is_chkd_recordp((le32*)kaddr)) {
if (!pos)
pos = NTFS_BLOCK_SIZE >> 1;
continue;
}
/*
* Check the (modified by chkdsk) restart page for consistency
* and get a copy of the complete multi sector transfer
* deprotected restart page.
*/
err = ntfs_check_and_load_restart_page(log_vi,
(RESTART_PAGE_HEADER*)kaddr, pos,
!rstr1_ph ? &rstr1_ph : &rstr2_ph,
!rstr1_ph ? &rstr1_lsn : &rstr2_lsn);
if (!err) {
/*
* If we have now found the first (modified by chkdsk)
* restart page, continue looking for the second one.
*/
if (!pos) {
pos = NTFS_BLOCK_SIZE >> 1;
continue;
}
/*
* We have now found the second (modified by chkdsk)
* restart page, so we can stop looking.
*/
break;
}
/*
* Error output already done inside the function. Note, we do
* not abort if the restart page was invalid as we might still
* find a valid one further in the file.
*/
if (err != -EINVAL) {
ntfs_unmap_page(page);
goto err_out;
}
/* Continue looking. */
if (!pos)
pos = NTFS_BLOCK_SIZE >> 1;
}
if (page)
ntfs_unmap_page(page);
if (logfile_is_empty) {
NVolSetLogFileEmpty(vol);
is_empty:
ntfs_debug("Done. ($LogFile is empty.)");
return true;
}
if (!rstr1_ph) {
BUG_ON(rstr2_ph);
ntfs_error(vol->sb, "Did not find any restart pages in "
"$LogFile and it was not empty.");
return false;
}
/* If both restart pages were found, use the more recent one. */
if (rstr2_ph) {
/*
* If the second restart area is more recent, switch to it.
* Otherwise just throw it away.
*/
if (rstr2_lsn > rstr1_lsn) {
ntfs_debug("Using second restart page as it is more "
"recent.");
ntfs_free(rstr1_ph);
rstr1_ph = rstr2_ph;
/* rstr1_lsn = rstr2_lsn; */
} else {
ntfs_debug("Using first restart page as it is more "
"recent.");
ntfs_free(rstr2_ph);
}
rstr2_ph = NULL;
}
/* All consistency checks passed. */
if (rp)
*rp = rstr1_ph;
else
ntfs_free(rstr1_ph);
ntfs_debug("Done.");
return true;
err_out:
if (rstr1_ph)
ntfs_free(rstr1_ph);
return false;
}
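/*
 * Illustrative mount-time sequence (a sketch; the real call sites live in
 * the driver's mount code, not in this file):
 *
 *	RESTART_PAGE_HEADER *rp = NULL;
 *	bool clean = ntfs_check_logfile(log_vi, &rp) &&
 *			ntfs_is_logfile_clean(log_vi, rp);
 *	if (rp)
 *		ntfs_free(rp);
 *
 * Per the contract above, *@rp is only valid when ntfs_check_logfile()
 * returned 'true' and must then be freed with ntfs_free().
 */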
/**
* ntfs_is_logfile_clean - check in the journal if the volume is clean
* @log_vi: struct inode of loaded journal $LogFile to check
* @rp: copy of the current restart page
*
 * Analyze the $LogFile journal and return 'true' if it indicates the volume
 * was shut down cleanly and 'false' if not.
*
* At present we only look at the two restart pages and ignore the log record
* pages. This is a little bit crude in that there will be a very small number
* of cases where we think that a volume is dirty when in fact it is clean.
* This should only affect volumes that have not been shutdown cleanly but did
* not have any pending, non-check-pointed i/o, i.e. they were completely idle
* at least for the five seconds preceding the unclean shutdown.
*
* This function assumes that the $LogFile journal has already been consistency
* checked by a call to ntfs_check_logfile() and in particular if the $LogFile
* is empty this function requires that NVolLogFileEmpty() is true otherwise an
* empty volume will be reported as dirty.
*/
bool ntfs_is_logfile_clean(struct inode *log_vi, const RESTART_PAGE_HEADER *rp)
{
ntfs_volume *vol = NTFS_SB(log_vi->i_sb);
RESTART_AREA *ra;
ntfs_debug("Entering.");
/* An empty $LogFile must have been clean before it got emptied. */
if (NVolLogFileEmpty(vol)) {
ntfs_debug("Done. ($LogFile is empty.)");
return true;
}
BUG_ON(!rp);
if (!ntfs_is_rstr_record(rp->magic) &&
!ntfs_is_chkd_record(rp->magic)) {
ntfs_error(vol->sb, "Restart page buffer is invalid. This is "
"probably a bug in that the $LogFile should "
"have been consistency checked before calling "
"this function.");
return false;
}
ra = (RESTART_AREA*)((u8*)rp + le16_to_cpu(rp->restart_area_offset));
/*
* If the $LogFile has active clients, i.e. it is open, and we do not
* have the RESTART_VOLUME_IS_CLEAN bit set in the restart area flags,
* we assume there was an unclean shutdown.
*/
if (ra->client_in_use_list != LOGFILE_NO_CLIENT &&
!(ra->flags & RESTART_VOLUME_IS_CLEAN)) {
ntfs_debug("Done. $LogFile indicates a dirty shutdown.");
return false;
}
/* $LogFile indicates a clean shutdown. */
ntfs_debug("Done. $LogFile indicates a clean shutdown.");
return true;
}
/**
* ntfs_empty_logfile - empty the contents of the $LogFile journal
* @log_vi: struct inode of loaded journal $LogFile to empty
*
* Empty the contents of the $LogFile journal @log_vi and return 'true' on
* success and 'false' on error.
*
* This function assumes that the $LogFile journal has already been consistency
* checked by a call to ntfs_check_logfile() and that ntfs_is_logfile_clean()
* has been used to ensure that the $LogFile is clean.
*/
bool ntfs_empty_logfile(struct inode *log_vi)
{
VCN vcn, end_vcn;
ntfs_inode *log_ni = NTFS_I(log_vi);
ntfs_volume *vol = log_ni->vol;
struct super_block *sb = vol->sb;
runlist_element *rl;
unsigned long flags;
unsigned block_size, block_size_bits;
int err;
bool should_wait = true;
ntfs_debug("Entering.");
if (NVolLogFileEmpty(vol)) {
ntfs_debug("Done.");
return true;
}
/*
* We cannot use ntfs_attr_set() because we may be still in the middle
* of a mount operation. Thus we do the emptying by hand by first
* zapping the page cache pages for the $LogFile/$DATA attribute and
* then emptying each of the buffers in each of the clusters specified
* by the runlist by hand.
*/
block_size = sb->s_blocksize;
block_size_bits = sb->s_blocksize_bits;
vcn = 0;
read_lock_irqsave(&log_ni->size_lock, flags);
end_vcn = (log_ni->initialized_size + vol->cluster_size_mask) >>
vol->cluster_size_bits;
read_unlock_irqrestore(&log_ni->size_lock, flags);
truncate_inode_pages(log_vi->i_mapping, 0);
down_write(&log_ni->runlist.lock);
rl = log_ni->runlist.rl;
if (unlikely(!rl || vcn < rl->vcn || !rl->length)) {
map_vcn:
err = ntfs_map_runlist_nolock(log_ni, vcn, NULL);
if (err) {
ntfs_error(sb, "Failed to map runlist fragment (error "
"%d).", -err);
goto err;
}
rl = log_ni->runlist.rl;
BUG_ON(!rl || vcn < rl->vcn || !rl->length);
}
/* Seek to the runlist element containing @vcn. */
while (rl->length && vcn >= rl[1].vcn)
rl++;
do {
LCN lcn;
sector_t block, end_block;
s64 len;
/*
* If this run is not mapped map it now and start again as the
* runlist will have been updated.
*/
lcn = rl->lcn;
if (unlikely(lcn == LCN_RL_NOT_MAPPED)) {
vcn = rl->vcn;
goto map_vcn;
}
/* If this run is not valid abort with an error. */
if (unlikely(!rl->length || lcn < LCN_HOLE))
goto rl_err;
/* Skip holes. */
if (lcn == LCN_HOLE)
continue;
block = lcn << vol->cluster_size_bits >> block_size_bits;
len = rl->length;
if (rl[1].vcn > end_vcn)
len = end_vcn - rl->vcn;
end_block = (lcn + len) << vol->cluster_size_bits >>
block_size_bits;
/* Iterate over the blocks in the run and empty them. */
do {
struct buffer_head *bh;
/* Obtain the buffer, possibly not uptodate. */
bh = sb_getblk(sb, block);
BUG_ON(!bh);
/* Setup buffer i/o submission. */
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
/* Set the entire contents of the buffer to 0xff. */
memset(bh->b_data, -1, block_size);
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
if (buffer_dirty(bh))
clear_buffer_dirty(bh);
/*
* Submit the buffer and wait for i/o to complete but
* only for the first buffer so we do not miss really
* serious i/o errors. Once the first buffer has
* completed ignore errors afterwards as we can assume
* that if one buffer worked all of them will work.
*/
submit_bh(REQ_OP_WRITE, bh);
if (should_wait) {
should_wait = false;
wait_on_buffer(bh);
if (unlikely(!buffer_uptodate(bh)))
goto io_err;
}
brelse(bh);
} while (++block < end_block);
} while ((++rl)->vcn < end_vcn);
up_write(&log_ni->runlist.lock);
/*
* Zap the pages again just in case any got instantiated whilst we were
* emptying the blocks by hand. FIXME: We may not have completed
* writing to all the buffer heads yet so this may happen too early.
* We really should use a kernel thread to do the emptying
* asynchronously and then we can also set the volume dirty and output
* an error message if emptying should fail.
*/
truncate_inode_pages(log_vi->i_mapping, 0);
/* Set the flag so we do not have to do it again on remount. */
NVolSetLogFileEmpty(vol);
ntfs_debug("Done.");
return true;
io_err:
ntfs_error(sb, "Failed to write buffer. Unmount and run chkdsk.");
goto dirty_err;
rl_err:
ntfs_error(sb, "Runlist is corrupt. Unmount and run chkdsk.");
dirty_err:
NVolSetErrors(vol);
err = -EIO;
err:
up_write(&log_ni->runlist.lock);
ntfs_error(sb, "Failed to fill $LogFile with 0xff bytes (error %d).",
-err);
return false;
}
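/*
 * Hypothetical sequence tying the three $LogFile helpers together (sketch
 * only): after ntfs_check_logfile() and ntfs_is_logfile_clean() have both
 * succeeded, a read-write mount would call
 *
 *	if (!ntfs_empty_logfile(log_vi))
 *		handle_failure();
 *
 * where handle_failure() is a placeholder for the caller's error policy.
 * On success the journal is left empty and flagged as such via
 * NVolSetLogFileEmpty(), as done at the end of the function above.
 */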
#endif /* NTFS_RW */
| linux-master | fs/ntfs/logfile.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* namei.c - NTFS kernel directory inode operations. Part of the Linux-NTFS
* project.
*
* Copyright (c) 2001-2006 Anton Altaparmakov
*/
#include <linux/dcache.h>
#include <linux/exportfs.h>
#include <linux/security.h>
#include <linux/slab.h>
#include "attrib.h"
#include "debug.h"
#include "dir.h"
#include "mft.h"
#include "ntfs.h"
/**
* ntfs_lookup - find the inode represented by a dentry in a directory inode
* @dir_ino: directory inode in which to look for the inode
* @dent: dentry representing the inode to look for
* @flags: lookup flags
*
* In short, ntfs_lookup() looks for the inode represented by the dentry @dent
* in the directory inode @dir_ino and if found attaches the inode to the
* dentry @dent.
*
* In more detail, the dentry @dent specifies which inode to look for by
* supplying the name of the inode in @dent->d_name.name. ntfs_lookup()
* converts the name to Unicode and walks the contents of the directory inode
* @dir_ino looking for the converted Unicode name. If the name is found in the
* directory, the corresponding inode is loaded by calling ntfs_iget() on its
* inode number and the inode is associated with the dentry @dent via a call to
* d_splice_alias().
*
* If the name is not found in the directory, a NULL inode is inserted into the
* dentry @dent via a call to d_add(). The dentry is then termed a negative
* dentry.
*
 * Only if an actual error occurs do we return an error via ERR_PTR().
*
* In order to handle the case insensitivity issues of NTFS with regards to the
* dcache and the dcache requiring only one dentry per directory, we deal with
* dentry aliases that only differ in case in ->ntfs_lookup() while maintaining
* a case sensitive dcache. This means that we get the full benefit of dcache
* speed when the file/directory is looked up with the same case as returned by
* ->ntfs_readdir() but that a lookup for any other case (or for the short file
* name) will not find anything in dcache and will enter ->ntfs_lookup()
* instead, where we search the directory for a fully matching file name
* (including case) and if that is not found, we search for a file name that
* matches with different case and if that has non-POSIX semantics we return
* that. We actually do only one search (case sensitive) and keep tabs on
* whether we have found a case insensitive match in the process.
*
* To simplify matters for us, we do not treat the short vs long filenames as
* two hard links but instead if the lookup matches a short filename, we
* return the dentry for the corresponding long filename instead.
*
* There are three cases we need to distinguish here:
*
* 1) @dent perfectly matches (i.e. including case) a directory entry with a
* file name in the WIN32 or POSIX namespaces. In this case
* ntfs_lookup_inode_by_name() will return with name set to NULL and we
* just d_splice_alias() @dent.
* 2) @dent matches (not including case) a directory entry with a file name in
* the WIN32 namespace. In this case ntfs_lookup_inode_by_name() will return
* with name set to point to a kmalloc()ed ntfs_name structure containing
* the properly cased little endian Unicode name. We convert the name to the
* current NLS code page, search if a dentry with this name already exists
* and if so return that instead of @dent. At this point things are
* complicated by the possibility of 'disconnected' dentries due to NFS
* which we deal with appropriately (see the code comments). The VFS will
* then destroy the old @dent and use the one we returned. If a dentry is
* not found, we allocate a new one, d_splice_alias() it, and return it as
* above.
* 3) @dent matches either perfectly or not (i.e. we don't care about case) a
* directory entry with a file name in the DOS namespace. In this case
* ntfs_lookup_inode_by_name() will return with name set to point to a
* kmalloc()ed ntfs_name structure containing the mft reference (cpu endian)
* of the inode. We use the mft reference to read the inode and to find the
* file name in the WIN32 namespace corresponding to the matched short file
* name. We then convert the name to the current NLS code page, and proceed
* searching for a dentry with this name, etc, as in case 2), above.
*
* Locking: Caller must hold i_mutex on the directory.
*/
static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
unsigned int flags)
{
ntfs_volume *vol = NTFS_SB(dir_ino->i_sb);
struct inode *dent_inode;
ntfschar *uname;
ntfs_name *name = NULL;
MFT_REF mref;
unsigned long dent_ino;
int uname_len;
ntfs_debug("Looking up %pd in directory inode 0x%lx.",
dent, dir_ino->i_ino);
/* Convert the name of the dentry to Unicode. */
uname_len = ntfs_nlstoucs(vol, dent->d_name.name, dent->d_name.len,
&uname);
if (uname_len < 0) {
if (uname_len != -ENAMETOOLONG)
ntfs_error(vol->sb, "Failed to convert name to "
"Unicode.");
return ERR_PTR(uname_len);
}
mref = ntfs_lookup_inode_by_name(NTFS_I(dir_ino), uname, uname_len,
&name);
kmem_cache_free(ntfs_name_cache, uname);
if (!IS_ERR_MREF(mref)) {
dent_ino = MREF(mref);
ntfs_debug("Found inode 0x%lx. Calling ntfs_iget.", dent_ino);
dent_inode = ntfs_iget(vol->sb, dent_ino);
if (!IS_ERR(dent_inode)) {
/* Consistency check. */
if (is_bad_inode(dent_inode) || MSEQNO(mref) ==
NTFS_I(dent_inode)->seq_no ||
dent_ino == FILE_MFT) {
/* Perfect WIN32/POSIX match. -- Case 1. */
if (!name) {
ntfs_debug("Done. (Case 1.)");
return d_splice_alias(dent_inode, dent);
}
/*
* We are too indented. Handle imperfect
* matches and short file names further below.
*/
goto handle_name;
}
ntfs_error(vol->sb, "Found stale reference to inode "
"0x%lx (reference sequence number = "
"0x%x, inode sequence number = 0x%x), "
"returning -EIO. Run chkdsk.",
dent_ino, MSEQNO(mref),
NTFS_I(dent_inode)->seq_no);
iput(dent_inode);
dent_inode = ERR_PTR(-EIO);
} else
ntfs_error(vol->sb, "ntfs_iget(0x%lx) failed with "
"error code %li.", dent_ino,
PTR_ERR(dent_inode));
kfree(name);
/* Return the error code. */
return ERR_CAST(dent_inode);
}
/* It is guaranteed that @name is no longer allocated at this point. */
if (MREF_ERR(mref) == -ENOENT) {
ntfs_debug("Entry was not found, adding negative dentry.");
/* The dcache will handle negative entries. */
d_add(dent, NULL);
ntfs_debug("Done.");
return NULL;
}
ntfs_error(vol->sb, "ntfs_lookup_ino_by_name() failed with error "
"code %i.", -MREF_ERR(mref));
return ERR_PTR(MREF_ERR(mref));
// TODO: Consider moving this lot to a separate function! (AIA)
handle_name:
{
MFT_RECORD *m;
ntfs_attr_search_ctx *ctx;
ntfs_inode *ni = NTFS_I(dent_inode);
int err;
struct qstr nls_name;
nls_name.name = NULL;
if (name->type != FILE_NAME_DOS) { /* Case 2. */
ntfs_debug("Case 2.");
nls_name.len = (unsigned)ntfs_ucstonls(vol,
(ntfschar*)&name->name, name->len,
(unsigned char**)&nls_name.name, 0);
kfree(name);
} else /* if (name->type == FILE_NAME_DOS) */ { /* Case 3. */
FILE_NAME_ATTR *fn;
ntfs_debug("Case 3.");
kfree(name);
/* Find the WIN32 name corresponding to the matched DOS name. */
ni = NTFS_I(dent_inode);
m = map_mft_record(ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
ctx = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
do {
ATTR_RECORD *a;
u32 val_len;
err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, 0, 0,
NULL, 0, ctx);
if (unlikely(err)) {
ntfs_error(vol->sb, "Inode corrupt: No WIN32 "
"namespace counterpart to DOS "
"file name. Run chkdsk.");
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
/* Consistency checks. */
a = ctx->attr;
if (a->non_resident || a->flags)
goto eio_err_out;
val_len = le32_to_cpu(a->data.resident.value_length);
if (le16_to_cpu(a->data.resident.value_offset) +
val_len > le32_to_cpu(a->length))
goto eio_err_out;
fn = (FILE_NAME_ATTR*)((u8*)ctx->attr + le16_to_cpu(
ctx->attr->data.resident.value_offset));
if ((u32)(fn->file_name_length * sizeof(ntfschar) +
sizeof(FILE_NAME_ATTR)) > val_len)
goto eio_err_out;
} while (fn->file_name_type != FILE_NAME_WIN32);
/* Convert the found WIN32 name to current NLS code page. */
nls_name.len = (unsigned)ntfs_ucstonls(vol,
(ntfschar*)&fn->file_name, fn->file_name_length,
(unsigned char**)&nls_name.name, 0);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ni);
}
m = NULL;
ctx = NULL;
/* Check if a conversion error occurred. */
if ((signed)nls_name.len < 0) {
err = (signed)nls_name.len;
goto err_out;
}
nls_name.hash = full_name_hash(dent, nls_name.name, nls_name.len);
dent = d_add_ci(dent, dent_inode, &nls_name);
kfree(nls_name.name);
return dent;
eio_err_out:
ntfs_error(vol->sb, "Illegal file name attribute. Run chkdsk.");
err = -EIO;
err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(ni);
iput(dent_inode);
ntfs_error(vol->sb, "Failed, returning error code %i.", err);
return ERR_PTR(err);
}
}
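/*
 * Worked example of the three lookup cases documented above (all names made
 * up): a lookup for "FOO.TXT" matching a WIN32/POSIX entry "FOO.TXT"
 * including case is case 1; a lookup for "foo.txt" matching the WIN32 entry
 * "FOO.TXT" only case insensitively is case 2 and yields the properly cased
 * name via d_add_ci(); a lookup for a DOS short name such as "FOO~1.TXT" is
 * case 3 and is resolved to the corresponding WIN32 long name first.
 */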
/*
* Inode operations for directories.
*/
const struct inode_operations ntfs_dir_inode_ops = {
.lookup = ntfs_lookup, /* VFS: Lookup directory. */
};
/**
* ntfs_get_parent - find the dentry of the parent of a given directory dentry
* @child_dent: dentry of the directory whose parent directory to find
*
* Find the dentry for the parent directory of the directory specified by the
* dentry @child_dent. This function is called from
* fs/exportfs/expfs.c::find_exported_dentry() which in turn is called from the
* default ->decode_fh() which is export_decode_fh() in the same file.
*
* The code is based on the ext3 ->get_parent() implementation found in
* fs/ext3/namei.c::ext3_get_parent().
*
* Note: ntfs_get_parent() is called with @d_inode(child_dent)->i_mutex down.
*
* Return the dentry of the parent directory on success or the error code on
* error (IS_ERR() is true).
*/
static struct dentry *ntfs_get_parent(struct dentry *child_dent)
{
struct inode *vi = d_inode(child_dent);
ntfs_inode *ni = NTFS_I(vi);
MFT_RECORD *mrec;
ntfs_attr_search_ctx *ctx;
ATTR_RECORD *attr;
FILE_NAME_ATTR *fn;
unsigned long parent_ino;
int err;
ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
/* Get the mft record of the inode belonging to the child dentry. */
mrec = map_mft_record(ni);
if (IS_ERR(mrec))
return ERR_CAST(mrec);
/* Find the first file name attribute in the mft record. */
ctx = ntfs_attr_get_search_ctx(ni, mrec);
if (unlikely(!ctx)) {
unmap_mft_record(ni);
return ERR_PTR(-ENOMEM);
}
try_next:
err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE, 0, NULL,
0, ctx);
if (unlikely(err)) {
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ni);
if (err == -ENOENT)
ntfs_error(vi->i_sb, "Inode 0x%lx does not have a "
"file name attribute. Run chkdsk.",
vi->i_ino);
return ERR_PTR(err);
}
attr = ctx->attr;
if (unlikely(attr->non_resident))
goto try_next;
fn = (FILE_NAME_ATTR *)((u8 *)attr +
le16_to_cpu(attr->data.resident.value_offset));
if (unlikely((u8 *)fn + le32_to_cpu(attr->data.resident.value_length) >
(u8*)attr + le32_to_cpu(attr->length)))
goto try_next;
/* Get the inode number of the parent directory. */
parent_ino = MREF_LE(fn->parent_directory);
/* Release the search context and the mft record of the child. */
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(ni);
return d_obtain_alias(ntfs_iget(vi->i_sb, parent_ino));
}
static struct inode *ntfs_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
{
struct inode *inode;
inode = ntfs_iget(sb, ino);
if (!IS_ERR(inode)) {
if (is_bad_inode(inode) || inode->i_generation != generation) {
iput(inode);
inode = ERR_PTR(-ESTALE);
}
}
return inode;
}
static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
ntfs_nfs_get_inode);
}
static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_parent(sb, fid, fh_len, fh_type,
ntfs_nfs_get_inode);
}
/*
* Export operations allowing NFS exporting of mounted NTFS partitions.
*
 * We use the default ->encode_fh() for now.  Note that it stores the inode
 * number in only 32 bits, while the inode number is an unsigned long, which
 * is usually 64 bits on 64-bit architectures, so it would all fail horribly
 * on huge
* volumes. I guess we need to define our own encode and decode fh functions
* that store 64-bit inode numbers at some point but for now we will ignore the
* problem...
*
* We also use the default ->get_name() helper (used by ->decode_fh() via
* fs/exportfs/expfs.c::find_exported_dentry()) as that is completely fs
* independent.
*
* The default ->get_parent() just returns -EACCES so we have to provide our
* own and the default ->get_dentry() is incompatible with NTFS due to not
* allowing the inode number 0 which is used in NTFS for the system file $MFT
* and due to using iget() whereas NTFS needs ntfs_iget().
*/
const struct export_operations ntfs_export_ops = {
.get_parent = ntfs_get_parent, /* Find the parent of a given
directory. */
.fh_to_dentry = ntfs_fh_to_dentry,
.fh_to_parent = ntfs_fh_to_parent,
};
| linux-master | fs/ntfs/namei.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* aops.c - NTFS kernel address space operations and page cache handling.
*
* Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
* Copyright (c) 2002 Richard Russon
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/bio.h>
#include "aops.h"
#include "attrib.h"
#include "debug.h"
#include "inode.h"
#include "mft.h"
#include "runlist.h"
#include "types.h"
#include "ntfs.h"
/**
* ntfs_end_buffer_async_read - async io completion for reading attributes
* @bh: buffer head on which io is completed
* @uptodate: whether @bh is now uptodate or not
*
* Asynchronous I/O completion handler for reading pages belonging to the
* attribute address space of an inode. The inodes can either be files or
* directories or they can be fake inodes describing some attribute.
*
* If NInoMstProtected(), perform the post read mst fixups when all IO on the
* page has been completed and mark the page uptodate or set the error bit on
* the page. To determine the size of the records that need fixing up, we
* cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs
* record size, and index_block_size_bits, to the log(base 2) of the ntfs
* record size.
*/
static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
unsigned long flags;
struct buffer_head *first, *tmp;
struct page *page;
struct inode *vi;
ntfs_inode *ni;
int page_uptodate = 1;
page = bh->b_page;
vi = page->mapping->host;
ni = NTFS_I(vi);
if (likely(uptodate)) {
loff_t i_size;
s64 file_ofs, init_size;
set_buffer_uptodate(bh);
file_ofs = ((s64)page->index << PAGE_SHIFT) +
bh_offset(bh);
read_lock_irqsave(&ni->size_lock, flags);
init_size = ni->initialized_size;
i_size = i_size_read(vi);
read_unlock_irqrestore(&ni->size_lock, flags);
if (unlikely(init_size > i_size)) {
/* Race with shrinking truncate. */
init_size = i_size;
}
/* Check for the current buffer head overflowing. */
if (unlikely(file_ofs + bh->b_size > init_size)) {
int ofs;
void *kaddr;
ofs = 0;
if (file_ofs < init_size)
ofs = init_size - file_ofs;
kaddr = kmap_atomic(page);
memset(kaddr + bh_offset(bh) + ofs, 0,
bh->b_size - ofs);
flush_dcache_page(page);
kunmap_atomic(kaddr);
}
} else {
clear_buffer_uptodate(bh);
SetPageError(page);
ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
spin_lock_irqsave(&first->b_uptodate_lock, flags);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
do {
if (!buffer_uptodate(tmp))
page_uptodate = 0;
if (buffer_async_read(tmp)) {
if (likely(buffer_locked(tmp)))
goto still_busy;
/* Async buffers must be locked. */
BUG();
}
tmp = tmp->b_this_page;
} while (tmp != bh);
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
	 * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
* Note we ignore fixup errors as those are detected when
* map_mft_record() is called which gives us per record granularity
* rather than per page granularity.
*/
if (!NInoMstProtected(ni)) {
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
} else {
u8 *kaddr;
unsigned int i, recs;
u32 rec_size;
rec_size = ni->itype.index.block_size;
recs = PAGE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
kaddr = kmap_atomic(page);
for (i = 0; i < recs; i++)
post_read_mst_fixup((NTFS_RECORD*)(kaddr +
i * rec_size), rec_size);
kunmap_atomic(kaddr);
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
}
unlock_page(page);
return;
still_busy:
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
return;
}
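/*
 * Worked example of the record fixup pass above: for 1024-byte index
 * records on a system with 4096-byte pages, recs = PAGE_SIZE / rec_size
 * evaluates to 4, so post_read_mst_fixup() is applied once per record, at
 * offsets 0, 1024, 2048 and 3072 within the page.
 */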
/**
* ntfs_read_block - fill a @page of an address space with data
* @page: page cache page to fill with data
*
* Fill the page @page of the address space belonging to the @page->host inode.
* We read each buffer asynchronously and when all buffers are read in, our io
 * completion handler ntfs_end_buffer_async_read(), if required, automatically
* applies the mst fixups to the page before finally marking it uptodate and
* unlocking it.
*
 * We only enforce the allocated_size limit because i_size is checked for in
* generic_file_read().
*
* Return 0 on success and -errno on error.
*
* Contains an adapted version of fs/buffer.c::block_read_full_folio().
*/
static int ntfs_read_block(struct page *page)
{
loff_t i_size;
VCN vcn;
LCN lcn;
s64 init_size;
struct inode *vi;
ntfs_inode *ni;
ntfs_volume *vol;
runlist_element *rl;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
sector_t iblock, lblock, zblock;
unsigned long flags;
unsigned int blocksize, vcn_ofs;
int i, nr;
unsigned char blocksize_bits;
vi = page->mapping->host;
ni = NTFS_I(vi);
vol = ni->vol;
/* $MFT/$DATA must have its complete runlist in memory at all times. */
BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
blocksize = vol->sb->s_blocksize;
blocksize_bits = vol->sb->s_blocksize_bits;
if (!page_has_buffers(page)) {
create_empty_buffers(page, blocksize, 0);
if (unlikely(!page_has_buffers(page))) {
unlock_page(page);
return -ENOMEM;
}
}
bh = head = page_buffers(page);
BUG_ON(!bh);
/*
* We may be racing with truncate. To avoid some of the problems we
* now take a snapshot of the various sizes and use those for the whole
* of the function. In case of an extending truncate it just means we
* may leave some buffers unmapped which are now allocated. This is
* not a problem since these buffers will just get mapped when a write
* occurs. In case of a shrinking truncate, we will detect this later
* on due to the runlist being incomplete and if the page is being
* fully truncated, truncate will throw it away as soon as we unlock
* it so no need to worry what we do with it.
*/
iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
read_lock_irqsave(&ni->size_lock, flags);
lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
init_size = ni->initialized_size;
i_size = i_size_read(vi);
read_unlock_irqrestore(&ni->size_lock, flags);
if (unlikely(init_size > i_size)) {
/* Race with shrinking truncate. */
init_size = i_size;
}
zblock = (init_size + blocksize - 1) >> blocksize_bits;
/* Loop through all the buffers in the page. */
rl = NULL;
nr = i = 0;
do {
int err = 0;
if (unlikely(buffer_uptodate(bh)))
continue;
if (unlikely(buffer_mapped(bh))) {
arr[nr++] = bh;
continue;
}
bh->b_bdev = vol->sb->s_bdev;
/* Is the block within the allowed limits? */
if (iblock < lblock) {
bool is_retry = false;
/* Convert iblock into corresponding vcn and offset. */
vcn = (VCN)iblock << blocksize_bits >>
vol->cluster_size_bits;
vcn_ofs = ((VCN)iblock << blocksize_bits) &
vol->cluster_size_mask;
if (!rl) {
lock_retry_remap:
down_read(&ni->runlist.lock);
rl = ni->runlist.rl;
}
if (likely(rl != NULL)) {
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
} else
lcn = LCN_RL_NOT_MAPPED;
/* Successful remap. */
if (lcn >= 0) {
/* Setup buffer head to correct block. */
bh->b_blocknr = ((lcn << vol->cluster_size_bits)
+ vcn_ofs) >> blocksize_bits;
set_buffer_mapped(bh);
/* Only read initialized data blocks. */
if (iblock < zblock) {
arr[nr++] = bh;
continue;
}
/* Fully non-initialized data block, zero it. */
goto handle_zblock;
}
/* It is a hole, need to zero it. */
if (lcn == LCN_HOLE)
goto handle_hole;
/* If first try and runlist unmapped, map and retry. */
if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
is_retry = true;
/*
* Attempt to map runlist, dropping lock for
* the duration.
*/
up_read(&ni->runlist.lock);
err = ntfs_map_runlist(ni, vcn);
if (likely(!err))
goto lock_retry_remap;
rl = NULL;
} else if (!rl)
up_read(&ni->runlist.lock);
/*
* If buffer is outside the runlist, treat it as a
* hole. This can happen due to concurrent truncate
* for example.
*/
if (err == -ENOENT || lcn == LCN_ENOENT) {
err = 0;
goto handle_hole;
}
/* Hard error, zero out region. */
if (!err)
err = -EIO;
bh->b_blocknr = -1;
SetPageError(page);
ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
"attribute type 0x%x, vcn 0x%llx, "
"offset 0x%x because its location on "
"disk could not be determined%s "
"(error code %i).", ni->mft_no,
ni->type, (unsigned long long)vcn,
vcn_ofs, is_retry ? " even after "
"retrying" : "", err);
}
/*
* Either iblock was outside lblock limits or
		 * ntfs_rl_vcn_to_lcn() returned an error.  Just zero that portion
* of the page and set the buffer uptodate.
*/
handle_hole:
bh->b_blocknr = -1UL;
clear_buffer_mapped(bh);
handle_zblock:
zero_user(page, i * blocksize, blocksize);
if (likely(!err))
set_buffer_uptodate(bh);
} while (i++, iblock++, (bh = bh->b_this_page) != head);
/* Release the lock if we took it. */
if (rl)
up_read(&ni->runlist.lock);
/* Check we have at least one buffer ready for i/o. */
if (nr) {
struct buffer_head *tbh;
/* Lock the buffers. */
for (i = 0; i < nr; i++) {
tbh = arr[i];
lock_buffer(tbh);
tbh->b_end_io = ntfs_end_buffer_async_read;
set_buffer_async_read(tbh);
}
/* Finally, start i/o on the buffers. */
for (i = 0; i < nr; i++) {
tbh = arr[i];
if (likely(!buffer_uptodate(tbh)))
submit_bh(REQ_OP_READ, tbh);
else
ntfs_end_buffer_async_read(tbh, 1);
}
return 0;
}
/* No i/o was scheduled on any of the buffers. */
if (likely(!PageError(page)))
SetPageUptodate(page);
else /* Signal synchronous i/o error. */
nr = -EIO;
unlock_page(page);
return nr;
}
/**
* ntfs_read_folio - fill a @folio of a @file with data from the device
* @file: open file to which the folio @folio belongs or NULL
* @folio: page cache folio to fill with data
*
* For non-resident attributes, ntfs_read_folio() fills the @folio of the open
* file @file by calling the ntfs version of the generic block_read_full_folio()
* function, ntfs_read_block(), which in turn creates and reads in the buffers
* associated with the folio asynchronously.
*
* For resident attributes, OTOH, ntfs_read_folio() fills @folio by copying the
* data from the mft record (which at this stage is most likely in memory) and
* fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
* even if the mft record is not cached at this point in time, we need to wait
* for it to be read in before we can do the copy.
*
* Return 0 on success and -errno on error.
*/
static int ntfs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
loff_t i_size;
struct inode *vi;
ntfs_inode *ni, *base_ni;
u8 *addr;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *mrec;
unsigned long flags;
u32 attr_len;
int err = 0;
retry_readpage:
BUG_ON(!PageLocked(page));
vi = page->mapping->host;
i_size = i_size_read(vi);
/* Is the page fully outside i_size? (truncate in progress) */
if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT)) {
zero_user(page, 0, PAGE_SIZE);
ntfs_debug("Read outside i_size - truncated?");
goto done;
}
/*
* This can potentially happen because we clear PageUptodate() during
* ntfs_writepage() of MstProtected() attributes.
*/
if (PageUptodate(page)) {
unlock_page(page);
return 0;
}
ni = NTFS_I(vi);
/*
* Only $DATA attributes can be encrypted and only unnamed $DATA
* attributes can be compressed. Index root can have the flags set but
* this means to create compressed/encrypted files, not that the
* attribute is compressed/encrypted. Note we need to check for
* AT_INDEX_ALLOCATION since this is the type of both directory and
* index inodes.
*/
if (ni->type != AT_INDEX_ALLOCATION) {
/* If attribute is encrypted, deny access, just like NT4. */
if (NInoEncrypted(ni)) {
BUG_ON(ni->type != AT_DATA);
err = -EACCES;
goto err_out;
}
/* Compressed data streams are handled in compress.c. */
if (NInoNonResident(ni) && NInoCompressed(ni)) {
BUG_ON(ni->type != AT_DATA);
BUG_ON(ni->name_len);
return ntfs_read_compressed_block(page);
}
}
/* NInoNonResident() == NInoIndexAllocPresent() */
if (NInoNonResident(ni)) {
/* Normal, non-resident data stream. */
return ntfs_read_block(page);
}
/*
* Attribute is resident, implying it is not compressed or encrypted.
* This also means the attribute is smaller than an mft record and
* hence smaller than a page, so can simply zero out any pages with
* index above 0. Note the attribute can actually be marked compressed
* but if it is resident the actual data is not compressed so we are
* ok to ignore the compressed flag here.
*/
if (unlikely(page->index > 0)) {
zero_user(page, 0, PAGE_SIZE);
goto done;
}
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
/* Map, pin, and lock the mft record. */
mrec = map_mft_record(base_ni);
if (IS_ERR(mrec)) {
err = PTR_ERR(mrec);
goto err_out;
}
/*
* If a parallel write made the attribute non-resident, drop the mft
* record and retry the read_folio.
*/
if (unlikely(NInoNonResident(ni))) {
unmap_mft_record(base_ni);
goto retry_readpage;
}
ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto unm_err_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err))
goto put_unm_err_out;
attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
read_lock_irqsave(&ni->size_lock, flags);
if (unlikely(attr_len > ni->initialized_size))
attr_len = ni->initialized_size;
i_size = i_size_read(vi);
read_unlock_irqrestore(&ni->size_lock, flags);
if (unlikely(attr_len > i_size)) {
/* Race with shrinking truncate. */
attr_len = i_size;
}
addr = kmap_atomic(page);
/* Copy the data to the page. */
memcpy(addr, (u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
attr_len);
/* Zero the remainder of the page. */
memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
flush_dcache_page(page);
kunmap_atomic(addr);
put_unm_err_out:
ntfs_attr_put_search_ctx(ctx);
unm_err_out:
unmap_mft_record(base_ni);
done:
SetPageUptodate(page);
err_out:
unlock_page(page);
return err;
}
#ifdef NTFS_RW
/**
* ntfs_write_block - write a @page to the backing store
* @page: page cache page to write out
* @wbc: writeback control structure
*
* This function is for writing pages belonging to non-resident, non-mst
* protected attributes to their backing store.
*
* For a page with buffers, map and write the dirty buffers asynchronously
* under page writeback. For a page without buffers, create buffers for the
* page, then proceed as above.
*
 * If a page doesn't have buffers, the page dirty state is definitive. If a
 * page does have buffers, the page dirty state is just a hint, and the buffer
 * dirty state is definitive. (A hint which has rules: dirty buffers against a
 * clean page is illegal; other combinations are legal and need to be handled,
 * in particular a dirty page containing clean buffers.)
*
* Return 0 on success and -errno on error.
*
* Based on ntfs_read_block() and __block_write_full_folio().
*/
static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
{
VCN vcn;
LCN lcn;
s64 initialized_size;
loff_t i_size;
sector_t block, dblock, iblock;
struct inode *vi;
ntfs_inode *ni;
ntfs_volume *vol;
runlist_element *rl;
struct buffer_head *bh, *head;
unsigned long flags;
unsigned int blocksize, vcn_ofs;
int err;
bool need_end_writeback;
unsigned char blocksize_bits;
vi = page->mapping->host;
ni = NTFS_I(vi);
vol = ni->vol;
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
"0x%lx.", ni->mft_no, ni->type, page->index);
BUG_ON(!NInoNonResident(ni));
BUG_ON(NInoMstProtected(ni));
blocksize = vol->sb->s_blocksize;
blocksize_bits = vol->sb->s_blocksize_bits;
if (!page_has_buffers(page)) {
BUG_ON(!PageUptodate(page));
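		/*
		 * The page is uptodate and dirty, so the new buffers must be
		 * created in the same state.
		 */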
create_empty_buffers(page, blocksize,
(1 << BH_Uptodate) | (1 << BH_Dirty));
if (unlikely(!page_has_buffers(page))) {
ntfs_warning(vol->sb, "Error allocating page "
"buffers. Redirtying page so we try "
"again later.");
/*
* Put the page back on mapping->dirty_pages, but leave
* its buffers' dirty state as-is.
*/
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
}
bh = head = page_buffers(page);
BUG_ON(!bh);
/* NOTE: Different naming scheme to ntfs_read_block()! */
/* The first block in the page. */
block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
read_lock_irqsave(&ni->size_lock, flags);
i_size = i_size_read(vi);
initialized_size = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
/* The first out of bounds block for the data size. */
dblock = (i_size + blocksize - 1) >> blocksize_bits;
/* The last (fully or partially) initialized block. */
iblock = initialized_size >> blocksize_bits;
/*
* Be very careful. We have no exclusion from block_dirty_folio
* here, and the (potentially unmapped) buffers may become dirty at
* any time. If a buffer becomes dirty here after we've inspected it
* then we just miss that fact, and the page stays dirty.
*
* Buffers outside i_size may be dirtied by block_dirty_folio;
* handle that here by just cleaning them.
*/
/*
* Loop through all the buffers in the page, mapping all the dirty
* buffers to disk addresses and handling any aliases from the
* underlying block device's mapping.
*/
rl = NULL;
err = 0;
do {
bool is_retry = false;
if (unlikely(block >= dblock)) {
/*
* Mapped buffers outside i_size will occur, because
* this page can be outside i_size when there is a
* truncate in progress. The contents of such buffers
* were zeroed by ntfs_writepage().
*
* FIXME: What about the small race window where
* ntfs_writepage() has not done any clearing because
* the page was within i_size but before we get here,
* vmtruncate() modifies i_size?
*/
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
continue;
}
/* Clean buffers are not written out, so no need to map them. */
if (!buffer_dirty(bh))
continue;
/* Make sure we have enough initialized size. */
if (unlikely((block >= iblock) &&
(initialized_size < i_size))) {
/*
* If this page is fully outside initialized
* size, zero out all pages between the current
* initialized size and the current page. Just
* use ntfs_read_folio() to do the zeroing
* transparently.
*/
if (block > iblock) {
// TODO:
// For each page do:
// - read_cache_page()
// Again for each page do:
// - wait_on_page_locked()
// - Check (PageUptodate(page) &&
// !PageError(page))
// Update initialized size in the attribute and
// in the inode.
// Again, for each page do:
// block_dirty_folio();
// put_page()
// We don't need to wait on the writes.
// Update iblock.
}
/*
* The current page straddles initialized size. Zero
* all non-uptodate buffers and set them uptodate (and
* dirty?). Note, there aren't any non-uptodate buffers
* if the page is uptodate.
* FIXME: For an uptodate page, the buffers may need to
* be written out because they were not initialized on
* disk before.
*/
if (!PageUptodate(page)) {
// TODO:
// Zero any non-uptodate buffers up to i_size.
// Set them uptodate and dirty.
}
// TODO:
// Update initialized size in the attribute and in the
// inode (up to i_size).
// Update iblock.
// FIXME: This is inefficient. Try to batch the two
// size changes to happen in one go.
ntfs_error(vol->sb, "Writing beyond initialized size "
"is not supported yet. Sorry.");
err = -EOPNOTSUPP;
break;
// Do NOT set_buffer_new() BUT DO clear buffer range
// outside write request range.
// set_buffer_uptodate() on complete buffers as well as
// set_buffer_dirty().
}
/* No need to map buffers that are already mapped. */
if (buffer_mapped(bh))
continue;
/* Unmapped, dirty buffer. Need to map it. */
bh->b_bdev = vol->sb->s_bdev;
/* Convert block into corresponding vcn and offset. */
vcn = (VCN)block << blocksize_bits;
vcn_ofs = vcn & vol->cluster_size_mask;
vcn >>= vol->cluster_size_bits;
if (!rl) {
lock_retry_remap:
down_read(&ni->runlist.lock);
rl = ni->runlist.rl;
}
if (likely(rl != NULL)) {
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
} else
lcn = LCN_RL_NOT_MAPPED;
/* Successful remap. */
if (lcn >= 0) {
/* Setup buffer head to point to correct block. */
bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
vcn_ofs) >> blocksize_bits;
set_buffer_mapped(bh);
continue;
}
/* It is a hole, need to instantiate it. */
if (lcn == LCN_HOLE) {
u8 *kaddr;
unsigned long *bpos, *bend;
/* Check if the buffer is zero. */
kaddr = kmap_atomic(page);
bpos = (unsigned long *)(kaddr + bh_offset(bh));
bend = (unsigned long *)((u8*)bpos + blocksize);
do {
if (unlikely(*bpos))
break;
} while (likely(++bpos < bend));
kunmap_atomic(kaddr);
if (bpos == bend) {
/*
* Buffer is zero and sparse, no need to write
* it.
*/
bh->b_blocknr = -1;
clear_buffer_dirty(bh);
continue;
}
// TODO: Instantiate the hole.
// clear_buffer_new(bh);
// clean_bdev_bh_alias(bh);
ntfs_error(vol->sb, "Writing into sparse regions is "
"not supported yet. Sorry.");
err = -EOPNOTSUPP;
break;
}
/* If first try and runlist unmapped, map and retry. */
if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
is_retry = true;
/*
* Attempt to map runlist, dropping lock for
* the duration.
*/
up_read(&ni->runlist.lock);
err = ntfs_map_runlist(ni, vcn);
if (likely(!err))
goto lock_retry_remap;
rl = NULL;
} else if (!rl)
up_read(&ni->runlist.lock);
/*
* If buffer is outside the runlist, truncate has cut it out
* of the runlist. Just clean and clear the buffer and set it
* uptodate so it can get discarded by the VM.
*/
if (err == -ENOENT || lcn == LCN_ENOENT) {
bh->b_blocknr = -1;
clear_buffer_dirty(bh);
zero_user(page, bh_offset(bh), blocksize);
set_buffer_uptodate(bh);
err = 0;
continue;
}
/* Failed to map the buffer, even after retrying. */
if (!err)
err = -EIO;
bh->b_blocknr = -1;
ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
"attribute type 0x%x, vcn 0x%llx, offset 0x%x "
"because its location on disk could not be "
"determined%s (error code %i).", ni->mft_no,
ni->type, (unsigned long long)vcn,
vcn_ofs, is_retry ? " even after "
"retrying" : "", err);
break;
} while (block++, (bh = bh->b_this_page) != head);
/* Release the lock if we took it. */
if (rl)
up_read(&ni->runlist.lock);
/* For the error case, need to reset bh to the beginning. */
bh = head;
/* Just an optimization, so ->read_folio() is not called later. */
if (unlikely(!PageUptodate(page))) {
int uptodate = 1;
do {
if (!buffer_uptodate(bh)) {
uptodate = 0;
bh = head;
break;
}
} while ((bh = bh->b_this_page) != head);
if (uptodate)
SetPageUptodate(page);
}
/* Setup all mapped, dirty buffers for async write i/o. */
do {
if (buffer_mapped(bh) && buffer_dirty(bh)) {
lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) {
BUG_ON(!buffer_uptodate(bh));
mark_buffer_async_write(bh);
} else
unlock_buffer(bh);
} else if (unlikely(err)) {
/*
* For the error case. The buffer may have been set
* dirty during attachment to a dirty page.
*/
if (err != -ENOMEM)
clear_buffer_dirty(bh);
}
} while ((bh = bh->b_this_page) != head);
if (unlikely(err)) {
// TODO: Remove the -EOPNOTSUPP check later on...
if (unlikely(err == -EOPNOTSUPP))
err = 0;
else if (err == -ENOMEM) {
ntfs_warning(vol->sb, "Error allocating memory. "
"Redirtying page so we try again "
"later.");
/*
* Put the page back on mapping->dirty_pages, but
* leave its buffer's dirty state as-is.
*/
redirty_page_for_writepage(wbc, page);
err = 0;
} else
SetPageError(page);
}
BUG_ON(PageWriteback(page));
set_page_writeback(page); /* Keeps try_to_free_buffers() away. */
/* Submit the prepared buffers for i/o. */
need_end_writeback = true;
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
submit_bh(REQ_OP_WRITE, bh);
need_end_writeback = false;
}
bh = next;
} while (bh != head);
unlock_page(page);
/* If no i/o was started, need to end_page_writeback(). */
if (unlikely(need_end_writeback))
end_page_writeback(page);
ntfs_debug("Done.");
return err;
}
/**
* ntfs_write_mst_block - write a @page to the backing store
* @page: page cache page to write out
* @wbc: writeback control structure
*
* This function is for writing pages belonging to non-resident, mst protected
* attributes to their backing store. The only supported attributes are index
* allocation and $MFT/$DATA. Both directory inodes and index inodes are
* supported for the index allocation case.
*
* The page must remain locked for the duration of the write because we apply
 * the mst fixups, write, and then undo the fixups, so if we were to unlock the
 * page before undoing the fixups, any other user of the page would see the
 * page contents as corrupt.
*
* We clear the page uptodate flag for the duration of the function to ensure
* exclusion for the $MFT/$DATA case against someone mapping an mft record we
* are about to apply the mst fixups to.
*
* Return 0 on success and -errno on error.
*
* Based on ntfs_write_block(), ntfs_mft_writepage(), and
* write_mft_record_nolock().
*/
static int ntfs_write_mst_block(struct page *page,
struct writeback_control *wbc)
{
sector_t block, dblock, rec_block;
struct inode *vi = page->mapping->host;
ntfs_inode *ni = NTFS_I(vi);
ntfs_volume *vol = ni->vol;
u8 *kaddr;
unsigned int rec_size = ni->itype.index.block_size;
ntfs_inode *locked_nis[PAGE_SIZE / NTFS_BLOCK_SIZE];
struct buffer_head *bh, *head, *tbh, *rec_start_bh;
struct buffer_head *bhs[MAX_BUF_PER_PAGE];
runlist_element *rl;
int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
unsigned bh_size, rec_size_bits;
bool sync, is_mft, page_is_dirty, rec_is_dirty;
unsigned char bh_size_bits;
if (WARN_ON(rec_size < NTFS_BLOCK_SIZE))
return -EINVAL;
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
"0x%lx.", vi->i_ino, ni->type, page->index);
BUG_ON(!NInoNonResident(ni));
BUG_ON(!NInoMstProtected(ni));
is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
/*
* NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
 * in its page cache were to be marked dirty. However, this should
 * never happen with the current driver and, since we do not handle
 * this case here, we do want to BUG(), at least for now.
*/
BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
bh_size = vol->sb->s_blocksize;
bh_size_bits = vol->sb->s_blocksize_bits;
max_bhs = PAGE_SIZE / bh_size;
BUG_ON(!max_bhs);
BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
/* Were we called for sync purposes? */
sync = (wbc->sync_mode == WB_SYNC_ALL);
/* Make sure we have mapped buffers. */
bh = head = page_buffers(page);
BUG_ON(!bh);
rec_size_bits = ni->itype.index.block_size_bits;
BUG_ON(!(PAGE_SIZE >> rec_size_bits));
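	/*
	 * Buffers per ntfs record, e.g. eight 512-byte buffer heads for a
	 * 4096-byte index block.
	 */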
bhs_per_rec = rec_size >> bh_size_bits;
BUG_ON(!bhs_per_rec);
/* The first block in the page. */
rec_block = block = (sector_t)page->index <<
(PAGE_SHIFT - bh_size_bits);
/* The first out of bounds block for the data size. */
dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
rl = NULL;
err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
page_is_dirty = rec_is_dirty = false;
rec_start_bh = NULL;
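	/*
	 * Walk the buffers in the page, mapping them as needed and collecting
	 * the buffers of each dirty ntfs record in bhs[] for write-out.
	 */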
do {
bool is_retry = false;
if (likely(block < rec_block)) {
if (unlikely(block >= dblock)) {
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
continue;
}
/*
* This block is not the first one in the record. We
* ignore the buffer's dirty state because we could
* have raced with a parallel mark_ntfs_record_dirty().
*/
if (!rec_is_dirty)
continue;
if (unlikely(err2)) {
if (err2 != -ENOMEM)
clear_buffer_dirty(bh);
continue;
}
} else /* if (block == rec_block) */ {
BUG_ON(block > rec_block);
/* This block is the first one in the record. */
rec_block += bhs_per_rec;
err2 = 0;
if (unlikely(block >= dblock)) {
clear_buffer_dirty(bh);
continue;
}
if (!buffer_dirty(bh)) {
/* Clean records are not written out. */
rec_is_dirty = false;
continue;
}
rec_is_dirty = true;
rec_start_bh = bh;
}
/* Need to map the buffer if it is not mapped already. */
if (unlikely(!buffer_mapped(bh))) {
VCN vcn;
LCN lcn;
unsigned int vcn_ofs;
bh->b_bdev = vol->sb->s_bdev;
/* Obtain the vcn and offset of the current block. */
vcn = (VCN)block << bh_size_bits;
vcn_ofs = vcn & vol->cluster_size_mask;
vcn >>= vol->cluster_size_bits;
if (!rl) {
lock_retry_remap:
down_read(&ni->runlist.lock);
rl = ni->runlist.rl;
}
if (likely(rl != NULL)) {
/* Seek to element containing target vcn. */
while (rl->length && rl[1].vcn <= vcn)
rl++;
lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
} else
lcn = LCN_RL_NOT_MAPPED;
/* Successful remap. */
if (likely(lcn >= 0)) {
/* Setup buffer head to correct block. */
bh->b_blocknr = ((lcn <<
vol->cluster_size_bits) +
vcn_ofs) >> bh_size_bits;
set_buffer_mapped(bh);
} else {
/*
* Remap failed. Retry to map the runlist once
* unless we are working on $MFT which always
* has the whole of its runlist in memory.
*/
if (!is_mft && !is_retry &&
lcn == LCN_RL_NOT_MAPPED) {
is_retry = true;
/*
* Attempt to map runlist, dropping
* lock for the duration.
*/
up_read(&ni->runlist.lock);
err2 = ntfs_map_runlist(ni, vcn);
if (likely(!err2))
goto lock_retry_remap;
if (err2 == -ENOMEM)
page_is_dirty = true;
lcn = err2;
} else {
err2 = -EIO;
if (!rl)
up_read(&ni->runlist.lock);
}
/* Hard error. Abort writing this record. */
if (!err || err == -ENOMEM)
err = err2;
bh->b_blocknr = -1;
ntfs_error(vol->sb, "Cannot write ntfs record "
"0x%llx (inode 0x%lx, "
"attribute type 0x%x) because "
"its location on disk could "
"not be determined (error "
"code %lli).",
(long long)block <<
bh_size_bits >>
vol->mft_record_size_bits,
ni->mft_no, ni->type,
(long long)lcn);
/*
* If this is not the first buffer, remove the
* buffers in this record from the list of
* buffers to write and clear their dirty bit
* if not error -ENOMEM.
*/
if (rec_start_bh != bh) {
while (bhs[--nr_bhs] != rec_start_bh)
;
if (err2 != -ENOMEM) {
do {
clear_buffer_dirty(
rec_start_bh);
} while ((rec_start_bh =
rec_start_bh->
b_this_page) !=
bh);
}
}
continue;
}
}
BUG_ON(!buffer_uptodate(bh));
BUG_ON(nr_bhs >= max_bhs);
bhs[nr_bhs++] = bh;
} while (block++, (bh = bh->b_this_page) != head);
if (unlikely(rl))
up_read(&ni->runlist.lock);
/* If there were no dirty buffers, we are done. */
if (!nr_bhs)
goto done;
/* Map the page so we can access its contents. */
kaddr = kmap(page);
/* Clear the page uptodate flag whilst the mst fixups are applied. */
BUG_ON(!PageUptodate(page));
ClearPageUptodate(page);
for (i = 0; i < nr_bhs; i++) {
unsigned int ofs;
/* Skip buffers which are not at the beginning of records. */
if (i % bhs_per_rec)
continue;
tbh = bhs[i];
ofs = bh_offset(tbh);
if (is_mft) {
ntfs_inode *tni;
unsigned long mft_no;
/* Get the mft record number. */
mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
>> rec_size_bits;
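			/*
			 * E.g. with 4096-byte pages and 1024-byte mft
			 * records, page index 3 and ofs 0x400 give mft
			 * record number 13.
			 */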
/* Check whether to write this mft record. */
tni = NULL;
if (!ntfs_may_write_mft_record(vol, mft_no,
(MFT_RECORD*)(kaddr + ofs), &tni)) {
/*
* The record should not be written. This
* means we need to redirty the page before
* returning.
*/
page_is_dirty = true;
/*
* Remove the buffers in this mft record from
* the list of buffers to write.
*/
do {
bhs[i] = NULL;
} while (++i % bhs_per_rec);
continue;
}
/*
* The record should be written. If a locked ntfs
* inode was returned, add it to the array of locked
* ntfs inodes.
*/
if (tni)
locked_nis[nr_locked_nis++] = tni;
}
/* Apply the mst protection fixups. */
err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
rec_size);
if (unlikely(err2)) {
if (!err || err == -ENOMEM)
err = -EIO;
ntfs_error(vol->sb, "Failed to apply mst fixups "
"(inode 0x%lx, attribute type 0x%x, "
"page index 0x%lx, page offset 0x%x)!"
" Unmount and run chkdsk.", vi->i_ino,
ni->type, page->index, ofs);
/*
* Mark all the buffers in this record clean as we do
* not want to write corrupt data to disk.
*/
do {
clear_buffer_dirty(bhs[i]);
bhs[i] = NULL;
} while (++i % bhs_per_rec);
continue;
}
nr_recs++;
}
/* If no records are to be written out, we are done. */
if (!nr_recs)
goto unm_done;
flush_dcache_page(page);
/* Lock buffers and start synchronous write i/o on them. */
for (i = 0; i < nr_bhs; i++) {
tbh = bhs[i];
if (!tbh)
continue;
if (!trylock_buffer(tbh))
BUG();
/* The buffer dirty state is now irrelevant, just clean it. */
clear_buffer_dirty(tbh);
BUG_ON(!buffer_uptodate(tbh));
BUG_ON(!buffer_mapped(tbh));
get_bh(tbh);
tbh->b_end_io = end_buffer_write_sync;
submit_bh(REQ_OP_WRITE, tbh);
}
/* Synchronize the mft mirror now if not @sync. */
if (is_mft && !sync)
goto do_mirror;
do_wait:
/* Wait on i/o completion of buffers. */
for (i = 0; i < nr_bhs; i++) {
tbh = bhs[i];
if (!tbh)
continue;
wait_on_buffer(tbh);
if (unlikely(!buffer_uptodate(tbh))) {
ntfs_error(vol->sb, "I/O error while writing ntfs "
"record buffer (inode 0x%lx, "
"attribute type 0x%x, page index "
"0x%lx, page offset 0x%lx)! Unmount "
"and run chkdsk.", vi->i_ino, ni->type,
page->index, bh_offset(tbh));
if (!err || err == -ENOMEM)
err = -EIO;
/*
* Set the buffer uptodate so the page and buffer
* states do not become out of sync.
*/
set_buffer_uptodate(tbh);
}
}
/* If @sync, now synchronize the mft mirror. */
if (is_mft && sync) {
do_mirror:
for (i = 0; i < nr_bhs; i++) {
unsigned long mft_no;
unsigned int ofs;
/*
* Skip buffers which are not at the beginning of
* records.
*/
if (i % bhs_per_rec)
continue;
tbh = bhs[i];
/* Skip removed buffers (and hence records). */
if (!tbh)
continue;
ofs = bh_offset(tbh);
/* Get the mft record number. */
mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
>> rec_size_bits;
if (mft_no < vol->mftmirr_size)
ntfs_sync_mft_mirror(vol, mft_no,
(MFT_RECORD*)(kaddr + ofs),
sync);
}
if (!sync)
goto do_wait;
}
/* Remove the mst protection fixups again. */
for (i = 0; i < nr_bhs; i++) {
if (!(i % bhs_per_rec)) {
tbh = bhs[i];
if (!tbh)
continue;
post_write_mst_fixup((NTFS_RECORD*)(kaddr +
bh_offset(tbh)));
}
}
flush_dcache_page(page);
unm_done:
/* Unlock any locked inodes. */
while (nr_locked_nis-- > 0) {
ntfs_inode *tni, *base_tni;
tni = locked_nis[nr_locked_nis];
/* Get the base inode. */
mutex_lock(&tni->extent_lock);
if (tni->nr_extents >= 0)
base_tni = tni;
else {
base_tni = tni->ext.base_ntfs_ino;
BUG_ON(!base_tni);
}
mutex_unlock(&tni->extent_lock);
ntfs_debug("Unlocking %s inode 0x%lx.",
tni == base_tni ? "base" : "extent",
tni->mft_no);
mutex_unlock(&tni->mrec_lock);
atomic_dec(&tni->count);
iput(VFS_I(base_tni));
}
SetPageUptodate(page);
kunmap(page);
done:
if (unlikely(err && err != -ENOMEM)) {
/*
* Set page error if there is only one ntfs record in the page.
		 * Otherwise we would lose per-record granularity.
*/
if (ni->itype.index.block_size == PAGE_SIZE)
SetPageError(page);
NVolSetErrors(vol);
}
if (page_is_dirty) {
ntfs_debug("Page still contains one or more dirty ntfs "
"records. Redirtying the page starting at "
"record 0x%lx.", page->index <<
(PAGE_SHIFT - rec_size_bits));
redirty_page_for_writepage(wbc, page);
unlock_page(page);
} else {
/*
* Keep the VM happy. This must be done otherwise the
* radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
* the page is clean.
*/
BUG_ON(PageWriteback(page));
set_page_writeback(page);
unlock_page(page);
end_page_writeback(page);
}
if (likely(!err))
ntfs_debug("Done.");
return err;
}
/**
* ntfs_writepage - write a @page to the backing store
* @page: page cache page to write out
* @wbc: writeback control structure
*
* This is called from the VM when it wants to have a dirty ntfs page cache
* page cleaned. The VM has already locked the page and marked it clean.
*
* For non-resident attributes, ntfs_writepage() writes the @page by calling
* the ntfs version of the generic block_write_full_page() function,
* ntfs_write_block(), which in turn if necessary creates and writes the
* buffers associated with the page asynchronously.
*
* For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
* the data to the mft record (which at this stage is most likely in memory).
* The mft record is then marked dirty and written out asynchronously via the
* vfs inode dirty code path for the inode the mft record belongs to or via the
* vm page dirty code path for the page the mft record is in.
*
* Based on ntfs_read_folio() and fs/buffer.c::block_write_full_page().
*
* Return 0 on success and -errno on error.
*/
static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
{
loff_t i_size;
struct inode *vi = page->mapping->host;
ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
char *addr;
ntfs_attr_search_ctx *ctx = NULL;
MFT_RECORD *m = NULL;
u32 attr_len;
int err;
retry_writepage:
BUG_ON(!PageLocked(page));
i_size = i_size_read(vi);
/* Is the page fully outside i_size? (truncate in progress) */
if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT)) {
struct folio *folio = page_folio(page);
/*
* The page may have dirty, unmapped buffers. Make them
* freeable here, so the page does not leak.
*/
block_invalidate_folio(folio, 0, folio_size(folio));
folio_unlock(folio);
ntfs_debug("Write outside i_size - truncated?");
return 0;
}
/*
* Only $DATA attributes can be encrypted and only unnamed $DATA
* attributes can be compressed. Index root can have the flags set but
* this means to create compressed/encrypted files, not that the
* attribute is compressed/encrypted. Note we need to check for
* AT_INDEX_ALLOCATION since this is the type of both directory and
* index inodes.
*/
if (ni->type != AT_INDEX_ALLOCATION) {
/* If file is encrypted, deny access, just like NT4. */
if (NInoEncrypted(ni)) {
unlock_page(page);
BUG_ON(ni->type != AT_DATA);
ntfs_debug("Denying write access to encrypted file.");
return -EACCES;
}
/* Compressed data streams are handled in compress.c. */
if (NInoNonResident(ni) && NInoCompressed(ni)) {
BUG_ON(ni->type != AT_DATA);
BUG_ON(ni->name_len);
// TODO: Implement and replace this with
// return ntfs_write_compressed_block(page);
unlock_page(page);
ntfs_error(vi->i_sb, "Writing to compressed files is "
"not supported yet. Sorry.");
return -EOPNOTSUPP;
}
// TODO: Implement and remove this check.
if (NInoNonResident(ni) && NInoSparse(ni)) {
unlock_page(page);
ntfs_error(vi->i_sb, "Writing to sparse files is not "
"supported yet. Sorry.");
return -EOPNOTSUPP;
}
}
/* NInoNonResident() == NInoIndexAllocPresent() */
if (NInoNonResident(ni)) {
/* We have to zero every time due to mmap-at-end-of-file. */
if (page->index >= (i_size >> PAGE_SHIFT)) {
/* The page straddles i_size. */
unsigned int ofs = i_size & ~PAGE_MASK;
zero_user_segment(page, ofs, PAGE_SIZE);
}
/* Handle mst protected attributes. */
if (NInoMstProtected(ni))
return ntfs_write_mst_block(page, wbc);
/* Normal, non-resident data stream. */
return ntfs_write_block(page, wbc);
}
/*
* Attribute is resident, implying it is not compressed, encrypted, or
* mst protected. This also means the attribute is smaller than an mft
* record and hence smaller than a page, so can simply return error on
* any pages with index above 0. Note the attribute can actually be
* marked compressed but if it is resident the actual data is not
* compressed so we are ok to ignore the compressed flag here.
*/
BUG_ON(page_has_buffers(page));
BUG_ON(!PageUptodate(page));
if (unlikely(page->index > 0)) {
ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. "
"Aborting write.", page->index);
BUG_ON(PageWriteback(page));
set_page_writeback(page);
unlock_page(page);
end_page_writeback(page);
return -EIO;
}
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
/* Map, pin, and lock the mft record. */
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
ctx = NULL;
goto err_out;
}
/*
* If a parallel write made the attribute non-resident, drop the mft
* record and retry the writepage.
*/
if (unlikely(NInoNonResident(ni))) {
unmap_mft_record(base_ni);
goto retry_writepage;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err))
goto err_out;
/*
* Keep the VM happy. This must be done otherwise the radix-tree tag
* PAGECACHE_TAG_DIRTY remains set even though the page is clean.
*/
BUG_ON(PageWriteback(page));
set_page_writeback(page);
unlock_page(page);
attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
i_size = i_size_read(vi);
if (unlikely(attr_len > i_size)) {
/* Race with shrinking truncate or a failed truncate. */
attr_len = i_size;
/*
* If the truncate failed, fix it up now. If a concurrent
* truncate, we do its job, so it does not have to do anything.
*/
err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
attr_len);
/* Shrinking cannot fail. */
BUG_ON(err);
}
addr = kmap_atomic(page);
/* Copy the data from the page to the mft record. */
memcpy((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset),
addr, attr_len);
/* Zero out of bounds area in the page cache page. */
memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
kunmap_atomic(addr);
flush_dcache_page(page);
flush_dcache_mft_record_page(ctx->ntfs_ino);
/* We are done with the page. */
end_page_writeback(page);
/* Finally, mark the mft record dirty, so it gets written back. */
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
return 0;
err_out:
if (err == -ENOMEM) {
ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
"page so we try again later.");
/*
* Put the page back on mapping->dirty_pages, but leave its
* buffers' dirty state as-is.
*/
redirty_page_for_writepage(wbc, page);
err = 0;
} else {
ntfs_error(vi->i_sb, "Resident attribute write failed with "
"error %i.", err);
SetPageError(page);
NVolSetErrors(ni->vol);
}
unlock_page(page);
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
return err;
}
#endif /* NTFS_RW */
/**
* ntfs_bmap - map logical file block to physical device block
* @mapping: address space mapping to which the block to be mapped belongs
* @block: logical block to map to its physical device block
*
* For regular, non-resident files (i.e. not compressed and not encrypted), map
* the logical @block belonging to the file described by the address space
* mapping @mapping to its physical device block.
*
* The size of the block is equal to the @s_blocksize field of the super block
* of the mounted file system which is guaranteed to be smaller than or equal
* to the cluster size thus the block is guaranteed to fit entirely inside the
* cluster which means we do not need to care how many contiguous bytes are
* available after the beginning of the block.
*
* Return the physical device block if the mapping succeeded or 0 if the block
* is sparse or there was an error.
*
* Note: This is a problem if someone tries to run bmap() on $Boot system file
* as that really is in block zero but there is nothing we can do. bmap() is
* just broken in that respect (just like it cannot distinguish sparse from
* not available or error).
*/
static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
{
s64 ofs, size;
loff_t i_size;
LCN lcn;
unsigned long blocksize, flags;
ntfs_inode *ni = NTFS_I(mapping->host);
ntfs_volume *vol = ni->vol;
unsigned delta;
unsigned char blocksize_bits, cluster_size_shift;
ntfs_debug("Entering for mft_no 0x%lx, logical block 0x%llx.",
ni->mft_no, (unsigned long long)block);
if (ni->type != AT_DATA || !NInoNonResident(ni) || NInoEncrypted(ni)) {
ntfs_error(vol->sb, "BMAP does not make sense for %s "
"attributes, returning 0.",
(ni->type != AT_DATA) ? "non-data" :
(!NInoNonResident(ni) ? "resident" :
"encrypted"));
return 0;
}
/* None of these can happen. */
BUG_ON(NInoCompressed(ni));
BUG_ON(NInoMstProtected(ni));
blocksize = vol->sb->s_blocksize;
blocksize_bits = vol->sb->s_blocksize_bits;
ofs = (s64)block << blocksize_bits;
read_lock_irqsave(&ni->size_lock, flags);
size = ni->initialized_size;
i_size = i_size_read(VFS_I(ni));
read_unlock_irqrestore(&ni->size_lock, flags);
/*
* If the offset is outside the initialized size or the block straddles
* the initialized size then pretend it is a hole unless the
* initialized size equals the file size.
*/
if (unlikely(ofs >= size || (ofs + blocksize > size && size < i_size)))
goto hole;
cluster_size_shift = vol->cluster_size_bits;
down_read(&ni->runlist.lock);
lcn = ntfs_attr_vcn_to_lcn_nolock(ni, ofs >> cluster_size_shift, false);
up_read(&ni->runlist.lock);
if (unlikely(lcn < LCN_HOLE)) {
/*
* Step down to an integer to avoid gcc doing a long long
		 * comparison in the switch when we know @lcn is between
* LCN_HOLE and LCN_EIO (i.e. -1 to -5).
*
* Otherwise older gcc (at least on some architectures) will
* try to use __cmpdi2() which is of course not available in
* the kernel.
*/
switch ((int)lcn) {
case LCN_ENOENT:
/*
* If the offset is out of bounds then pretend it is a
* hole.
*/
goto hole;
case LCN_ENOMEM:
ntfs_error(vol->sb, "Not enough memory to complete "
"mapping for inode 0x%lx. "
"Returning 0.", ni->mft_no);
break;
default:
ntfs_error(vol->sb, "Failed to complete mapping for "
"inode 0x%lx. Run chkdsk. "
"Returning 0.", ni->mft_no);
break;
}
return 0;
}
if (lcn < 0) {
/* It is a hole. */
hole:
ntfs_debug("Done (returning hole).");
return 0;
}
/*
	 * The block is really allocated and fulfills all our criteria.
* Convert the cluster to units of block size and return the result.
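	 *
	 * For example, with 4096-byte clusters (cluster_size_shift = 12) and
	 * 512-byte blocks (blocksize_bits = 9), lcn 0x100 with delta 0x200
	 * yields block ((0x100 << 12) + 0x200) >> 9 = 0x801.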
*/
delta = ofs & vol->cluster_size_mask;
if (unlikely(sizeof(block) < sizeof(lcn))) {
block = lcn = ((lcn << cluster_size_shift) + delta) >>
blocksize_bits;
/* If the block number was truncated return 0. */
if (unlikely(block != lcn)) {
ntfs_error(vol->sb, "Physical block 0x%llx is too "
"large to be returned, returning 0.",
(long long)lcn);
return 0;
}
} else
block = ((lcn << cluster_size_shift) + delta) >>
blocksize_bits;
ntfs_debug("Done (returning block 0x%llx).", (unsigned long long)lcn);
return block;
}
/*
* ntfs_normal_aops - address space operations for normal inodes and attributes
*
* Note these are not used for compressed or mst protected inodes and
* attributes.
*/
const struct address_space_operations ntfs_normal_aops = {
.read_folio = ntfs_read_folio,
#ifdef NTFS_RW
.writepage = ntfs_writepage,
.dirty_folio = block_dirty_folio,
#endif /* NTFS_RW */
.bmap = ntfs_bmap,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
/*
* ntfs_compressed_aops - address space operations for compressed inodes
*/
const struct address_space_operations ntfs_compressed_aops = {
.read_folio = ntfs_read_folio,
#ifdef NTFS_RW
.writepage = ntfs_writepage,
.dirty_folio = block_dirty_folio,
#endif /* NTFS_RW */
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
/*
 * ntfs_mst_aops - general address space operations for mst protected inodes
* and attributes
*/
const struct address_space_operations ntfs_mst_aops = {
.read_folio = ntfs_read_folio, /* Fill page with data. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
.dirty_folio = filemap_dirty_folio,
#endif /* NTFS_RW */
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
#ifdef NTFS_RW
/**
* mark_ntfs_record_dirty - mark an ntfs record dirty
* @page: page containing the ntfs record to mark dirty
* @ofs: byte offset within @page at which the ntfs record begins
*
* Set the buffers and the page in which the ntfs record is located dirty.
*
* The latter also marks the vfs inode the ntfs record belongs to dirty
* (I_DIRTY_PAGES only).
*
* If the page does not have buffers, we create them and set them uptodate.
* The page may not be locked which is why we need to handle the buffers under
* the mapping->private_lock. Once the buffers are marked dirty we no longer
* need the lock since try_to_free_buffers() does not free dirty buffers.
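 *
 * An illustrative call, after modifying an ntfs record that starts at byte
 * offset @ofs inside a mapped, uptodate @page, might look like:
 *	flush_dcache_page(page);
 *	mark_ntfs_record_dirty(page, ofs);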
*/
void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs)
{
struct address_space *mapping = page->mapping;
ntfs_inode *ni = NTFS_I(mapping->host);
struct buffer_head *bh, *head, *buffers_to_free = NULL;
unsigned int end, bh_size, bh_ofs;
BUG_ON(!PageUptodate(page));
end = ofs + ni->itype.index.block_size;
bh_size = VFS_I(ni)->i_sb->s_blocksize;
spin_lock(&mapping->private_lock);
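	/* Create and attach buffers if the page does not have any yet. */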
if (unlikely(!page_has_buffers(page))) {
spin_unlock(&mapping->private_lock);
bh = head = alloc_page_buffers(page, bh_size, true);
spin_lock(&mapping->private_lock);
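		/*
		 * Re-check as another task may have attached buffers while we
		 * dropped the lock.
		 */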
if (likely(!page_has_buffers(page))) {
struct buffer_head *tail;
do {
set_buffer_uptodate(bh);
tail = bh;
bh = bh->b_this_page;
} while (bh);
tail->b_this_page = head;
attach_page_private(page, head);
} else
buffers_to_free = bh;
}
bh = head = page_buffers(page);
BUG_ON(!bh);
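	/* Mark dirty only the buffers that overlap the record at [ofs, end). */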
do {
bh_ofs = bh_offset(bh);
if (bh_ofs + bh_size <= ofs)
continue;
if (unlikely(bh_ofs >= end))
break;
set_buffer_dirty(bh);
} while ((bh = bh->b_this_page) != head);
spin_unlock(&mapping->private_lock);
filemap_dirty_folio(mapping, page_folio(page));
if (unlikely(buffers_to_free)) {
do {
bh = buffers_to_free->b_this_page;
free_buffer_head(buffers_to_free);
buffers_to_free = bh;
} while (buffers_to_free);
}
}
#endif /* NTFS_RW */
| linux-master | fs/ntfs/aops.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
* Copyright (c) 2002 Richard Russon
*/
#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include "attrib.h"
#include "debug.h"
#include "layout.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
#include "types.h"
/**
* ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
* @ni: ntfs inode for which to map (part of) a runlist
* @vcn: map runlist part containing this vcn
* @ctx: active attribute search context if present or NULL if not
*
* Map the part of a runlist containing the @vcn of the ntfs inode @ni.
*
* If @ctx is specified, it is an active search context of @ni and its base mft
* record. This is needed when ntfs_map_runlist_nolock() encounters unmapped
* runlist fragments and allows their mapping. If you do not have the mft
* record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
* will perform the necessary mapping and unmapping.
*
* Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
* restores it before returning. Thus, @ctx will be left pointing to the same
* attribute on return as on entry. However, the actual pointers in @ctx may
* point to different memory locations on return, so you must remember to reset
* any cached pointers from the @ctx, i.e. after the call to
* ntfs_map_runlist_nolock(), you will probably want to do:
* m = ctx->mrec;
* a = ctx->attr;
* Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
* you cache ctx->mrec in a variable @m of type MFT_RECORD *.
*
* Return 0 on success and -errno on error. There is one special error code
* which is not an error as such. This is -ENOENT. It means that @vcn is out
* of bounds of the runlist.
*
* Note the runlist can be NULL after this function returns if @vcn is zero and
* the attribute has zero allocated size, i.e. there simply is no runlist.
*
* WARNING: If @ctx is supplied, regardless of whether success or failure is
* returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
* is no longer valid, i.e. you need to either call
* ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
* In that case PTR_ERR(@ctx->mrec) will give you the error code for
* why the mapping of the old inode failed.
*
* Locking: - The runlist described by @ni must be locked for writing on entry
* and is locked on return. Note the runlist will be modified.
* - If @ctx is NULL, the base mft record of @ni must not be mapped on
* entry and it will be left unmapped on return.
* - If @ctx is not NULL, the base mft record must be mapped on entry
* and it will be left mapped on return.
*/
int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
{
VCN end_vcn;
unsigned long flags;
ntfs_inode *base_ni;
MFT_RECORD *m;
ATTR_RECORD *a;
runlist_element *rl;
struct page *put_this_page = NULL;
int err = 0;
bool ctx_is_temporary, ctx_needs_reset;
ntfs_attr_search_ctx old_ctx = { NULL, };
ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
(unsigned long long)vcn);
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
if (!ctx) {
ctx_is_temporary = ctx_needs_reset = true;
m = map_mft_record(base_ni);
if (IS_ERR(m))
return PTR_ERR(m);
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
} else {
VCN allocated_size_vcn;
BUG_ON(IS_ERR(ctx->mrec));
a = ctx->attr;
BUG_ON(!a->non_resident);
ctx_is_temporary = false;
end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
read_lock_irqsave(&ni->size_lock, flags);
allocated_size_vcn = ni->allocated_size >>
ni->vol->cluster_size_bits;
read_unlock_irqrestore(&ni->size_lock, flags);
if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
end_vcn = allocated_size_vcn - 1;
/*
* If we already have the attribute extent containing @vcn in
* @ctx, no need to look it up again. We slightly cheat in
* that if vcn exceeds the allocated size, we will refuse to
* map the runlist below, so there is definitely no need to get
* the right attribute extent.
*/
if (vcn >= allocated_size_vcn || (a->type == ni->type &&
a->name_length == ni->name_len &&
!memcmp((u8*)a + le16_to_cpu(a->name_offset),
ni->name, ni->name_len) &&
sle64_to_cpu(a->data.non_resident.lowest_vcn)
<= vcn && end_vcn >= vcn))
ctx_needs_reset = false;
else {
/* Save the old search context. */
old_ctx = *ctx;
/*
* If the currently mapped (extent) inode is not the
* base inode we will unmap it when we reinitialize the
* search context which means we need to get a
* reference to the page containing the mapped mft
* record so we do not accidentally drop changes to the
* mft record when it has not been marked dirty yet.
*/
if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
old_ctx.base_ntfs_ino) {
put_this_page = old_ctx.ntfs_ino->page;
get_page(put_this_page);
}
/*
* Reinitialize the search context so we can lookup the
* needed attribute extent.
*/
ntfs_attr_reinit_search_ctx(ctx);
ctx_needs_reset = true;
}
}
if (ctx_needs_reset) {
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, vcn, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
BUG_ON(!ctx->attr->non_resident);
}
a = ctx->attr;
/*
* Only decompress the mapping pairs if @vcn is inside it. Otherwise
* we get into problems when we try to map an out of bounds vcn because
* we then try to map the already mapped runlist fragment and
* ntfs_mapping_pairs_decompress() fails.
*/
end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
if (unlikely(vcn && vcn >= end_vcn)) {
err = -ENOENT;
goto err_out;
}
rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
if (IS_ERR(rl))
err = PTR_ERR(rl);
else
ni->runlist.rl = rl;
err_out:
if (ctx_is_temporary) {
if (likely(ctx))
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
} else if (ctx_needs_reset) {
/*
* If there is no attribute list, restoring the search context
* is accomplished simply by copying the saved context back over
* the caller supplied context. If there is an attribute list,
* things are more complicated as we need to deal with mapping
* of mft records and resulting potential changes in pointers.
*/
if (NInoAttrList(base_ni)) {
/*
* If the currently mapped (extent) inode is not the
* one we had before, we need to unmap it and map the
* old one.
*/
if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
/*
* If the currently mapped inode is not the
* base inode, unmap it.
*/
if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
ctx->base_ntfs_ino) {
unmap_extent_mft_record(ctx->ntfs_ino);
ctx->mrec = ctx->base_mrec;
BUG_ON(!ctx->mrec);
}
/*
* If the old mapped inode is not the base
* inode, map it.
*/
if (old_ctx.base_ntfs_ino &&
old_ctx.ntfs_ino !=
old_ctx.base_ntfs_ino) {
retry_map:
ctx->mrec = map_mft_record(
old_ctx.ntfs_ino);
/*
* Something bad has happened. If out
* of memory retry till it succeeds.
* Any other errors are fatal and we
* return the error code in ctx->mrec.
* Let the caller deal with it... We
* just need to fudge things so the
* caller can reinit and/or put the
* search context safely.
*/
if (IS_ERR(ctx->mrec)) {
if (PTR_ERR(ctx->mrec) ==
-ENOMEM) {
schedule();
goto retry_map;
} else
old_ctx.ntfs_ino =
old_ctx.
base_ntfs_ino;
}
}
}
/* Update the changed pointers in the saved context. */
if (ctx->mrec != old_ctx.mrec) {
if (!IS_ERR(ctx->mrec))
old_ctx.attr = (ATTR_RECORD*)(
(u8*)ctx->mrec +
((u8*)old_ctx.attr -
(u8*)old_ctx.mrec));
old_ctx.mrec = ctx->mrec;
}
}
/* Restore the search context to the saved one. */
*ctx = old_ctx;
/*
* We drop the reference on the page we took earlier. In the
* case that IS_ERR(ctx->mrec) is true this means we might lose
* some changes to the mft record that had been made between
* the last time it was marked dirty/written out and now. This
* at this stage is not a problem as the mapping error is fatal
* enough that the mft record cannot be written out anyway and
* the caller is very likely to shutdown the whole inode
* immediately and mark the volume dirty for chkdsk to pick up
* the pieces anyway.
*/
if (put_this_page)
put_page(put_this_page);
}
return err;
}
/**
* ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
* @ni: ntfs inode for which to map (part of) a runlist
* @vcn: map runlist part containing this vcn
*
* Map the part of a runlist containing the @vcn of the ntfs inode @ni.
*
* Return 0 on success and -errno on error. There is one special error code
* which is not an error as such. This is -ENOENT. It means that @vcn is out
* of bounds of the runlist.
*
* Locking: - The runlist must be unlocked on entry and is unlocked on return.
* - This function takes the runlist lock for writing and may modify
* the runlist.
*/
int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
{
int err = 0;
down_write(&ni->runlist.lock);
/* Make sure someone else didn't do the work while we were sleeping. */
if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
LCN_RL_NOT_MAPPED))
err = ntfs_map_runlist_nolock(ni, vcn, NULL);
up_write(&ni->runlist.lock);
return err;
}
/**
* ntfs_attr_vcn_to_lcn_nolock - convert a vcn into a lcn given an ntfs inode
* @ni: ntfs inode of the attribute whose runlist to search
* @vcn: vcn to convert
* @write_locked: true if the runlist is locked for writing
*
* Find the virtual cluster number @vcn in the runlist of the ntfs attribute
* described by the ntfs inode @ni and return the corresponding logical cluster
* number (lcn).
*
* If the @vcn is not mapped yet, the attempt is made to map the attribute
* extent containing the @vcn and the vcn to lcn conversion is retried.
*
* If @write_locked is true the caller has locked the runlist for writing and
* if false for reading.
*
* Since lcns must be >= 0, we use negative return codes with special meaning:
*
* Return code Meaning / Description
* ==========================================
* LCN_HOLE Hole / not allocated on disk.
* LCN_ENOENT There is no such vcn in the runlist, i.e. @vcn is out of bounds.
* LCN_ENOMEM Not enough memory to map runlist.
* LCN_EIO Critical error (runlist/file is corrupt, i/o error, etc).
*
* Locking: - The runlist must be locked on entry and is left locked on return.
* - If @write_locked is 'false', i.e. the runlist is locked for reading,
* the lock may be dropped inside the function so you cannot rely on
* the runlist still being the same when this function returns.
*/
LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
const bool write_locked)
{
LCN lcn;
unsigned long flags;
bool is_retry = false;
BUG_ON(!ni);
ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
ni->mft_no, (unsigned long long)vcn,
write_locked ? "write" : "read");
BUG_ON(!NInoNonResident(ni));
BUG_ON(vcn < 0);
if (!ni->runlist.rl) {
read_lock_irqsave(&ni->size_lock, flags);
if (!ni->allocated_size) {
read_unlock_irqrestore(&ni->size_lock, flags);
return LCN_ENOENT;
}
read_unlock_irqrestore(&ni->size_lock, flags);
}
retry_remap:
/* Convert vcn to lcn. If that fails map the runlist and retry once. */
lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
if (likely(lcn >= LCN_HOLE)) {
ntfs_debug("Done, lcn 0x%llx.", (long long)lcn);
return lcn;
}
if (lcn != LCN_RL_NOT_MAPPED) {
if (lcn != LCN_ENOENT)
lcn = LCN_EIO;
} else if (!is_retry) {
int err;
if (!write_locked) {
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
LCN_RL_NOT_MAPPED)) {
up_write(&ni->runlist.lock);
down_read(&ni->runlist.lock);
goto retry_remap;
}
}
err = ntfs_map_runlist_nolock(ni, vcn, NULL);
if (!write_locked) {
up_write(&ni->runlist.lock);
down_read(&ni->runlist.lock);
}
if (likely(!err)) {
is_retry = true;
goto retry_remap;
}
if (err == -ENOENT)
lcn = LCN_ENOENT;
else if (err == -ENOMEM)
lcn = LCN_ENOMEM;
else
lcn = LCN_EIO;
}
if (lcn != LCN_ENOENT)
ntfs_error(ni->vol->sb, "Failed with error code %lli.",
(long long)lcn);
return lcn;
}
/**
* ntfs_attr_find_vcn_nolock - find a vcn in the runlist of an ntfs inode
* @ni: ntfs inode describing the runlist to search
* @vcn: vcn to find
* @ctx: active attribute search context if present or NULL if not
*
* Find the virtual cluster number @vcn in the runlist described by the ntfs
* inode @ni and return the address of the runlist element containing the @vcn.
*
* If the @vcn is not mapped yet, the attempt is made to map the attribute
* extent containing the @vcn and the vcn to lcn conversion is retried.
*
* If @ctx is specified, it is an active search context of @ni and its base mft
* record. This is needed when ntfs_attr_find_vcn_nolock() encounters unmapped
* runlist fragments and allows their mapping. If you do not have the mft
* record mapped, you can specify @ctx as NULL and ntfs_attr_find_vcn_nolock()
* will perform the necessary mapping and unmapping.
*
* Note, ntfs_attr_find_vcn_nolock() saves the state of @ctx on entry and
* restores it before returning. Thus, @ctx will be left pointing to the same
* attribute on return as on entry. However, the actual pointers in @ctx may
* point to different memory locations on return, so you must remember to reset
* any cached pointers from the @ctx, i.e. after the call to
* ntfs_attr_find_vcn_nolock(), you will probably want to do:
* m = ctx->mrec;
* a = ctx->attr;
* Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
* you cache ctx->mrec in a variable @m of type MFT_RECORD *.
* Note you need to distinguish between the lcn of the returned runlist element
 * being >= 0 and LCN_HOLE. In the latter case you have to return zeroes on
* read and allocate clusters on write.
*
* Return the runlist element containing the @vcn on success and
* ERR_PTR(-errno) on error. You need to test the return value with IS_ERR()
* to decide if the return is success or failure and PTR_ERR() to get to the
* error code if IS_ERR() is true.
*
* The possible error return codes are:
* -ENOENT - No such vcn in the runlist, i.e. @vcn is out of bounds.
* -ENOMEM - Not enough memory to map runlist.
* -EIO - Critical error (runlist/file is corrupt, i/o error, etc).
*
* WARNING: If @ctx is supplied, regardless of whether success or failure is
* returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
* is no longer valid, i.e. you need to either call
* ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
* In that case PTR_ERR(@ctx->mrec) will give you the error code for
* why the mapping of the old inode failed.
*
* Locking: - The runlist described by @ni must be locked for writing on entry
* and is locked on return. Note the runlist may be modified when
* needed runlist fragments need to be mapped.
* - If @ctx is NULL, the base mft record of @ni must not be mapped on
* entry and it will be left unmapped on return.
* - If @ctx is not NULL, the base mft record must be mapped on entry
* and it will be left mapped on return.
*/
runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
ntfs_attr_search_ctx *ctx)
{
unsigned long flags;
runlist_element *rl;
int err = 0;
bool is_retry = false;
BUG_ON(!ni);
ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
BUG_ON(!NInoNonResident(ni));
BUG_ON(vcn < 0);
if (!ni->runlist.rl) {
read_lock_irqsave(&ni->size_lock, flags);
if (!ni->allocated_size) {
read_unlock_irqrestore(&ni->size_lock, flags);
return ERR_PTR(-ENOENT);
}
read_unlock_irqrestore(&ni->size_lock, flags);
}
retry_remap:
rl = ni->runlist.rl;
if (likely(rl && vcn >= rl[0].vcn)) {
while (likely(rl->length)) {
if (unlikely(vcn < rl[1].vcn)) {
if (likely(rl->lcn >= LCN_HOLE)) {
ntfs_debug("Done.");
return rl;
}
break;
}
rl++;
}
if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
if (likely(rl->lcn == LCN_ENOENT))
err = -ENOENT;
else
err = -EIO;
}
}
if (!err && !is_retry) {
/*
* If the search context is invalid we cannot map the unmapped
* region.
*/
		if (ctx && IS_ERR(ctx->mrec))
err = PTR_ERR(ctx->mrec);
else {
/*
* The @vcn is in an unmapped region, map the runlist
* and retry.
*/
err = ntfs_map_runlist_nolock(ni, vcn, ctx);
if (likely(!err)) {
is_retry = true;
goto retry_remap;
}
}
if (err == -EINVAL)
err = -EIO;
} else if (!err)
err = -EIO;
if (err != -ENOENT)
ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
return ERR_PTR(err);
}
/**
* ntfs_attr_find - find (next) attribute in mft record
* @type: attribute type to find
* @name: attribute name to find (optional, i.e. NULL means don't care)
* @name_len: attribute name length (only needed if @name present)
* @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
* @val: attribute value to find (optional, resident attributes only)
* @val_len: attribute value length
* @ctx: search context with mft record and attribute to search from
*
* You should not need to call this function directly. Use ntfs_attr_lookup()
* instead.
*
* ntfs_attr_find() takes a search context @ctx as parameter and searches the
* mft record specified by @ctx->mrec, beginning at @ctx->attr, for an
* attribute of @type, optionally @name and @val.
*
* If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will
* point to the found attribute.
*
* If the attribute is not found, ntfs_attr_find() returns -ENOENT and
* @ctx->attr will point to the attribute before which the attribute being
* searched for would need to be inserted if such an action were to be desired.
*
* On actual error, ntfs_attr_find() returns -EIO. In this case @ctx->attr is
* undefined and in particular do not rely on it not changing.
*
* If @ctx->is_first is 'true', the search begins with @ctx->attr itself. If it
* is 'false', the search begins after @ctx->attr.
*
 * If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
* @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
* @ctx->mrec belongs. This is so we can get at the ntfs volume and hence at
* the upcase table. If @ic is CASE_SENSITIVE, the comparison is case
* sensitive. When @name is present, @name_len is the @name length in Unicode
* characters.
*
* If @name is not present (NULL), we assume that the unnamed attribute is
* being searched for.
*
* Finally, the resident attribute value @val is looked for, if present. If
* @val is not present (NULL), @val_len is ignored.
*
* ntfs_attr_find() only searches the specified mft record and it ignores the
* presence of an attribute list attribute (unless it is the one being searched
* for, obviously). If you need to take attribute lists into consideration,
* use ntfs_attr_lookup() instead (see below). This also means that you cannot
* use ntfs_attr_find() to search for extent records of non-resident
* attributes, as extents with lowest_vcn != 0 are usually described by the
 * attribute list attribute only. Note that it is possible that the first
* extent is only in the attribute list while the last extent is in the base
* mft record, so do not rely on being able to find the first extent in the
* base mft record.
*
* Warning: Never use @val when looking for attribute types which can be
* non-resident as this most likely will result in a crash!
*/
static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
const u32 name_len, const IGNORE_CASE_BOOL ic,
const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
{
ATTR_RECORD *a;
ntfs_volume *vol = ctx->ntfs_ino->vol;
ntfschar *upcase = vol->upcase;
u32 upcase_len = vol->upcase_len;
/*
* Iterate over attributes in mft record starting at @ctx->attr, or the
* attribute following that, if @ctx->is_first is 'true'.
*/
if (ctx->is_first) {
a = ctx->attr;
ctx->is_first = false;
} else
a = (ATTR_RECORD*)((u8*)ctx->attr +
le32_to_cpu(ctx->attr->length));
for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
u8 *mrec_end = (u8 *)ctx->mrec +
le32_to_cpu(ctx->mrec->bytes_allocated);
u8 *name_end;
		/* Check whether the ATTR_RECORD wraps around. */
if ((u8 *)a < (u8 *)ctx->mrec)
break;
		/* Check whether the attribute record header is within bounds. */
if ((u8 *)a > mrec_end ||
(u8 *)a + sizeof(ATTR_RECORD) > mrec_end)
break;
		/* Check whether the ATTR_RECORD's name is within bounds. */
name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
a->name_length * sizeof(ntfschar);
if (name_end > mrec_end)
break;
ctx->attr = a;
if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
a->type == AT_END))
return -ENOENT;
if (unlikely(!a->length))
break;
		/* Check whether the ATTR_RECORD's length wraps around. */
if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a)
break;
		/* Check whether the ATTR_RECORD's length is within bounds. */
if ((u8 *)a + le32_to_cpu(a->length) > mrec_end)
break;
if (a->type != type)
continue;
/*
* If @name is present, compare the two names. If @name is
* missing, assume we want an unnamed attribute.
*/
if (!name) {
/* The search failed if the found attribute is named. */
if (a->name_length)
return -ENOENT;
} else if (!ntfs_are_names_equal(name, name_len,
(ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
a->name_length, ic, upcase, upcase_len)) {
register int rc;
rc = ntfs_collate_names(name, name_len,
(ntfschar*)((u8*)a +
le16_to_cpu(a->name_offset)),
a->name_length, 1, IGNORE_CASE,
upcase, upcase_len);
/*
* If @name collates before a->name, there is no
* matching attribute.
*/
if (rc == -1)
return -ENOENT;
/* If the strings are not equal, continue search. */
if (rc)
continue;
rc = ntfs_collate_names(name, name_len,
(ntfschar*)((u8*)a +
le16_to_cpu(a->name_offset)),
a->name_length, 1, CASE_SENSITIVE,
upcase, upcase_len);
if (rc == -1)
return -ENOENT;
if (rc)
continue;
}
/*
		 * Either the names match, or @name is not present and the
		 * attribute is unnamed. If no @val was specified, we have
		 * found the attribute and are done.
*/
if (!val)
return 0;
/* @val is present; compare values. */
else {
register int rc;
rc = memcmp(val, (u8*)a + le16_to_cpu(
a->data.resident.value_offset),
min_t(u32, val_len, le32_to_cpu(
a->data.resident.value_length)));
/*
* If @val collates before the current attribute's
* value, there is no matching attribute.
*/
if (!rc) {
register u32 avl;
avl = le32_to_cpu(
a->data.resident.value_length);
if (val_len == avl)
return 0;
if (val_len < avl)
return -ENOENT;
} else if (rc < 0)
return -ENOENT;
}
}
ntfs_error(vol->sb, "Inode is corrupt. Run chkdsk.");
NVolSetErrors(vol);
return -EIO;
}
/**
* load_attribute_list - load an attribute list into memory
* @vol: ntfs volume from which to read
* @runlist: runlist of the attribute list
* @al_start: destination buffer
* @size: size of the destination buffer in bytes
* @initialized_size: initialized size of the attribute list
*
 * Walk the runlist @runlist and load all clusters from it, copying them into
 * the linear buffer @al_start. The maximum number of bytes copied to @al_start
 * is @size bytes. Note, @size does not need to be a multiple of the cluster
 * size. If @initialized_size is less than @size, the region in @al_start
 * between @initialized_size and @size will be zeroed and not read from disk.
*
* Return 0 on success or -errno on error.
*/
int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
const s64 size, const s64 initialized_size)
{
LCN lcn;
u8 *al = al_start;
u8 *al_end = al + initialized_size;
runlist_element *rl;
struct buffer_head *bh;
struct super_block *sb;
unsigned long block_size;
unsigned long block, max_block;
int err = 0;
unsigned char block_size_bits;
ntfs_debug("Entering.");
if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
initialized_size > size)
return -EINVAL;
if (!initialized_size) {
memset(al, 0, size);
return 0;
}
sb = vol->sb;
block_size = sb->s_blocksize;
block_size_bits = sb->s_blocksize_bits;
down_read(&runlist->lock);
rl = runlist->rl;
if (!rl) {
ntfs_error(sb, "Cannot read attribute list since runlist is "
"missing.");
goto err_out;
}
/* Read all clusters specified by the runlist one run at a time. */
while (rl->length) {
lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
(unsigned long long)rl->vcn,
(unsigned long long)lcn);
/* The attribute list cannot be sparse. */
if (lcn < 0) {
ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed. Cannot "
"read attribute list.");
goto err_out;
}
block = lcn << vol->cluster_size_bits >> block_size_bits;
/* Read the run from device in chunks of block_size bytes. */
max_block = block + (rl->length << vol->cluster_size_bits >>
block_size_bits);
ntfs_debug("max_block = 0x%lx.", max_block);
do {
ntfs_debug("Reading block = 0x%lx.", block);
bh = sb_bread(sb, block);
if (!bh) {
ntfs_error(sb, "sb_bread() failed. Cannot "
"read attribute list.");
goto err_out;
}
if (al + block_size >= al_end)
goto do_final;
memcpy(al, bh->b_data, block_size);
brelse(bh);
al += block_size;
} while (++block < max_block);
rl++;
}
if (initialized_size < size) {
initialize:
memset(al_start + initialized_size, 0, size - initialized_size);
}
done:
up_read(&runlist->lock);
return err;
do_final:
if (al < al_end) {
/*
* Partial block.
*
* Note: The attribute list can be smaller than its allocation
* by multiple clusters. This has been encountered by at least
* two people running Windows XP, thus we cannot do any
* truncation sanity checking here. (AIA)
*/
memcpy(al, bh->b_data, al_end - al);
brelse(bh);
if (initialized_size < size)
goto initialize;
goto done;
}
brelse(bh);
/* Real overflow! */
ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
"is truncated.");
err_out:
err = -EIO;
goto done;
}
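/*
 * Illustrative sketch only (hypothetical helper with simplified error
 * handling): how an inode-reading path would use load_attribute_list() to
 * read an inode's attribute list into memory. It assumes @ni->attr_list_size
 * and the attribute list runlist @ni->attr_list_rl have already been set up
 * from the $ATTRIBUTE_LIST attribute and that malloc.h's ntfs_malloc_nofs()
 * is available.
 */
static int __maybe_unused ntfs_read_attr_list_sketch(ntfs_volume *vol,
		ntfs_inode *ni, const s64 initialized_size)
{
	int err;

	ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
	if (!ni->attr_list)
		return -ENOMEM;
	err = load_attribute_list(vol, &ni->attr_list_rl, ni->attr_list,
			ni->attr_list_size, initialized_size);
	if (err)
		ntfs_error(vol->sb, "Failed to load attribute list, error "
				"code %i.", err);
	return err;
}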
/**
* ntfs_external_attr_find - find an attribute in the attribute list of an inode
* @type: attribute type to find
* @name: attribute name to find (optional, i.e. NULL means don't care)
* @name_len: attribute name length (only needed if @name present)
* @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
* @lowest_vcn: lowest vcn to find (optional, non-resident attributes only)
* @val: attribute value to find (optional, resident attributes only)
* @val_len: attribute value length
* @ctx: search context with mft record and attribute to search from
*
* You should not need to call this function directly. Use ntfs_attr_lookup()
* instead.
*
* Find an attribute by searching the attribute list for the corresponding
* attribute list entry. Having found the entry, map the mft record if the
* attribute is in a different mft record/inode, ntfs_attr_find() the attribute
* in there and return it.
*
* On first search @ctx->ntfs_ino must be the base mft record and @ctx must
* have been obtained from a call to ntfs_attr_get_search_ctx(). On subsequent
* calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is
* then the base inode).
*
* After finishing with the attribute/mft record you need to call
 * ntfs_attr_put_search_ctx() to clean up the search context (unmapping any
* mapped inodes, etc).
*
* If the attribute is found, ntfs_external_attr_find() returns 0 and
* @ctx->attr will point to the found attribute. @ctx->mrec will point to the
* mft record in which @ctx->attr is located and @ctx->al_entry will point to
* the attribute list entry for the attribute.
*
* If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and
* @ctx->attr will point to the attribute in the base mft record before which
* the attribute being searched for would need to be inserted if such an action
* were to be desired. @ctx->mrec will point to the mft record in which
* @ctx->attr is located and @ctx->al_entry will point to the attribute list
* entry of the attribute before which the attribute being searched for would
* need to be inserted if such an action were to be desired.
*
* Thus to insert the not found attribute, one wants to add the attribute to
* @ctx->mrec (the base mft record) and if there is not enough space, the
* attribute should be placed in a newly allocated extent mft record. The
* attribute list entry for the inserted attribute should be inserted in the
* attribute list attribute at @ctx->al_entry.
*
* On actual error, ntfs_external_attr_find() returns -EIO. In this case
* @ctx->attr is undefined and in particular do not rely on it not changing.
*/
static int ntfs_external_attr_find(const ATTR_TYPE type,
const ntfschar *name, const u32 name_len,
const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
{
ntfs_inode *base_ni, *ni;
ntfs_volume *vol;
ATTR_LIST_ENTRY *al_entry, *next_al_entry;
u8 *al_start, *al_end;
ATTR_RECORD *a;
ntfschar *al_name;
u32 al_name_len;
int err = 0;
static const char *es = " Unmount and run chkdsk.";
ni = ctx->ntfs_ino;
base_ni = ctx->base_ntfs_ino;
ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
if (!base_ni) {
/* First call happens with the base mft record. */
base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
ctx->base_mrec = ctx->mrec;
}
if (ni == base_ni)
ctx->base_attr = ctx->attr;
if (type == AT_END)
goto not_found;
vol = base_ni->vol;
al_start = base_ni->attr_list;
al_end = al_start + base_ni->attr_list_size;
if (!ctx->al_entry)
ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
/*
* Iterate over entries in attribute list starting at @ctx->al_entry,
* or the entry following that, if @ctx->is_first is 'true'.
*/
if (ctx->is_first) {
al_entry = ctx->al_entry;
ctx->is_first = false;
} else
al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
le16_to_cpu(ctx->al_entry->length));
for (;; al_entry = next_al_entry) {
/* Out of bounds check. */
if ((u8*)al_entry < base_ni->attr_list ||
(u8*)al_entry > al_end)
break; /* Inode is corrupt. */
ctx->al_entry = al_entry;
/* Catch the end of the attribute list. */
if ((u8*)al_entry == al_end)
goto not_found;
if (!al_entry->length)
break;
if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
le16_to_cpu(al_entry->length) > al_end)
break;
next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
le16_to_cpu(al_entry->length));
if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
goto not_found;
if (type != al_entry->type)
continue;
/*
* If @name is present, compare the two names. If @name is
* missing, assume we want an unnamed attribute.
*/
al_name_len = al_entry->name_length;
al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
if (!name) {
if (al_name_len)
goto not_found;
} else if (!ntfs_are_names_equal(al_name, al_name_len, name,
name_len, ic, vol->upcase, vol->upcase_len)) {
register int rc;
rc = ntfs_collate_names(name, name_len, al_name,
al_name_len, 1, IGNORE_CASE,
vol->upcase, vol->upcase_len);
/*
* If @name collates before al_name, there is no
* matching attribute.
*/
if (rc == -1)
goto not_found;
/* If the strings are not equal, continue search. */
if (rc)
continue;
/*
* FIXME: Reverse engineering showed 0, IGNORE_CASE but
* that is inconsistent with ntfs_attr_find(). The
* subsequent rc checks were also different. Perhaps I
* made a mistake in one of the two. Need to recheck
* which is correct or at least see what is going on...
* (AIA)
*/
rc = ntfs_collate_names(name, name_len, al_name,
al_name_len, 1, CASE_SENSITIVE,
vol->upcase, vol->upcase_len);
if (rc == -1)
goto not_found;
if (rc)
continue;
}
/*
		 * Either the names match, or @name is not present and the
		 * attribute is unnamed. Now check @lowest_vcn. Continue the
		 * search if the next attribute list entry still fits
		 * @lowest_vcn. Otherwise
* we have reached the right one or the search has failed.
*/
if (lowest_vcn && (u8*)next_al_entry >= al_start &&
(u8*)next_al_entry + 6 < al_end &&
(u8*)next_al_entry + le16_to_cpu(
next_al_entry->length) <= al_end &&
sle64_to_cpu(next_al_entry->lowest_vcn) <=
lowest_vcn &&
next_al_entry->type == al_entry->type &&
next_al_entry->name_length == al_name_len &&
ntfs_are_names_equal((ntfschar*)((u8*)
next_al_entry +
next_al_entry->name_offset),
next_al_entry->name_length,
al_name, al_name_len, CASE_SENSITIVE,
vol->upcase, vol->upcase_len))
continue;
if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
ntfs_error(vol->sb, "Found stale mft "
"reference in attribute list "
"of base inode 0x%lx.%s",
base_ni->mft_no, es);
err = -EIO;
break;
}
} else { /* Mft references do not match. */
/* If there is a mapped record unmap it first. */
if (ni != base_ni)
unmap_extent_mft_record(ni);
/* Do we want the base record back? */
if (MREF_LE(al_entry->mft_reference) ==
base_ni->mft_no) {
ni = ctx->ntfs_ino = base_ni;
ctx->mrec = ctx->base_mrec;
} else {
/* We want an extent record. */
ctx->mrec = map_extent_mft_record(base_ni,
le64_to_cpu(
al_entry->mft_reference), &ni);
if (IS_ERR(ctx->mrec)) {
ntfs_error(vol->sb, "Failed to map "
"extent mft record "
"0x%lx of base inode "
"0x%lx.%s",
MREF_LE(al_entry->
mft_reference),
base_ni->mft_no, es);
err = PTR_ERR(ctx->mrec);
if (err == -ENOENT)
err = -EIO;
/* Cause @ctx to be sanitized below. */
ni = NULL;
break;
}
ctx->ntfs_ino = ni;
}
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
}
/*
* ctx->vfs_ino, ctx->mrec, and ctx->attr now point to the
* mft record containing the attribute represented by the
* current al_entry.
*/
/*
* We could call into ntfs_attr_find() to find the right
* attribute in this mft record but this would be less
		 * efficient and not quite accurate as ntfs_attr_find()
		 * ignores, for example, the attribute instance numbers, which
		 * become important when one plays with attribute lists. Also,
* because a proper match has been found in the attribute list
* entry above, the comparison can now be optimized. So it is
* worth re-implementing a simplified ntfs_attr_find() here.
*/
a = ctx->attr;
/*
* Use a manual loop so we can still use break and continue
* with the same meanings as above.
*/
do_next_attr_loop:
if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
le32_to_cpu(ctx->mrec->bytes_allocated))
break;
if (a->type == AT_END)
break;
if (!a->length)
break;
if (al_entry->instance != a->instance)
goto do_next_attr;
/*
* If the type and/or the name are mismatched between the
* attribute list entry and the attribute record, there is
* corruption so we break and return error EIO.
*/
if (al_entry->type != a->type)
break;
if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
le16_to_cpu(a->name_offset)), a->name_length,
al_name, al_name_len, CASE_SENSITIVE,
vol->upcase, vol->upcase_len))
break;
ctx->attr = a;
/*
* If no @val specified or @val specified and it matches, we
* have found it!
*/
if (!val || (!a->non_resident && le32_to_cpu(
a->data.resident.value_length) == val_len &&
!memcmp((u8*)a +
le16_to_cpu(a->data.resident.value_offset),
val, val_len))) {
ntfs_debug("Done, found.");
return 0;
}
do_next_attr:
/* Proceed to the next attribute in the current mft record. */
a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
goto do_next_attr_loop;
}
if (!err) {
ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
"attribute list attribute.%s", base_ni->mft_no,
es);
err = -EIO;
}
if (ni != base_ni) {
if (ni)
unmap_extent_mft_record(ni);
ctx->ntfs_ino = base_ni;
ctx->mrec = ctx->base_mrec;
ctx->attr = ctx->base_attr;
}
if (err != -ENOMEM)
NVolSetErrors(vol);
return err;
not_found:
/*
* If we were looking for AT_END, we reset the search context @ctx and
* use ntfs_attr_find() to seek to the end of the base mft record.
*/
if (type == AT_END) {
ntfs_attr_reinit_search_ctx(ctx);
return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
ctx);
}
/*
* The attribute was not found. Before we return, we want to ensure
* @ctx->mrec and @ctx->attr indicate the position at which the
* attribute should be inserted in the base mft record. Since we also
* want to preserve @ctx->al_entry we cannot reinitialize the search
* context using ntfs_attr_reinit_search_ctx() as this would set
* @ctx->al_entry to NULL. Thus we do the necessary bits manually (see
* ntfs_attr_init_search_ctx() below). Note, we _only_ preserve
* @ctx->al_entry as the remaining fields (base_*) are identical to
* their non base_ counterparts and we cannot set @ctx->base_attr
* correctly yet as we do not know what @ctx->attr will be set to by
* the call to ntfs_attr_find() below.
*/
if (ni != base_ni)
unmap_extent_mft_record(ni);
ctx->mrec = ctx->base_mrec;
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
ctx->is_first = true;
ctx->ntfs_ino = base_ni;
ctx->base_ntfs_ino = NULL;
ctx->base_mrec = NULL;
ctx->base_attr = NULL;
/*
* In case there are multiple matches in the base mft record, need to
* keep enumerating until we get an attribute not found response (or
* another error), otherwise we would keep returning the same attribute
* over and over again and all programs using us for enumeration would
* lock up in a tight loop.
*/
do {
err = ntfs_attr_find(type, name, name_len, ic, val, val_len,
ctx);
} while (!err);
ntfs_debug("Done, not found.");
return err;
}
/**
* ntfs_attr_lookup - find an attribute in an ntfs inode
* @type: attribute type to find
* @name: attribute name to find (optional, i.e. NULL means don't care)
* @name_len: attribute name length (only needed if @name present)
* @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
* @lowest_vcn: lowest vcn to find (optional, non-resident attributes only)
* @val: attribute value to find (optional, resident attributes only)
* @val_len: attribute value length
* @ctx: search context with mft record and attribute to search from
*
* Find an attribute in an ntfs inode. On first search @ctx->ntfs_ino must
* be the base mft record and @ctx must have been obtained from a call to
* ntfs_attr_get_search_ctx().
*
* This function transparently handles attribute lists and @ctx is used to
 * continue searches where they left off.
*
* After finishing with the attribute/mft record you need to call
 * ntfs_attr_put_search_ctx() to clean up the search context (unmapping any
* mapped inodes, etc).
*
* Return 0 if the search was successful and -errno if not.
*
* When 0, @ctx->attr is the found attribute and it is in mft record
* @ctx->mrec. If an attribute list attribute is present, @ctx->al_entry is
* the attribute list entry of the found attribute.
*
* When -ENOENT, @ctx->attr is the attribute which collates just after the
* attribute being searched for, i.e. if one wants to add the attribute to the
* mft record this is the correct place to insert it into. If an attribute
* list attribute is present, @ctx->al_entry is the attribute list entry which
* collates just after the attribute list entry of the attribute being searched
* for, i.e. if one wants to add the attribute to the mft record this is the
* correct place to insert its attribute list entry into.
*
* When -errno != -ENOENT, an error occurred during the lookup. @ctx->attr is
* then undefined and in particular you should not rely on it not changing.
*/
int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
const u32 name_len, const IGNORE_CASE_BOOL ic,
const VCN lowest_vcn, const u8 *val, const u32 val_len,
ntfs_attr_search_ctx *ctx)
{
ntfs_inode *base_ni;
ntfs_debug("Entering.");
BUG_ON(IS_ERR(ctx->mrec));
if (ctx->base_ntfs_ino)
base_ni = ctx->base_ntfs_ino;
else
base_ni = ctx->ntfs_ino;
/* Sanity check, just for debugging really. */
BUG_ON(!base_ni);
if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST)
return ntfs_attr_find(type, name, name_len, ic, val, val_len,
ctx);
return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn,
val, val_len, ctx);
}
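/*
 * Illustrative sketch only, not part of the driver: the canonical caller
 * pattern for ntfs_attr_lookup(), i.e. map the base mft record, get a
 * search context, look up the attribute, and release everything again.
 * Here we look up the unnamed $DATA attribute; the helper name is
 * hypothetical.
 */
static int __maybe_unused ntfs_lookup_data_attr_sketch(ntfs_inode *base_ni)
{
	MFT_RECORD *m;
	ntfs_attr_search_ctx *ctx;
	int err;

	m = map_mft_record(base_ni);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto unm_err_out;
	}
	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
			ctx);
	if (!err)
		ntfs_debug("Found it; attribute is %sresident.",
				ctx->attr->non_resident ? "non-" : "");
	ntfs_attr_put_search_ctx(ctx);
unm_err_out:
	unmap_mft_record(base_ni);
	return err;
}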
/**
* ntfs_attr_init_search_ctx - initialize an attribute search context
* @ctx: attribute search context to initialize
* @ni: ntfs inode with which to initialize the search context
* @mrec: mft record with which to initialize the search context
*
* Initialize the attribute search context @ctx with @ni and @mrec.
*/
static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
ntfs_inode *ni, MFT_RECORD *mrec)
{
*ctx = (ntfs_attr_search_ctx) {
.mrec = mrec,
/* Sanity checks are performed elsewhere. */
.attr = (ATTR_RECORD*)((u8*)mrec +
le16_to_cpu(mrec->attrs_offset)),
.is_first = true,
.ntfs_ino = ni,
};
}
/**
* ntfs_attr_reinit_search_ctx - reinitialize an attribute search context
* @ctx: attribute search context to reinitialize
*
 * Reinitialize the attribute search context @ctx: unmap any associated
 * extent mft record and initialize the search context afresh.
*
* This is used when a search for a new attribute is being started to reset
* the search context to the beginning.
*/
void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
{
if (likely(!ctx->base_ntfs_ino)) {
/* No attribute list. */
ctx->is_first = true;
/* Sanity checks are performed elsewhere. */
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
/*
* This needs resetting due to ntfs_external_attr_find() which
* can leave it set despite having zeroed ctx->base_ntfs_ino.
*/
ctx->al_entry = NULL;
return;
} /* Attribute list. */
if (ctx->ntfs_ino != ctx->base_ntfs_ino)
unmap_extent_mft_record(ctx->ntfs_ino);
ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
return;
}
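/*
 * Illustrative sketch only (hypothetical helper): reusing a single search
 * context for two independent searches by resetting it with
 * ntfs_attr_reinit_search_ctx() in between, instead of putting the context
 * and getting a new one. @ctx must have been obtained from
 * ntfs_attr_get_search_ctx() on the base mft record.
 */
static int __maybe_unused ntfs_two_lookups_sketch(ntfs_attr_search_ctx *ctx)
{
	int err;

	err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (err)
		return err;
	/* Start the second search from the beginning again. */
	ntfs_attr_reinit_search_ctx(ctx);
	return ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
			ctx);
}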
/**
* ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context
* @ni: ntfs inode with which to initialize the search context
* @mrec: mft record with which to initialize the search context
*
* Allocate a new attribute search context, initialize it with @ni and @mrec,
* and return it. Return NULL if allocation failed.
*/
ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
{
ntfs_attr_search_ctx *ctx;
ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
if (ctx)
ntfs_attr_init_search_ctx(ctx, ni, mrec);
return ctx;
}
/**
* ntfs_attr_put_search_ctx - release an attribute search context
* @ctx: attribute search context to free
*
* Release the attribute search context @ctx, unmapping an associated extent
* mft record if present.
*/
void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
{
if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
unmap_extent_mft_record(ctx->ntfs_ino);
kmem_cache_free(ntfs_attr_ctx_cache, ctx);
return;
}
#ifdef NTFS_RW
/**
* ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to find
*
* Search for the attribute definition record corresponding to the attribute
* @type in the $AttrDef system file.
*
* Return the attribute type definition record if found and NULL if not found.
*/
static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
const ATTR_TYPE type)
{
ATTR_DEF *ad;
BUG_ON(!vol->attrdef);
BUG_ON(!type);
for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
vol->attrdef_size && ad->type; ++ad) {
/* We have not found it yet, carry on searching. */
if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
continue;
/* We found the attribute; return it. */
if (likely(ad->type == type))
return ad;
/* We have gone too far already. No point in continuing. */
break;
}
/* Attribute not found. */
ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
le32_to_cpu(type));
return NULL;
}
/**
* ntfs_attr_size_bounds_check - check a size of an attribute type for validity
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to check
* @size: size which to check
*
* Check whether the @size in bytes is valid for an attribute of @type on the
* ntfs volume @vol. This information is obtained from $AttrDef system file.
*
* Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not
* listed in $AttrDef.
*/
int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
const s64 size)
{
ATTR_DEF *ad;
BUG_ON(size < 0);
/*
* $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
* listed in $AttrDef.
*/
if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
return -ERANGE;
/* Get the $AttrDef entry for the attribute @type. */
ad = ntfs_attr_find_in_attrdef(vol, type);
if (unlikely(!ad))
return -ENOENT;
/* Do the bounds check. */
if (((sle64_to_cpu(ad->min_size) > 0) &&
size < sle64_to_cpu(ad->min_size)) ||
((sle64_to_cpu(ad->max_size) > 0) && size >
sle64_to_cpu(ad->max_size)))
return -ERANGE;
return 0;
}
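/*
 * Illustrative sketch only (hypothetical helper): validating a proposed new
 * attribute size against $AttrDef and translating the error codes the way
 * the write path does (see ntfs_attr_extend_allocation() below), i.e.
 * -ERANGE becomes -EFBIG for POSIX write(2) conformance and an undefined
 * attribute type becomes -EIO.
 */
static int __maybe_unused ntfs_check_new_size_sketch(const ntfs_volume *vol,
		const ATTR_TYPE type, const s64 new_size)
{
	int err;

	err = ntfs_attr_size_bounds_check(vol, type, new_size);
	if (likely(!err))
		return 0;
	return err == -ERANGE ? -EFBIG : -EIO;
}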
/**
* ntfs_attr_can_be_non_resident - check if an attribute can be non-resident
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to check
*
* Check whether the attribute of @type on the ntfs volume @vol is allowed to
* be non-resident. This information is obtained from $AttrDef system file.
*
* Return 0 if the attribute is allowed to be non-resident, -EPERM if not, and
* -ENOENT if the attribute is not listed in $AttrDef.
*/
int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
{
ATTR_DEF *ad;
/* Find the attribute definition record in $AttrDef. */
ad = ntfs_attr_find_in_attrdef(vol, type);
if (unlikely(!ad))
return -ENOENT;
/* Check the flags and return the result. */
if (ad->flags & ATTR_DEF_RESIDENT)
return -EPERM;
return 0;
}
/**
* ntfs_attr_can_be_resident - check if an attribute can be resident
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to check
*
* Check whether the attribute of @type on the ntfs volume @vol is allowed to
* be resident. This information is derived from our ntfs knowledge and may
* not be completely accurate, especially when user defined attributes are
* present. Basically we allow everything to be resident except for index
* allocation and $EA attributes.
*
 * Return 0 if the attribute is allowed to be resident and -EPERM if not.
*
* Warning: In the system file $MFT the attribute $Bitmap must be non-resident
* otherwise windows will not boot (blue screen of death)! We cannot
* check for this here as we do not know which inode's $Bitmap is
* being asked about so the caller needs to special case this.
*/
int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
{
if (type == AT_INDEX_ALLOCATION)
return -EPERM;
return 0;
}
/**
* ntfs_attr_record_resize - resize an attribute record
* @m: mft record containing attribute record
* @a: attribute record to resize
* @new_size: new size in bytes to which to resize the attribute record @a
*
* Resize the attribute record @a, i.e. the resident part of the attribute, in
* the mft record @m to @new_size bytes.
*
* Return 0 on success and -errno on error. The following error codes are
* defined:
* -ENOSPC - Not enough space in the mft record @m to perform the resize.
*
* Note: On error, no modifications have been performed whatsoever.
*
 * Warning: If you make a record smaller without having copied all the data
 * you are interested in, the data may be overwritten.
*/
int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
{
ntfs_debug("Entering for new_size %u.", new_size);
/* Align to 8 bytes if it is not already done. */
if (new_size & 7)
new_size = (new_size + 7) & ~7;
/* If the actual attribute length has changed, move things around. */
if (new_size != le32_to_cpu(a->length)) {
u32 new_muse = le32_to_cpu(m->bytes_in_use) -
le32_to_cpu(a->length) + new_size;
/* Not enough space in this mft record. */
if (new_muse > le32_to_cpu(m->bytes_allocated))
return -ENOSPC;
/* Move attributes following @a to their new location. */
memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
le32_to_cpu(m->bytes_in_use) - ((u8*)a -
(u8*)m) - le32_to_cpu(a->length));
/* Adjust @m to reflect the change in used space. */
m->bytes_in_use = cpu_to_le32(new_muse);
/* Adjust @a to reflect the new size. */
if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
a->length = cpu_to_le32(new_size);
}
return 0;
}
/**
* ntfs_resident_attr_value_resize - resize the value of a resident attribute
* @m: mft record containing attribute record
* @a: attribute record whose value to resize
* @new_size: new size in bytes to which to resize the attribute value of @a
*
* Resize the value of the attribute @a in the mft record @m to @new_size bytes.
* If the value is made bigger, the newly allocated space is cleared.
*
* Return 0 on success and -errno on error. The following error codes are
* defined:
* -ENOSPC - Not enough space in the mft record @m to perform the resize.
*
* Note: On error, no modifications have been performed whatsoever.
*
 * Warning: If you make a record smaller without having copied all the data
 * you are interested in, the data may be overwritten.
*/
int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
const u32 new_size)
{
u32 old_size;
/* Resize the resident part of the attribute record. */
if (ntfs_attr_record_resize(m, a,
le16_to_cpu(a->data.resident.value_offset) + new_size))
return -ENOSPC;
/*
* The resize succeeded! If we made the attribute value bigger, clear
* the area between the old size and @new_size.
*/
old_size = le32_to_cpu(a->data.resident.value_length);
if (new_size > old_size)
memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
old_size, 0, new_size - old_size);
/* Finally update the length of the attribute value. */
a->data.resident.value_length = cpu_to_le32(new_size);
return 0;
}
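/*
 * Illustrative sketch only (hypothetical helper): growing the value of a
 * resident attribute that has already been looked up via @ctx, then marking
 * the mft record dirty so the change gets written back. Simplified; real
 * callers also update the sizes in the ntfs inode under the size lock.
 */
static int __maybe_unused ntfs_grow_resident_value_sketch(
		ntfs_attr_search_ctx *ctx, const u32 new_size)
{
	int err;

	err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, new_size);
	if (err)
		return err;	/* -ENOSPC: the mft record is full. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	return 0;
}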
/**
* ntfs_attr_make_non_resident - convert a resident to a non-resident attribute
* @ni: ntfs inode describing the attribute to convert
* @data_size: size of the resident data to copy to the non-resident attribute
*
* Convert the resident ntfs attribute described by the ntfs inode @ni to a
* non-resident one.
*
* @data_size must be equal to the attribute value size. This is needed since
* we need to know the size before we can map the mft record and our callers
* always know it. The reason we cannot simply read the size from the vfs
* inode i_size is that this is not necessarily uptodate. This happens when
* ntfs_attr_make_non_resident() is called in the ->truncate call path(s).
*
* Return 0 on success and -errno on error. The following error return codes
* are defined:
* -EPERM - The attribute is not allowed to be non-resident.
* -ENOMEM - Not enough memory.
* -ENOSPC - Not enough disk space.
* -EINVAL - Attribute not defined on the volume.
 * -EIO - I/O error or other error.
* Note that -ENOSPC is also returned in the case that there is not enough
* space in the mft record to do the conversion. This can happen when the mft
* record is already very full. The caller is responsible for trying to make
* space in the mft record and trying again. FIXME: Do we need a separate
* error return code for this kind of -ENOSPC or is it always worth trying
* again in case the attribute may then fit in a resident state so no need to
* make it non-resident at all? Ho-hum... (AIA)
*
* NOTE to self: No changes in the attribute list are required to move from
* a resident to a non-resident attribute.
*
* Locking: - The caller must hold i_mutex on the inode.
*/
int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
{
s64 new_size;
struct inode *vi = VFS_I(ni);
ntfs_volume *vol = ni->vol;
ntfs_inode *base_ni;
MFT_RECORD *m;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx;
struct page *page;
runlist_element *rl;
u8 *kaddr;
unsigned long flags;
int mp_size, mp_ofs, name_ofs, arec_size, err, err2;
u32 attr_size;
u8 old_res_attr_flags;
/* Check that the attribute is allowed to be non-resident. */
err = ntfs_attr_can_be_non_resident(vol, ni->type);
if (unlikely(err)) {
if (err == -EPERM)
ntfs_debug("Attribute is not allowed to be "
"non-resident.");
else
ntfs_debug("Attribute not defined on the NTFS "
"volume!");
return err;
}
/*
* FIXME: Compressed and encrypted attributes are not supported when
* writing and we should never have gotten here for them.
*/
BUG_ON(NInoCompressed(ni));
BUG_ON(NInoEncrypted(ni));
/*
* The size needs to be aligned to a cluster boundary for allocation
* purposes.
*/
new_size = (data_size + vol->cluster_size - 1) &
~(vol->cluster_size - 1);
if (new_size > 0) {
/*
* Will need the page later and since the page lock nests
* outside all ntfs locks, we need to get the page now.
*/
page = find_or_create_page(vi->i_mapping, 0,
mapping_gfp_mask(vi->i_mapping));
if (unlikely(!page))
return -ENOMEM;
/* Start by allocating clusters to hold the attribute value. */
rl = ntfs_cluster_alloc(vol, 0, new_size >>
vol->cluster_size_bits, -1, DATA_ZONE, true);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
ntfs_debug("Failed to allocate cluster%s, error code "
"%i.", (new_size >>
vol->cluster_size_bits) > 1 ? "s" : "",
err);
goto page_err_out;
}
} else {
rl = NULL;
page = NULL;
}
/* Determine the size of the mapping pairs array. */
mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
if (unlikely(mp_size < 0)) {
err = mp_size;
ntfs_debug("Failed to get size for mapping pairs array, error "
"code %i.", err);
goto rl_err_out;
}
down_write(&ni->runlist.lock);
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
ctx = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
m = ctx->mrec;
a = ctx->attr;
BUG_ON(NInoNonResident(ni));
BUG_ON(a->non_resident);
/*
* Calculate new offsets for the name and the mapping pairs array.
*/
if (NInoSparse(ni) || NInoCompressed(ni))
name_ofs = (offsetof(ATTR_REC,
data.non_resident.compressed_size) +
sizeof(a->data.non_resident.compressed_size) +
7) & ~7;
else
name_ofs = (offsetof(ATTR_REC,
data.non_resident.compressed_size) + 7) & ~7;
mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
/*
* Determine the size of the resident part of the now non-resident
* attribute record.
*/
arec_size = (mp_ofs + mp_size + 7) & ~7;
/*
* If the page is not uptodate bring it uptodate by copying from the
* attribute value.
*/
attr_size = le32_to_cpu(a->data.resident.value_length);
BUG_ON(attr_size != data_size);
if (page && !PageUptodate(page)) {
kaddr = kmap_atomic(page);
memcpy(kaddr, (u8*)a +
le16_to_cpu(a->data.resident.value_offset),
attr_size);
memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size);
kunmap_atomic(kaddr);
flush_dcache_page(page);
SetPageUptodate(page);
}
/* Backup the attribute flag. */
old_res_attr_flags = a->data.resident.flags;
/* Resize the resident part of the attribute record. */
err = ntfs_attr_record_resize(m, a, arec_size);
if (unlikely(err))
goto err_out;
/*
* Convert the resident part of the attribute record to describe a
* non-resident attribute.
*/
a->non_resident = 1;
/* Move the attribute name if it exists and update the offset. */
if (a->name_length)
memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
a->name_length * sizeof(ntfschar));
a->name_offset = cpu_to_le16(name_ofs);
/* Setup the fields specific to non-resident attributes. */
a->data.non_resident.lowest_vcn = 0;
a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >>
vol->cluster_size_bits);
a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs);
memset(&a->data.non_resident.reserved, 0,
sizeof(a->data.non_resident.reserved));
a->data.non_resident.allocated_size = cpu_to_sle64(new_size);
a->data.non_resident.data_size =
a->data.non_resident.initialized_size =
cpu_to_sle64(attr_size);
if (NInoSparse(ni) || NInoCompressed(ni)) {
a->data.non_resident.compression_unit = 0;
if (NInoCompressed(ni) || vol->major_ver < 3)
a->data.non_resident.compression_unit = 4;
a->data.non_resident.compressed_size =
a->data.non_resident.allocated_size;
} else
a->data.non_resident.compression_unit = 0;
/* Generate the mapping pairs array into the attribute record. */
err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs,
arec_size - mp_ofs, rl, 0, -1, NULL);
if (unlikely(err)) {
ntfs_debug("Failed to build mapping pairs, error code %i.",
err);
goto undo_err_out;
}
/* Setup the in-memory attribute structure to be non-resident. */
ni->runlist.rl = rl;
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = new_size;
if (NInoSparse(ni) || NInoCompressed(ni)) {
ni->itype.compressed.size = ni->allocated_size;
if (a->data.non_resident.compression_unit) {
ni->itype.compressed.block_size = 1U << (a->data.
non_resident.compression_unit +
vol->cluster_size_bits);
ni->itype.compressed.block_size_bits =
ffs(ni->itype.compressed.block_size) -
1;
ni->itype.compressed.block_clusters = 1U <<
a->data.non_resident.compression_unit;
} else {
ni->itype.compressed.block_size = 0;
ni->itype.compressed.block_size_bits = 0;
ni->itype.compressed.block_clusters = 0;
}
vi->i_blocks = ni->itype.compressed.size >> 9;
} else
vi->i_blocks = ni->allocated_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
/*
* This needs to be last since the address space operations ->read_folio
* and ->writepage can run concurrently with us as they are not
* serialized on i_mutex. Note, we are not allowed to fail once we flip
* this switch, which is another reason to do this last.
*/
NInoSetNonResident(ni);
/* Mark the mft record dirty, so it gets written back. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
if (page) {
set_page_dirty(page);
unlock_page(page);
put_page(page);
}
ntfs_debug("Done.");
return 0;
undo_err_out:
/* Convert the attribute back into a resident attribute. */
a->non_resident = 0;
/* Move the attribute name if it exists and update the offset. */
name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) +
sizeof(a->data.resident.reserved) + 7) & ~7;
if (a->name_length)
memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
a->name_length * sizeof(ntfschar));
mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
a->name_offset = cpu_to_le16(name_ofs);
arec_size = (mp_ofs + attr_size + 7) & ~7;
/* Resize the resident part of the attribute record. */
err2 = ntfs_attr_record_resize(m, a, arec_size);
if (unlikely(err2)) {
/*
* This cannot happen (well if memory corruption is at work it
* could happen in theory), but deal with it as well as we can.
* If the old size is too small, truncate the attribute,
* otherwise simply give it a larger allocated size.
* FIXME: Should check whether chkdsk complains when the
* allocated size is much bigger than the resident value size.
*/
arec_size = le32_to_cpu(a->length);
if ((mp_ofs + attr_size) > arec_size) {
err2 = attr_size;
attr_size = arec_size - mp_ofs;
ntfs_error(vol->sb, "Failed to undo partial resident "
"to non-resident attribute "
"conversion. Truncating inode 0x%lx, "
"attribute type 0x%x from %i bytes to "
"%i bytes to maintain metadata "
"consistency. THIS MEANS YOU ARE "
"LOSING %i BYTES DATA FROM THIS %s.",
vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
err2, attr_size, err2 - attr_size,
((ni->type == AT_DATA) &&
!ni->name_len) ? "FILE": "ATTRIBUTE");
write_lock_irqsave(&ni->size_lock, flags);
ni->initialized_size = attr_size;
i_size_write(vi, attr_size);
write_unlock_irqrestore(&ni->size_lock, flags);
}
}
/* Setup the fields specific to resident attributes. */
a->data.resident.value_length = cpu_to_le32(attr_size);
a->data.resident.value_offset = cpu_to_le16(mp_ofs);
a->data.resident.flags = old_res_attr_flags;
memset(&a->data.resident.reserved, 0,
sizeof(a->data.resident.reserved));
/* Copy the data from the page back to the attribute value. */
if (page) {
kaddr = kmap_atomic(page);
memcpy((u8*)a + mp_ofs, kaddr, attr_size);
kunmap_atomic(kaddr);
}
/* Setup the allocated size in the ntfs inode in case it changed. */
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = arec_size - mp_ofs;
write_unlock_irqrestore(&ni->size_lock, flags);
/* Mark the mft record dirty, so it gets written back. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
ni->runlist.rl = NULL;
up_write(&ni->runlist.lock);
rl_err_out:
if (rl) {
if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
ntfs_error(vol->sb, "Failed to release allocated "
"cluster(s) in error code path. Run "
"chkdsk to recover the lost "
"cluster(s).");
NVolSetErrors(vol);
}
ntfs_free(rl);
page_err_out:
unlock_page(page);
put_page(page);
}
if (err == -EINVAL)
err = -EIO;
return err;
}
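/*
 * Illustrative sketch only (hypothetical helper): the retry pattern used
 * when a resident attribute no longer fits in its mft record. All locks
 * and the mapped mft record must have been dropped first, exactly as
 * ntfs_attr_extend_allocation() below demonstrates; @attr_len is the
 * current attribute value size.
 */
static int __maybe_unused ntfs_make_non_resident_sketch(ntfs_inode *ni,
		const u32 attr_len)
{
	int err;

	err = ntfs_attr_make_non_resident(ni, attr_len);
	if (err == -EPERM || err == -ENOSPC)
		ntfs_debug("Cannot make attribute non-resident, error code "
				"%i.", err);
	/* On success, the caller redoes the locking and retries. */
	return err;
}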
/**
* ntfs_attr_extend_allocation - extend the allocated space of an attribute
* @ni: ntfs inode of the attribute whose allocation to extend
* @new_alloc_size: new size in bytes to which to extend the allocation to
* @new_data_size: new size in bytes to which to extend the data to
* @data_start: beginning of region which is required to be non-sparse
*
* Extend the allocated space of an attribute described by the ntfs inode @ni
* to @new_alloc_size bytes. If @data_start is -1, the whole extension may be
* implemented as a hole in the file (as long as both the volume and the ntfs
* inode @ni have sparse support enabled). If @data_start is >= 0, then the
* region between the old allocated size and @data_start - 1 may be made sparse
* but the regions between @data_start and @new_alloc_size must be backed by
* actual clusters.
*
* If @new_data_size is -1, it is ignored. If it is >= 0, then the data size
* of the attribute is extended to @new_data_size. Note that the i_size of the
* vfs inode is not updated. Only the data size in the base attribute record
* is updated. The caller has to update i_size separately if this is required.
* WARNING: It is a BUG() for @new_data_size to be smaller than the old data
* size as well as for @new_data_size to be greater than @new_alloc_size.
*
* For resident attributes this involves resizing the attribute record and if
* necessary moving it and/or other attributes into extent mft records and/or
* converting the attribute to a non-resident attribute which in turn involves
* extending the allocation of a non-resident attribute as described below.
*
* For non-resident attributes this involves allocating clusters in the data
* zone on the volume (except for regions that are being made sparse) and
* extending the run list to describe the allocated clusters as well as
* updating the mapping pairs array of the attribute. This in turn involves
* resizing the attribute record and if necessary moving it and/or other
* attributes into extent mft records and/or splitting the attribute record
* into multiple extent attribute records.
*
* Also, the attribute list attribute is updated if present and in some of the
* above cases (the ones where extent mft records/attributes come into play),
* an attribute list attribute is created if not already present.
*
* Return the new allocated size on success and -errno on error. In the case
* that an error is encountered but a partial extension at least up to
* @data_start (if present) is possible, the allocation is partially extended
* and this is returned. This means the caller must check the returned size to
* determine if the extension was partial. If @data_start is -1 then partial
* allocations are not performed.
*
* WARNING: Do not call ntfs_attr_extend_allocation() for $MFT/$DATA.
*
* Locking: This function takes the runlist lock of @ni for writing as well as
* locking the mft record of the base ntfs inode. These locks are maintained
* throughout execution of the function. These locks are required so that the
* attribute can be resized safely and so that it can for example be converted
* from resident to non-resident safely.
*
* TODO: At present attribute list attribute handling is not implemented.
*
* TODO: At present it is not safe to call this function for anything other
* than the $DATA attribute(s) of an uncompressed and unencrypted file.
*/
s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
const s64 new_data_size, const s64 data_start)
{
VCN vcn;
s64 ll, allocated_size, start = data_start;
struct inode *vi = VFS_I(ni);
ntfs_volume *vol = ni->vol;
ntfs_inode *base_ni;
MFT_RECORD *m;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx;
runlist_element *rl, *rl2;
unsigned long flags;
int err, mp_size;
u32 attr_len = 0; /* Silence stupid gcc warning. */
bool mp_rebuilt;
#ifdef DEBUG
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
"old_allocated_size 0x%llx, "
"new_allocated_size 0x%llx, new_data_size 0x%llx, "
"data_start 0x%llx.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
(unsigned long long)allocated_size,
(unsigned long long)new_alloc_size,
(unsigned long long)new_data_size,
(unsigned long long)start);
#endif
retry_extend:
/*
* For non-resident attributes, @start and @new_size need to be aligned
* to cluster boundaries for allocation purposes.
*/
if (NInoNonResident(ni)) {
if (start > 0)
start &= ~(s64)vol->cluster_size_mask;
new_alloc_size = (new_alloc_size + vol->cluster_size - 1) &
~(s64)vol->cluster_size_mask;
}
BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size);
/* Check if new size is allowed in $AttrDef. */
err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
if (unlikely(err)) {
/* Only emit errors when the write will fail completely. */
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (start < 0 || start >= allocated_size) {
if (err == -ERANGE) {
ntfs_error(vol->sb, "Cannot extend allocation "
"of inode 0x%lx, attribute "
"type 0x%x, because the new "
"allocation would exceed the "
"maximum allowed size for "
"this attribute type.",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type));
} else {
ntfs_error(vol->sb, "Cannot extend allocation "
"of inode 0x%lx, attribute "
"type 0x%x, because this "
"attribute type is not "
"defined on the NTFS volume. "
"Possible corruption! You "
"should run chkdsk!",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type));
}
}
/* Translate error code to be POSIX conformant for write(2). */
if (err == -ERANGE)
err = -EFBIG;
else
err = -EIO;
return err;
}
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
/*
* We will be modifying both the runlist (if non-resident) and the mft
* record so lock them both down.
*/
down_write(&ni->runlist.lock);
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
ctx = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
/*
* If non-resident, seek to the last extent. If resident, there is
* only one extent, so seek to that.
*/
vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits :
0;
/*
* Abort if someone did the work whilst we waited for the locks. If we
* just converted the attribute from resident to non-resident it is
* likely that exactly this has happened already. We cannot quite
* abort if we need to update the data size.
*/
if (unlikely(new_alloc_size <= allocated_size)) {
ntfs_debug("Allocated size already exceeds requested size.");
new_alloc_size = allocated_size;
if (new_data_size < 0)
goto done;
/*
* We want the first attribute extent so that we can update the
* data size.
*/
vcn = 0;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, vcn, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
m = ctx->mrec;
a = ctx->attr;
/* Use goto to reduce indentation. */
if (a->non_resident)
goto do_non_resident_extend;
BUG_ON(NInoNonResident(ni));
/* The total length of the attribute value. */
attr_len = le32_to_cpu(a->data.resident.value_length);
/*
* Extend the attribute record to be able to store the new attribute
* size. ntfs_attr_record_resize() will not do anything if the size is
* not changing.
*/
if (new_alloc_size < vol->mft_record_size &&
!ntfs_attr_record_resize(m, a,
le16_to_cpu(a->data.resident.value_offset) +
new_alloc_size)) {
/* The resize succeeded! */
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = le32_to_cpu(a->length) -
le16_to_cpu(a->data.resident.value_offset);
write_unlock_irqrestore(&ni->size_lock, flags);
if (new_data_size >= 0) {
BUG_ON(new_data_size < attr_len);
a->data.resident.value_length =
cpu_to_le32((u32)new_data_size);
}
goto flush_done;
}
/*
* We have to drop all the locks so we can call
* ntfs_attr_make_non_resident(). This could be optimised by try-
* locking the first page cache page and only if that fails dropping
* the locks, locking the page, and redoing all the locking and
* lookups. While this would be a huge optimisation, it is not worth
* it as this is definitely a slow code path.
*/
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
/*
* Not enough space in the mft record, try to make the attribute
* non-resident and if successful restart the extension process.
*/
err = ntfs_attr_make_non_resident(ni, attr_len);
if (likely(!err))
goto retry_extend;
/*
* Could not make non-resident. If this is due to this not being
* permitted for this attribute type or there not being enough space,
* try to make other attributes non-resident. Otherwise fail.
*/
if (unlikely(err != -EPERM && err != -ENOSPC)) {
/* Only emit errors when the write will fail completely. */
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because the conversion from resident "
"to non-resident attribute failed "
"with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err != -ENOMEM)
err = -EIO;
goto conv_err_out;
}
/* TODO: Not implemented from here, abort. */
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (start < 0 || start >= allocated_size) {
if (err == -ENOSPC)
ntfs_error(vol->sb, "Not enough space in the mft "
"record/on disk for the non-resident "
"attribute value. This case is not "
"implemented yet.");
else /* if (err == -EPERM) */
ntfs_error(vol->sb, "This attribute type may not be "
"non-resident. This case is not "
"implemented yet.");
}
err = -EOPNOTSUPP;
goto conv_err_out;
#if 0
// TODO: Attempt to make other attributes non-resident.
if (!err)
goto do_resident_extend;
/*
* Both the attribute list attribute and the standard information
* attribute must remain in the base inode. Thus, if this is one of
* these attributes, we have to try to move other attributes out into
* extent mft records instead.
*/
if (ni->type == AT_ATTRIBUTE_LIST ||
ni->type == AT_STANDARD_INFORMATION) {
// TODO: Attempt to move other attributes into extent mft
// records.
err = -EOPNOTSUPP;
if (!err)
goto do_resident_extend;
goto err_out;
}
// TODO: Attempt to move this attribute to an extent mft record, but
// only if it is not already the only attribute in an mft record in
// which case there would be nothing to gain.
err = -EOPNOTSUPP;
if (!err)
goto do_resident_extend;
/* There is nothing we can do to make enough space. )-: */
goto err_out;
#endif
do_non_resident_extend:
BUG_ON(!NInoNonResident(ni));
if (new_alloc_size == allocated_size) {
BUG_ON(vcn);
goto alloc_done;
}
/*
* If the data starts after the end of the old allocation, this is a
* $DATA attribute and sparse attributes are enabled on the volume and
* for this inode, then create a sparse region between the old
* allocated size and the start of the data. Otherwise simply proceed
* with filling the whole space between the old allocated size and the
* new allocated size with clusters.
*/
if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
!NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
goto skip_sparse;
// TODO: This is not implemented yet. We just fill in with real
// clusters for now...
ntfs_debug("Inserting holes is not-implemented yet. Falling back to "
"allocating real clusters instead.");
skip_sparse:
rl = ni->runlist.rl;
if (likely(rl)) {
/* Seek to the end of the runlist. */
while (rl->length)
rl++;
}
/* If this attribute extent is not mapped, map it now. */
if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
(rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
(rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
if (!rl && !allocated_size)
goto first_alloc;
rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation "
"of inode 0x%lx, attribute "
"type 0x%x, because the "
"mapping of a runlist "
"fragment failed with error "
"code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
err);
if (err != -ENOMEM)
err = -EIO;
goto err_out;
}
ni->runlist.rl = rl;
/* Seek to the end of the runlist. */
while (rl->length)
rl++;
}
/*
* We now know the runlist of the last extent is mapped and @rl is at
* the end of the runlist. We want to begin allocating clusters
* starting at the last allocated cluster to reduce fragmentation. If
* there are no valid LCNs in the attribute we let the cluster
* allocator choose the starting cluster.
*/
	/* If the last LCN is a hole or similar, seek back to last real LCN. */
while (rl->lcn < 0 && rl > ni->runlist.rl)
rl--;
first_alloc:
// FIXME: Need to implement partial allocations so at least part of the
// write can be performed when start >= 0. (Needed for POSIX write(2)
// conformance.)
rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
(new_alloc_size - allocated_size) >>
vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
rl->lcn + rl->length : -1, DATA_ZONE, true);
if (IS_ERR(rl2)) {
err = PTR_ERR(rl2);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because the allocation of clusters "
"failed with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err != -ENOMEM && err != -ENOSPC)
err = -EIO;
goto err_out;
}
rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because the runlist merge failed "
"with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err != -ENOMEM)
err = -EIO;
if (ntfs_cluster_free_from_rl(vol, rl2)) {
ntfs_error(vol->sb, "Failed to release allocated "
"cluster(s) in error code path. Run "
"chkdsk to recover the lost "
"cluster(s).");
NVolSetErrors(vol);
}
ntfs_free(rl2);
goto err_out;
}
ni->runlist.rl = rl;
ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size -
allocated_size) >> vol->cluster_size_bits);
/* Find the runlist element with which the attribute extent starts. */
ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
BUG_ON(!rl2);
BUG_ON(!rl2->length);
BUG_ON(rl2->lcn < LCN_HOLE);
mp_rebuilt = false;
/* Get the size for the new mapping pairs array for this extent. */
mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
if (unlikely(mp_size <= 0)) {
err = mp_size;
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because determining the size for the "
"mapping pairs failed with error code "
"%i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
err = -EIO;
goto undo_alloc;
}
/* Extend the attribute record to fit the bigger mapping pairs array. */
attr_len = le32_to_cpu(a->length);
err = ntfs_attr_record_resize(m, a, mp_size +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
if (unlikely(err)) {
BUG_ON(err != -ENOSPC);
// TODO: Deal with this by moving this extent to a new mft
// record or by starting a new extent in a new mft record,
// possibly by extending this extent partially and filling it
// and creating a new extent for the remainder, or by making
// other attributes non-resident and/or by moving other
// attributes out of this mft record.
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Not enough space in the mft "
"record for the extended attribute "
"record. This case is not "
"implemented yet.");
err = -EOPNOTSUPP;
goto undo_alloc;
}
mp_rebuilt = true;
/* Generate the mapping pairs array directly into the attr record. */
err = ntfs_mapping_pairs_build(vol, (u8*)a +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
mp_size, rl2, ll, -1, NULL);
if (unlikely(err)) {
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because building the mapping pairs "
"failed with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
err = -EIO;
goto undo_alloc;
}
/* Update the highest_vcn. */
a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
vol->cluster_size_bits) - 1);
/*
* We now have extended the allocated size of the attribute. Reflect
* this in the ntfs_inode structure and the attribute record.
*/
if (a->data.non_resident.lowest_vcn) {
/*
* We are not in the first attribute extent, switch to it, but
* first ensure the changes will make it to disk later.
*/
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_reinit_search_ctx(ctx);
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err))
goto restore_undo_alloc;
/* @m is not used any more so no need to set it. */
a = ctx->attr;
}
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = new_alloc_size;
a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
/*
* FIXME: This would fail if @ni is a directory, $MFT, or an index,
	 * since those can have the sparse/compressed flags set. For example,
	 * a directory can be marked compressed even though it is not itself
	 * compressed; in that case the bit means that files created in the
	 * directory are to be compressed... At present this is ok as this
	 * code is only called for regular files, and only for their $DATA
	 * attribute(s).
* FIXME: The calculation is wrong if we created a hole above. For now
* it does not matter as we never create holes.
*/
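	/*
	 * Note on units (a sketch): i_blocks counts 512 byte sectors, hence
	 * the shift by 9 below; e.g. a new allocated size of 0x10000 bytes
	 * corresponds to 128 sectors, independently of the volume cluster
	 * size.
	 */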
if (NInoSparse(ni) || NInoCompressed(ni)) {
ni->itype.compressed.size += new_alloc_size - allocated_size;
a->data.non_resident.compressed_size =
cpu_to_sle64(ni->itype.compressed.size);
vi->i_blocks = ni->itype.compressed.size >> 9;
} else
vi->i_blocks = new_alloc_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
alloc_done:
if (new_data_size >= 0) {
BUG_ON(new_data_size <
sle64_to_cpu(a->data.non_resident.data_size));
a->data.non_resident.data_size = cpu_to_sle64(new_data_size);
}
flush_done:
/* Ensure the changes make it to disk. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
done:
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
ntfs_debug("Done, new_allocated_size 0x%llx.",
(unsigned long long)new_alloc_size);
return new_alloc_size;
restore_undo_alloc:
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot complete extension of allocation "
"of inode 0x%lx, attribute type 0x%x, because "
"lookup of first attribute extent failed with "
"error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err == -ENOENT)
err = -EIO;
ntfs_attr_reinit_search_ctx(ctx);
if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE,
allocated_size >> vol->cluster_size_bits, NULL, 0,
ctx)) {
ntfs_error(vol->sb, "Failed to find last attribute extent of "
"attribute in error code path. Run chkdsk to "
"recover.");
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = new_alloc_size;
/*
* FIXME: This would fail if @ni is a directory... See above.
* FIXME: The calculation is wrong if we created a hole above.
* For now it does not matter as we never create holes.
*/
if (NInoSparse(ni) || NInoCompressed(ni)) {
ni->itype.compressed.size += new_alloc_size -
allocated_size;
vi->i_blocks = ni->itype.compressed.size >> 9;
} else
vi->i_blocks = new_alloc_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
/*
* The only thing that is now wrong is the allocated size of the
* base attribute extent which chkdsk should be able to fix.
*/
NVolSetErrors(vol);
return err;
}
ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64(
(allocated_size >> vol->cluster_size_bits) - 1);
undo_alloc:
ll = allocated_size >> vol->cluster_size_bits;
if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) {
ntfs_error(vol->sb, "Failed to release allocated cluster(s) "
"in error code path. Run chkdsk to recover "
"the lost cluster(s).");
NVolSetErrors(vol);
}
m = ctx->mrec;
a = ctx->attr;
/*
* If the runlist truncation fails and/or the search context is no
* longer valid, we cannot resize the attribute record or build the
* mapping pairs array thus we mark the inode bad so that no access to
* the freed clusters can happen.
*/
if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) {
ntfs_error(vol->sb, "Failed to %s in error code path. Run "
"chkdsk to recover.", IS_ERR(m) ?
"restore attribute search context" :
"truncate attribute runlist");
NVolSetErrors(vol);
} else if (mp_rebuilt) {
if (ntfs_attr_record_resize(m, a, attr_len)) {
ntfs_error(vol->sb, "Failed to restore attribute "
"record in error code path. Run "
"chkdsk to recover.");
NVolSetErrors(vol);
} else /* if (success) */ {
if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
a->data.non_resident.
mapping_pairs_offset), attr_len -
le16_to_cpu(a->data.non_resident.
mapping_pairs_offset), rl2, ll, -1,
NULL)) {
ntfs_error(vol->sb, "Failed to restore "
"mapping pairs array in error "
"code path. Run chkdsk to "
"recover.");
NVolSetErrors(vol);
}
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
}
}
err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
conv_err_out:
ntfs_debug("Failed. Returning error code %i.", err);
return err;
}
/**
* ntfs_attr_set - fill (a part of) an attribute with a byte
* @ni: ntfs inode describing the attribute to fill
* @ofs: offset inside the attribute at which to start to fill
* @cnt: number of bytes to fill
* @val: the unsigned 8-bit value with which to fill the attribute
*
* Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
* byte offset @ofs inside the attribute with the constant byte @val.
*
* This function is effectively like memset() applied to an ntfs attribute.
* Note this function actually only operates on the page cache pages belonging
* to the ntfs attribute and it marks them dirty after doing the memset().
* Thus it relies on the vm dirty page write code paths to cause the modified
* pages to be written to the mft record/disk.
*
* Return 0 on success and -errno on error. An error code of -ESPIPE means
 * that @ofs + @cnt is beyond the end of the attribute and no write was
* performed.
*/
int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
{
ntfs_volume *vol = ni->vol;
struct address_space *mapping;
struct page *page;
u8 *kaddr;
pgoff_t idx, end;
unsigned start_ofs, end_ofs, size;
ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
(long long)ofs, (long long)cnt, val);
BUG_ON(ofs < 0);
BUG_ON(cnt < 0);
if (!cnt)
goto done;
/*
* FIXME: Compressed and encrypted attributes are not supported when
* writing and we should never have gotten here for them.
*/
BUG_ON(NInoCompressed(ni));
BUG_ON(NInoEncrypted(ni));
mapping = VFS_I(ni)->i_mapping;
/* Work out the starting index and page offset. */
idx = ofs >> PAGE_SHIFT;
start_ofs = ofs & ~PAGE_MASK;
/* Work out the ending index and page offset. */
end = ofs + cnt;
end_ofs = end & ~PAGE_MASK;
/* If the end is outside the inode size return -ESPIPE. */
if (unlikely(end > i_size_read(VFS_I(ni)))) {
ntfs_error(vol->sb, "Request exceeds end of attribute.");
return -ESPIPE;
}
end >>= PAGE_SHIFT;
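	/*
	 * Worked example (a sketch, assuming PAGE_SIZE == 4096): ofs ==
	 * 0x1100 and cnt == 0x2000 give idx == 1, start_ofs == 0x100,
	 * end_ofs == 0x100 and end == 3, i.e. a partial first page, one
	 * whole page, and a partial last page.
	 */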
/* If there is a first partial page, need to do it the slow way. */
if (start_ofs) {
page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read first partial "
"page (error, index 0x%lx).", idx);
return PTR_ERR(page);
}
/*
* If the last page is the same as the first page, need to
* limit the write to the end offset.
*/
size = PAGE_SIZE;
if (idx == end)
size = end_ofs;
kaddr = kmap_atomic(page);
memset(kaddr + start_ofs, val, size - start_ofs);
flush_dcache_page(page);
kunmap_atomic(kaddr);
set_page_dirty(page);
put_page(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
if (idx == end)
goto done;
idx++;
}
/* Do the whole pages the fast way. */
for (; idx < end; idx++) {
/* Find or create the current page. (The page is locked.) */
page = grab_cache_page(mapping, idx);
if (unlikely(!page)) {
ntfs_error(vol->sb, "Insufficient memory to grab "
"page (index 0x%lx).", idx);
return -ENOMEM;
}
kaddr = kmap_atomic(page);
memset(kaddr, val, PAGE_SIZE);
flush_dcache_page(page);
kunmap_atomic(kaddr);
/*
* If the page has buffers, mark them uptodate since buffer
* state and not page state is definitive in 2.6 kernels.
*/
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
bh = head = page_buffers(page);
do {
set_buffer_uptodate(bh);
} while ((bh = bh->b_this_page) != head);
}
/* Now that buffers are uptodate, set the page uptodate, too. */
SetPageUptodate(page);
/*
* Set the page and all its buffers dirty and mark the inode
* dirty, too. The VM will write the page later on.
*/
set_page_dirty(page);
/* Finally unlock and release the page. */
unlock_page(page);
put_page(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
}
/* If there is a last partial page, need to do it the slow way. */
if (end_ofs) {
page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read last partial page "
"(error, index 0x%lx).", idx);
return PTR_ERR(page);
}
kaddr = kmap_atomic(page);
memset(kaddr, val, end_ofs);
flush_dcache_page(page);
kunmap_atomic(kaddr);
set_page_dirty(page);
put_page(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
}
done:
ntfs_debug("Done.");
return 0;
}
#endif /* NTFS_RW */
| linux-master | fs/ntfs/attrib.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc.
*/
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include <asm/page.h>
#include <linux/uaccess.h>
#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
/**
* ntfs_file_open - called when an inode is about to be opened
* @vi: inode to be opened
* @filp: file structure describing the inode
*
 * Limit file size to the page cache limit on architectures where unsigned long
 * is 32 bits wide. This is the most we can do for now without overflowing the
 * page cache page index. Doing it this way means we do not run into problems
 * because of files that are already too large. It would be better to allow the
 * user to read the beginning of the file but I doubt very much anyone is going
 * to hit this
* check on a 32-bit architecture, so there is no point in adding the extra
* complexity required to support this.
*
* On 64-bit architectures, the check is hopefully optimized away by the
* compiler.
*
* After the check passes, just call generic_file_open() to do its work.
*/
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
if (sizeof(unsigned long) < 8) {
if (i_size_read(vi) > MAX_LFS_FILESIZE)
return -EOVERFLOW;
}
return generic_file_open(vi, filp);
}
#ifdef NTFS_RW
/**
* ntfs_attr_extend_initialized - extend the initialized size of an attribute
* @ni: ntfs inode of the attribute to extend
* @new_init_size: requested new initialized size in bytes
*
* Extend the initialized size of an attribute described by the ntfs inode @ni
* to @new_init_size bytes. This involves zeroing any non-sparse space between
* the old initialized size and @new_init_size both in the page cache and on
* disk (if relevant complete pages are already uptodate in the page cache then
* these are simply marked dirty).
*
* As a side-effect, the file size (vfs inode->i_size) may be incremented as,
* in the resident attribute case, it is tied to the initialized size and, in
* the non-resident attribute case, it may not fall below the initialized size.
*
* Note that if the attribute is resident, we do not need to touch the page
* cache at all. This is because if the page cache page is not uptodate we
* bring it uptodate later, when doing the write to the mft record since we
* then already have the page mapped. And if the page is uptodate, the
* non-initialized region will already have been zeroed when the page was
* brought uptodate and the region may in fact already have been overwritten
* with new data via mmap() based writes, so we cannot just zero it. And since
* POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
* is unspecified, we choose not to do zeroing and thus we do not need to touch
* the page at all. For a more detailed explanation see ntfs_truncate() in
* fs/ntfs/inode.c.
*
* Return 0 on success and -errno on error. In the case that an error is
* encountered it is possible that the initialized size will already have been
* incremented some way towards @new_init_size but it is guaranteed that if
* this is the case, the necessary zeroing will also have happened and that all
* metadata is self-consistent.
*
 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must be
* held by the caller.
*/
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
s64 old_init_size;
loff_t old_i_size;
pgoff_t index, end_index;
unsigned long flags;
struct inode *vi = VFS_I(ni);
ntfs_inode *base_ni;
MFT_RECORD *m = NULL;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx = NULL;
struct address_space *mapping;
struct page *page = NULL;
u8 *kattr;
int err;
u32 attr_len;
read_lock_irqsave(&ni->size_lock, flags);
old_init_size = ni->initialized_size;
old_i_size = i_size_read(vi);
BUG_ON(new_init_size > ni->allocated_size);
read_unlock_irqrestore(&ni->size_lock, flags);
ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
"old_initialized_size 0x%llx, "
"new_initialized_size 0x%llx, i_size 0x%llx.",
vi->i_ino, (unsigned)le32_to_cpu(ni->type),
(unsigned long long)old_init_size,
(unsigned long long)new_init_size, old_i_size);
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
/* Use goto to reduce indentation and we need the label below anyway. */
if (NInoNonResident(ni))
goto do_non_resident_extend;
BUG_ON(old_init_size != old_i_size);
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
m = ctx->mrec;
a = ctx->attr;
BUG_ON(a->non_resident);
/* The total length of the attribute value. */
attr_len = le32_to_cpu(a->data.resident.value_length);
BUG_ON(old_i_size != (loff_t)attr_len);
/*
* Do the zeroing in the mft record and update the attribute size in
* the mft record.
*/
kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
memset(kattr + attr_len, 0, new_init_size - attr_len);
a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
/* Finally, update the sizes in the vfs and ntfs inodes. */
write_lock_irqsave(&ni->size_lock, flags);
i_size_write(vi, new_init_size);
ni->initialized_size = new_init_size;
write_unlock_irqrestore(&ni->size_lock, flags);
goto done;
do_non_resident_extend:
/*
* If the new initialized size @new_init_size exceeds the current file
* size (vfs inode->i_size), we need to extend the file size to the
* new initialized size.
*/
if (new_init_size > old_i_size) {
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
m = ctx->mrec;
a = ctx->attr;
BUG_ON(!a->non_resident);
BUG_ON(old_i_size != (loff_t)
sle64_to_cpu(a->data.non_resident.data_size));
a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
/* Update the file size in the vfs inode. */
i_size_write(vi, new_init_size);
ntfs_attr_put_search_ctx(ctx);
ctx = NULL;
unmap_mft_record(base_ni);
m = NULL;
}
mapping = vi->i_mapping;
index = old_init_size >> PAGE_SHIFT;
end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
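	/*
	 * A sketch of the page range, assuming PAGE_SIZE == 4096: extending
	 * the initialized size from 0x1800 to 0x4100 gives index == 1 and
	 * end_index == 5, so the loop below touches pages 1 to 4 inclusive.
	 */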
do {
/*
* Read the page. If the page is not present, this will zero
* the uninitialized regions for us.
*/
page = read_mapping_page(mapping, index, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto init_err_out;
}
/*
* Update the initialized size in the ntfs inode. This is
* enough to make ntfs_writepage() work.
*/
write_lock_irqsave(&ni->size_lock, flags);
ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT;
if (ni->initialized_size > new_init_size)
ni->initialized_size = new_init_size;
write_unlock_irqrestore(&ni->size_lock, flags);
/* Set the page dirty so it gets written out. */
set_page_dirty(page);
put_page(page);
/*
* Play nice with the vm and the rest of the system. This is
* very much needed as we can potentially be modifying the
* initialised size from a very small value to a really huge
* value, e.g.
* f = open(somefile, O_TRUNC);
* truncate(f, 10GiB);
* seek(f, 10GiB);
* write(f, 1);
* And this would mean we would be marking dirty hundreds of
* thousands of pages or as in the above example more than
* two and a half million pages!
*
* TODO: For sparse pages could optimize this workload by using
* the FsMisc / MiscFs page bit as a "PageIsSparse" bit. This
* would be set in read_folio for sparse pages and here we would
* not need to mark dirty any pages which have this bit set.
* The only caveat is that we have to clear the bit everywhere
* where we allocate any clusters that lie in the page or that
* contain the page.
*
* TODO: An even greater optimization would be for us to only
* call read_folio() on pages which are not in sparse regions as
* determined from the runlist. This would greatly reduce the
* number of pages we read and make dirty in the case of sparse
* files.
*/
balance_dirty_pages_ratelimited(mapping);
cond_resched();
} while (++index < end_index);
read_lock_irqsave(&ni->size_lock, flags);
BUG_ON(ni->initialized_size != new_init_size);
read_unlock_irqrestore(&ni->size_lock, flags);
/* Now bring in sync the initialized_size in the mft record. */
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
goto init_err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto init_err_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto init_err_out;
}
m = ctx->mrec;
a = ctx->attr;
BUG_ON(!a->non_resident);
a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
(unsigned long long)new_init_size, i_size_read(vi));
return 0;
init_err_out:
write_lock_irqsave(&ni->size_lock, flags);
ni->initialized_size = old_init_size;
write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
ntfs_debug("Failed. Returning error code %i.", err);
return err;
}
static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb,
struct iov_iter *from)
{
loff_t pos;
s64 end, ll;
ssize_t err;
unsigned long flags;
struct file *file = iocb->ki_filp;
struct inode *vi = file_inode(file);
ntfs_inode *ni = NTFS_I(vi);
ntfs_volume *vol = ni->vol;
ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
"0x%llx, count 0x%zx.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
(unsigned long long)iocb->ki_pos,
iov_iter_count(from));
err = generic_write_checks(iocb, from);
if (unlikely(err <= 0))
goto out;
/*
* All checks have passed. Before we start doing any writing we want
* to abort any totally illegal writes.
*/
BUG_ON(NInoMstProtected(ni));
BUG_ON(ni->type != AT_DATA);
/* If file is encrypted, deny access, just like NT4. */
if (NInoEncrypted(ni)) {
/* Only $DATA attributes can be encrypted. */
/*
* Reminder for later: Encrypted files are _always_
* non-resident so that the content can always be encrypted.
*/
ntfs_debug("Denying write access to encrypted file.");
err = -EACCES;
goto out;
}
if (NInoCompressed(ni)) {
		/* Only the unnamed $DATA attribute can be compressed. */
BUG_ON(ni->name_len);
/*
* Reminder for later: If resident, the data is not actually
* compressed. Only on the switch to non-resident does
* compression kick in. This is in contrast to encrypted files
* (see above).
*/
ntfs_error(vi->i_sb, "Writing to compressed files is not "
"implemented yet. Sorry.");
err = -EOPNOTSUPP;
goto out;
}
err = file_remove_privs(file);
if (unlikely(err))
goto out;
/*
* Our ->update_time method always succeeds thus file_update_time()
* cannot fail either so there is no need to check the return code.
*/
file_update_time(file);
pos = iocb->ki_pos;
/* The first byte after the last cluster being written to. */
end = (pos + iov_iter_count(from) + vol->cluster_size_mask) &
~(u64)vol->cluster_size_mask;
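	/*
	 * Rounding sketch, assuming a 4096 byte cluster size
	 * (cluster_size_mask == 0xfff): pos == 0x1800 with a 0x1000 byte
	 * write gives end == (0x2800 + 0xfff) & ~0xfff == 0x3000, i.e. the
	 * end is rounded up to the next cluster boundary.
	 */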
/*
* If the write goes beyond the allocated size, extend the allocation
* to cover the whole of the write, rounded up to the nearest cluster.
*/
read_lock_irqsave(&ni->size_lock, flags);
ll = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (end > ll) {
/*
* Extend the allocation without changing the data size.
*
* Note we ensure the allocation is big enough to at least
* write some data but we do not require the allocation to be
* complete, i.e. it may be partial.
*/
ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
if (likely(ll >= 0)) {
BUG_ON(pos >= ll);
/* If the extension was partial truncate the write. */
if (end > ll) {
ntfs_debug("Truncating write to inode 0x%lx, "
"attribute type 0x%x, because "
"the allocation was only "
"partially extended.",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type));
iov_iter_truncate(from, ll - pos);
}
} else {
err = ll;
read_lock_irqsave(&ni->size_lock, flags);
ll = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
/* Perform a partial write if possible or fail. */
if (pos < ll) {
ntfs_debug("Truncating write to inode 0x%lx "
"attribute type 0x%x, because "
"extending the allocation "
"failed (error %d).",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type),
(int)-err);
iov_iter_truncate(from, ll - pos);
} else {
if (err != -ENOSPC)
ntfs_error(vi->i_sb, "Cannot perform "
"write to inode "
"0x%lx, attribute "
"type 0x%x, because "
"extending the "
"allocation failed "
"(error %ld).",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type),
(long)-err);
else
ntfs_debug("Cannot perform write to "
"inode 0x%lx, "
"attribute type 0x%x, "
"because there is not "
"space left.",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type));
goto out;
}
}
}
/*
* If the write starts beyond the initialized size, extend it up to the
* beginning of the write and initialize all non-sparse space between
* the old initialized size and the new one. This automatically also
* increments the vfs inode->i_size to keep it above or equal to the
* initialized_size.
*/
read_lock_irqsave(&ni->size_lock, flags);
ll = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (pos > ll) {
/*
* Wait for ongoing direct i/o to complete before proceeding.
* New direct i/o cannot start as we hold i_mutex.
*/
inode_dio_wait(vi);
err = ntfs_attr_extend_initialized(ni, pos);
if (unlikely(err < 0))
ntfs_error(vi->i_sb, "Cannot perform write to inode "
"0x%lx, attribute type 0x%x, because "
"extending the initialized size "
"failed (error %d).", vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
(int)-err);
}
out:
return err;
}
/**
* __ntfs_grab_cache_pages - obtain a number of locked pages
* @mapping: address space mapping from which to obtain page cache pages
* @index: starting index in @mapping at which to begin obtaining pages
* @nr_pages: number of page cache pages to obtain
* @pages: array of pages in which to return the obtained page cache pages
* @cached_page: allocated but as yet unused page
*
 * Obtain @nr_pages locked page cache pages from the mapping @mapping,
 * starting at index @index.
 *
 * If a page is newly created, it is added to the LRU list.
*
* Note, the page locks are obtained in ascending page index order.
*/
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
pgoff_t index, const unsigned nr_pages, struct page **pages,
struct page **cached_page)
{
int err, nr;
BUG_ON(!nr_pages);
err = nr = 0;
do {
pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK |
FGP_ACCESSED);
if (!pages[nr]) {
if (!*cached_page) {
*cached_page = page_cache_alloc(mapping);
if (unlikely(!*cached_page)) {
err = -ENOMEM;
goto err_out;
}
}
err = add_to_page_cache_lru(*cached_page, mapping,
index,
mapping_gfp_constraint(mapping, GFP_KERNEL));
			if (unlikely(err)) {
				/*
				 * -EEXIST means another task inserted the
				 * page first; loop round and retry the
				 * lookup of the same index.
				 */
				if (err == -EEXIST)
					continue;
goto err_out;
}
pages[nr] = *cached_page;
*cached_page = NULL;
}
index++;
nr++;
} while (nr < nr_pages);
out:
return err;
err_out:
while (nr > 0) {
unlock_page(pages[--nr]);
put_page(pages[nr]);
}
goto out;
}
static inline void ntfs_submit_bh_for_read(struct buffer_head *bh)
{
lock_buffer(bh);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(REQ_OP_READ, bh);
}
/**
* ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
* @pages: array of destination pages
* @nr_pages: number of pages in @pages
* @pos: byte position in file at which the write begins
* @bytes: number of bytes to be written
*
* This is called for non-resident attributes from ntfs_file_buffered_write()
* with i_mutex held on the inode (@pages[0]->mapping->host). There are
* @nr_pages pages in @pages which are locked but not kmap()ped. The source
* data has not yet been copied into the @pages.
*
* Need to fill any holes with actual clusters, allocate buffers if necessary,
* ensure all the buffers are mapped, and bring uptodate any buffers that are
* only partially being written to.
*
* If @nr_pages is greater than one, we are guaranteed that the cluster size is
* greater than PAGE_SIZE, that all pages in @pages are entirely inside
* the same cluster and that they are the entirety of that cluster, and that
* the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
*
* i_size is not to be modified yet.
*
* Return 0 on success or -errno on error.
*/
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
unsigned nr_pages, s64 pos, size_t bytes)
{
VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
LCN lcn;
s64 bh_pos, vcn_len, end, initialized_size;
sector_t lcn_block;
struct page *page;
struct inode *vi;
ntfs_inode *ni, *base_ni = NULL;
ntfs_volume *vol;
runlist_element *rl, *rl2;
struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
ntfs_attr_search_ctx *ctx = NULL;
MFT_RECORD *m = NULL;
ATTR_RECORD *a = NULL;
unsigned long flags;
u32 attr_rec_len = 0;
unsigned blocksize, u;
int err, mp_size;
bool rl_write_locked, was_hole, is_retry;
unsigned char blocksize_bits;
struct {
u8 runlist_merged:1;
u8 mft_attr_mapped:1;
u8 mp_rebuilt:1;
u8 attr_switched:1;
} status = { 0, 0, 0, 0 };
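	/*
	 * Note: at most two buffers ever need to be read in before the
	 * write, namely a partial buffer at each end of the region being
	 * written, hence the two element wait[] array above.
	 */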
BUG_ON(!nr_pages);
BUG_ON(!pages);
BUG_ON(!*pages);
vi = pages[0]->mapping->host;
ni = NTFS_I(vi);
vol = ni->vol;
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
vi->i_ino, ni->type, pages[0]->index, nr_pages,
(long long)pos, bytes);
blocksize = vol->sb->s_blocksize;
blocksize_bits = vol->sb->s_blocksize_bits;
u = 0;
do {
page = pages[u];
BUG_ON(!page);
/*
* create_empty_buffers() will create uptodate/dirty buffers if
* the page is uptodate/dirty.
*/
if (!page_has_buffers(page)) {
create_empty_buffers(page, blocksize, 0);
if (unlikely(!page_has_buffers(page)))
return -ENOMEM;
}
} while (++u < nr_pages);
rl_write_locked = false;
rl = NULL;
err = 0;
vcn = lcn = -1;
vcn_len = 0;
lcn_block = -1;
was_hole = false;
cpos = pos >> vol->cluster_size_bits;
end = pos + bytes;
cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
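	/*
	 * A sketch of the cluster range, assuming a 4096 byte cluster size:
	 * pos == 0x1800 and bytes == 0x1000 give cpos == 1, end == 0x2800
	 * and cend == 3, i.e. the write touches clusters 1 and 2.
	 */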
/*
* Loop over each page and for each page over each buffer. Use goto to
* reduce indentation.
*/
u = 0;
do_next_page:
page = pages[u];
bh_pos = (s64)page->index << PAGE_SHIFT;
bh = head = page_buffers(page);
do {
VCN cdelta;
s64 bh_end;
unsigned bh_cofs;
/* Clear buffer_new on all buffers to reinitialise state. */
if (buffer_new(bh))
clear_buffer_new(bh);
bh_end = bh_pos + blocksize;
bh_cpos = bh_pos >> vol->cluster_size_bits;
bh_cofs = bh_pos & vol->cluster_size_mask;
if (buffer_mapped(bh)) {
/*
* The buffer is already mapped. If it is uptodate,
* ignore it.
*/
if (buffer_uptodate(bh))
continue;
/*
* The buffer is not uptodate. If the page is uptodate
* set the buffer uptodate and otherwise ignore it.
*/
if (PageUptodate(page)) {
set_buffer_uptodate(bh);
continue;
}
/*
* Neither the page nor the buffer are uptodate. If
* the buffer is only partially being written to, we
* need to read it in before the write, i.e. now.
*/
if ((bh_pos < pos && bh_end > pos) ||
(bh_pos < end && bh_end > end)) {
/*
* If the buffer is fully or partially within
* the initialized size, do an actual read.
* Otherwise, simply zero the buffer.
*/
read_lock_irqsave(&ni->size_lock, flags);
initialized_size = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (bh_pos < initialized_size) {
ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh;
} else {
zero_user(page, bh_offset(bh),
blocksize);
set_buffer_uptodate(bh);
}
}
continue;
}
/* Unmapped buffer. Need to map it. */
bh->b_bdev = vol->sb->s_bdev;
/*
* If the current buffer is in the same clusters as the map
* cache, there is no need to check the runlist again. The
* map cache is made up of @vcn, which is the first cached file
* cluster, @vcn_len which is the number of cached file
* clusters, @lcn is the device cluster corresponding to @vcn,
* and @lcn_block is the block number corresponding to @lcn.
*/
cdelta = bh_cpos - vcn;
if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
BUG_ON(lcn < 0);
bh->b_blocknr = lcn_block +
(cdelta << (vol->cluster_size_bits -
blocksize_bits)) +
(bh_cofs >> blocksize_bits);
set_buffer_mapped(bh);
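			/*
			 * Mapping sketch, assuming 4096 byte clusters and
			 * 512 byte blocks: lcn_block == lcn << 3, so a
			 * buffer one cluster past @vcn (cdelta == 1) at
			 * cluster offset 0x600 maps to block
			 * lcn_block + 8 + 3.
			 */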
/*
* If the page is uptodate so is the buffer. If the
* buffer is fully outside the write, we ignore it if
* it was already allocated and we mark it dirty so it
* gets written out if we allocated it. On the other
* hand, if we allocated the buffer but we are not
* marking it dirty we set buffer_new so we can do
* error recovery.
*/
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
if (unlikely(was_hole)) {
/* We allocated the buffer. */
clean_bdev_bh_alias(bh);
if (bh_end <= pos || bh_pos >= end)
mark_buffer_dirty(bh);
else
set_buffer_new(bh);
}
continue;
}
/* Page is _not_ uptodate. */
if (likely(!was_hole)) {
/*
* Buffer was already allocated. If it is not
* uptodate and is only partially being written
* to, we need to read it in before the write,
* i.e. now.
*/
if (!buffer_uptodate(bh) && bh_pos < end &&
bh_end > pos &&
(bh_pos < pos ||
bh_end > end)) {
/*
* If the buffer is fully or partially
* within the initialized size, do an
* actual read. Otherwise, simply zero
* the buffer.
*/
read_lock_irqsave(&ni->size_lock,
flags);
initialized_size = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock,
flags);
if (bh_pos < initialized_size) {
ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh;
} else {
zero_user(page, bh_offset(bh),
blocksize);
set_buffer_uptodate(bh);
}
}
continue;
}
/* We allocated the buffer. */
clean_bdev_bh_alias(bh);
/*
* If the buffer is fully outside the write, zero it,
* set it uptodate, and mark it dirty so it gets
* written out. If it is partially being written to,
			 * zero the region surrounding the write but leave it
			 * to the commit write to do anything else. Finally,
			 * if the
* buffer is fully being overwritten, do nothing.
*/
if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh)) {
zero_user(page, bh_offset(bh),
blocksize);
set_buffer_uptodate(bh);
}
mark_buffer_dirty(bh);
continue;
}
set_buffer_new(bh);
if (!buffer_uptodate(bh) &&
(bh_pos < pos || bh_end > end)) {
u8 *kaddr;
unsigned pofs;
kaddr = kmap_atomic(page);
if (bh_pos < pos) {
pofs = bh_pos & ~PAGE_MASK;
memset(kaddr + pofs, 0, pos - bh_pos);
}
if (bh_end > end) {
pofs = end & ~PAGE_MASK;
memset(kaddr + pofs, 0, bh_end - end);
}
kunmap_atomic(kaddr);
flush_dcache_page(page);
}
continue;
}
/*
* Slow path: this is the first buffer in the cluster. If it
		 * is outside the allocated size and is not uptodate, zero it and
* set it uptodate.
*/
read_lock_irqsave(&ni->size_lock, flags);
initialized_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (bh_pos > initialized_size) {
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) {
zero_user(page, bh_offset(bh), blocksize);
set_buffer_uptodate(bh);
}
continue;
}
is_retry = false;
if (!rl) {
down_read(&ni->runlist.lock);
retry_remap:
rl = ni->runlist.rl;
}
if (likely(rl != NULL)) {
/* Seek to element containing target cluster. */
while (rl->length && rl[1].vcn <= bh_cpos)
rl++;
lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
if (likely(lcn >= 0)) {
/*
* Successful remap, setup the map cache and
* use that to deal with the buffer.
*/
was_hole = false;
vcn = bh_cpos;
vcn_len = rl[1].vcn - vcn;
lcn_block = lcn << (vol->cluster_size_bits -
blocksize_bits);
cdelta = 0;
/*
* If the number of remaining clusters touched
* by the write is smaller or equal to the
* number of cached clusters, unlock the
* runlist as the map cache will be used from
* now on.
*/
if (likely(vcn + vcn_len >= cend)) {
if (rl_write_locked) {
up_write(&ni->runlist.lock);
rl_write_locked = false;
} else
up_read(&ni->runlist.lock);
rl = NULL;
}
goto map_buffer_cached;
}
} else
lcn = LCN_RL_NOT_MAPPED;
/*
* If it is not a hole and not out of bounds, the runlist is
* probably unmapped so try to map it now.
*/
if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
/* Attempt to map runlist. */
if (!rl_write_locked) {
/*
* We need the runlist locked for
* writing, so if it is locked for
* reading relock it now and retry in
* case it changed whilst we dropped
* the lock.
*/
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
rl_write_locked = true;
goto retry_remap;
}
err = ntfs_map_runlist_nolock(ni, bh_cpos,
NULL);
if (likely(!err)) {
is_retry = true;
goto retry_remap;
}
/*
* If @vcn is out of bounds, pretend @lcn is
* LCN_ENOENT. As long as the buffer is out
* of bounds this will work fine.
*/
if (err == -ENOENT) {
lcn = LCN_ENOENT;
err = 0;
goto rl_not_mapped_enoent;
}
} else
err = -EIO;
/* Failed to map the buffer, even after retrying. */
bh->b_blocknr = -1;
ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
"attribute type 0x%x, vcn 0x%llx, "
"vcn offset 0x%x, because its "
"location on disk could not be "
"determined%s (error code %i).",
ni->mft_no, ni->type,
(unsigned long long)bh_cpos,
(unsigned)bh_pos &
vol->cluster_size_mask,
is_retry ? " even after retrying" : "",
err);
break;
}
rl_not_mapped_enoent:
/*
* The buffer is in a hole or out of bounds. We need to fill
* the hole, unless the buffer is in a cluster which is not
* touched by the write, in which case we just leave the buffer
* unmapped. This can only happen when the cluster size is
* less than the page cache size.
*/
if (unlikely(vol->cluster_size < PAGE_SIZE)) {
bh_cend = (bh_end + vol->cluster_size - 1) >>
vol->cluster_size_bits;
if ((bh_cend <= cpos || bh_cpos >= cend)) {
bh->b_blocknr = -1;
/*
* If the buffer is uptodate we skip it. If it
* is not but the page is uptodate, we can set
* the buffer uptodate. If the page is not
* uptodate, we can clear the buffer and set it
* uptodate. Whether this is worthwhile is
* debatable and this could be removed.
*/
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) {
zero_user(page, bh_offset(bh),
blocksize);
set_buffer_uptodate(bh);
}
continue;
}
}
/*
		 * If we get here the buffer must be in a hole; a genuinely
		 * out of bounds buffer would have been dealt with above.
*/
BUG_ON(lcn != LCN_HOLE);
/*
* We need the runlist locked for writing, so if it is locked
* for reading relock it now and retry in case it changed
* whilst we dropped the lock.
*/
BUG_ON(!rl);
if (!rl_write_locked) {
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
rl_write_locked = true;
goto retry_remap;
}
/* Find the previous last allocated cluster. */
BUG_ON(rl->lcn != LCN_HOLE);
lcn = -1;
rl2 = rl;
while (--rl2 >= ni->runlist.rl) {
if (rl2->lcn >= 0) {
lcn = rl2->lcn + rl2->length;
break;
}
}
rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
false);
if (IS_ERR(rl2)) {
err = PTR_ERR(rl2);
ntfs_debug("Failed to allocate cluster, error code %i.",
err);
break;
}
lcn = rl2->lcn;
rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
if (err != -ENOMEM)
err = -EIO;
if (ntfs_cluster_free_from_rl(vol, rl2)) {
ntfs_error(vol->sb, "Failed to release "
"allocated cluster in error "
"code path. Run chkdsk to "
"recover the lost cluster.");
NVolSetErrors(vol);
}
ntfs_free(rl2);
break;
}
ni->runlist.rl = rl;
status.runlist_merged = 1;
ntfs_debug("Allocated cluster, lcn 0x%llx.",
(unsigned long long)lcn);
/* Map and lock the mft record and get the attribute record. */
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
break;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
unmap_mft_record(base_ni);
break;
}
status.mft_attr_mapped = 1;
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
break;
}
m = ctx->mrec;
a = ctx->attr;
/*
* Find the runlist element with which the attribute extent
* starts. Note, we cannot use the _attr_ version because we
* have mapped the mft record. That is ok because we know the
* runlist fragment must be mapped already to have ever gotten
* here, so we can just use the _rl_ version.
*/
vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
BUG_ON(!rl2);
BUG_ON(!rl2->length);
BUG_ON(rl2->lcn < LCN_HOLE);
highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
/*
* If @highest_vcn is zero, calculate the real highest_vcn
* (which can really be zero).
*/
if (!highest_vcn)
highest_vcn = (sle64_to_cpu(
a->data.non_resident.allocated_size) >>
vol->cluster_size_bits) - 1;
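		/*
		 * E.g. (a sketch): an allocated size of 0x3000 bytes with
		 * 4096 byte clusters yields highest_vcn == 2, i.e. the
		 * extent covers vcns 0 to 2.
		 */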
/*
* Determine the size of the mapping pairs array for the new
* extent, i.e. the old extent with the hole filled.
*/
mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
highest_vcn);
if (unlikely(mp_size <= 0)) {
if (!(err = mp_size))
err = -EIO;
ntfs_debug("Failed to get size for mapping pairs "
"array, error code %i.", err);
break;
}
/*
* Resize the attribute record to fit the new mapping pairs
* array.
*/
attr_rec_len = le32_to_cpu(a->length);
err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
a->data.non_resident.mapping_pairs_offset));
if (unlikely(err)) {
BUG_ON(err != -ENOSPC);
// TODO: Deal with this by using the current attribute
// and fill it with as much of the mapping pairs
// array as possible. Then loop over each attribute
// extent rewriting the mapping pairs arrays as we go
		// along and if, when we reach the end, we still do not
		// have enough space, try to resize the last attribute
// extent and if even that fails, add a new attribute
// extent.
// We could also try to resize at each step in the hope
// that we will not need to rewrite every single extent.
// Note, we may need to decompress some extents to fill
// the runlist as we are walking the extents...
ntfs_error(vol->sb, "Not enough space in the mft "
"record for the extended attribute "
"record. This case is not "
"implemented yet.");
err = -EOPNOTSUPP;
			break;
}
status.mp_rebuilt = 1;
/*
* Generate the mapping pairs array directly into the attribute
* record.
*/
err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
a->data.non_resident.mapping_pairs_offset),
mp_size, rl2, vcn, highest_vcn, NULL);
if (unlikely(err)) {
ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
"attribute type 0x%x, because building "
"the mapping pairs failed with error "
"code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
err = -EIO;
break;
}
/* Update the highest_vcn but only if it was not set. */
if (unlikely(!a->data.non_resident.highest_vcn))
a->data.non_resident.highest_vcn =
cpu_to_sle64(highest_vcn);
/*
* If the attribute is sparse/compressed, update the compressed
* size in the ntfs_inode structure and the attribute record.
*/
if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
/*
* If we are not in the first attribute extent, switch
* to it, but first ensure the changes will make it to
* disk later.
*/
if (a->data.non_resident.lowest_vcn) {
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_reinit_search_ctx(ctx);
err = ntfs_attr_lookup(ni->type, ni->name,
ni->name_len, CASE_SENSITIVE,
0, NULL, 0, ctx);
if (unlikely(err)) {
status.attr_switched = 1;
break;
}
/* @m is not used any more so do not set it. */
a = ctx->attr;
}
write_lock_irqsave(&ni->size_lock, flags);
ni->itype.compressed.size += vol->cluster_size;
a->data.non_resident.compressed_size =
cpu_to_sle64(ni->itype.compressed.size);
write_unlock_irqrestore(&ni->size_lock, flags);
}
/* Ensure the changes make it to disk. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
/* Successfully filled the hole. */
status.runlist_merged = 0;
status.mft_attr_mapped = 0;
status.mp_rebuilt = 0;
/* Setup the map cache and use that to deal with the buffer. */
was_hole = true;
vcn = bh_cpos;
vcn_len = 1;
lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
cdelta = 0;
/*
* If the number of remaining clusters in the @pages is smaller
* or equal to the number of cached clusters, unlock the
* runlist as the map cache will be used from now on.
*/
if (likely(vcn + vcn_len >= cend)) {
up_write(&ni->runlist.lock);
rl_write_locked = false;
rl = NULL;
}
goto map_buffer_cached;
} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
/* If there are no errors, do the next page. */
if (likely(!err && ++u < nr_pages))
goto do_next_page;
/* If there are no errors, release the runlist lock if we took it. */
if (likely(!err)) {
if (unlikely(rl_write_locked)) {
up_write(&ni->runlist.lock);
rl_write_locked = false;
} else if (unlikely(rl))
up_read(&ni->runlist.lock);
rl = NULL;
}
/* If we issued read requests, let them complete. */
read_lock_irqsave(&ni->size_lock, flags);
initialized_size = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
while (wait_bh > wait) {
bh = *--wait_bh;
wait_on_buffer(bh);
if (likely(buffer_uptodate(bh))) {
page = bh->b_page;
bh_pos = ((s64)page->index << PAGE_SHIFT) +
bh_offset(bh);
/*
* If the buffer overflows the initialized size, need
* to zero the overflowing region.
*/
if (unlikely(bh_pos + blocksize > initialized_size)) {
int ofs = 0;
if (likely(bh_pos < initialized_size))
ofs = initialized_size - bh_pos;
zero_user_segment(page, bh_offset(bh) + ofs,
blocksize);
}
} else /* if (unlikely(!buffer_uptodate(bh))) */
err = -EIO;
}
if (likely(!err)) {
/* Clear buffer_new on all buffers. */
u = 0;
do {
bh = head = page_buffers(pages[u]);
do {
if (buffer_new(bh))
clear_buffer_new(bh);
} while ((bh = bh->b_this_page) != head);
} while (++u < nr_pages);
ntfs_debug("Done.");
return err;
}
if (status.attr_switched) {
/* Get back to the attribute extent we modified. */
ntfs_attr_reinit_search_ctx(ctx);
if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
ntfs_error(vol->sb, "Failed to find required "
"attribute extent of attribute in "
"error code path. Run chkdsk to "
"recover.");
write_lock_irqsave(&ni->size_lock, flags);
ni->itype.compressed.size += vol->cluster_size;
write_unlock_irqrestore(&ni->size_lock, flags);
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
/*
* The only thing that is now wrong is the compressed
* size of the base attribute extent which chkdsk
* should be able to fix.
*/
NVolSetErrors(vol);
} else {
m = ctx->mrec;
a = ctx->attr;
status.attr_switched = 0;
}
}
/*
* If the runlist has been modified, need to restore it by punching a
* hole into it and we then need to deallocate the on-disk cluster as
* well. Note, we only modify the runlist if we are able to generate a
* new mapping pairs array, i.e. only when the mapped attribute extent
* is not switched.
*/
if (status.runlist_merged && !status.attr_switched) {
BUG_ON(!rl_write_locked);
/* Make the file cluster we allocated sparse in the runlist. */
if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
ntfs_error(vol->sb, "Failed to punch hole into "
"attribute runlist in error code "
"path. Run chkdsk to recover the "
"lost cluster.");
NVolSetErrors(vol);
} else /* if (success) */ {
status.runlist_merged = 0;
/*
* Deallocate the on-disk cluster we allocated but only
* if we succeeded in punching its vcn out of the
* runlist.
*/
down_write(&vol->lcnbmp_lock);
if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
ntfs_error(vol->sb, "Failed to release "
"allocated cluster in error "
"code path. Run chkdsk to "
"recover the lost cluster.");
NVolSetErrors(vol);
}
up_write(&vol->lcnbmp_lock);
}
}
/*
* Resize the attribute record to its old size and rebuild the mapping
	 * pairs array. Note, we can only do this if the runlist has been
* restored to its old state which also implies that the mapped
* attribute extent is not switched.
*/
if (status.mp_rebuilt && !status.runlist_merged) {
if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
ntfs_error(vol->sb, "Failed to restore attribute "
"record in error code path. Run "
"chkdsk to recover.");
NVolSetErrors(vol);
} else /* if (success) */ {
if (ntfs_mapping_pairs_build(vol, (u8*)a +
le16_to_cpu(a->data.non_resident.
mapping_pairs_offset), attr_rec_len -
le16_to_cpu(a->data.non_resident.
mapping_pairs_offset), ni->runlist.rl,
vcn, highest_vcn, NULL)) {
ntfs_error(vol->sb, "Failed to restore "
"mapping pairs array in error "
"code path. Run chkdsk to "
"recover.");
NVolSetErrors(vol);
}
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
}
}
/* Release the mft record and the attribute. */
if (status.mft_attr_mapped) {
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
}
/* Release the runlist lock. */
if (rl_write_locked)
up_write(&ni->runlist.lock);
else if (rl)
up_read(&ni->runlist.lock);
/*
* Zero out any newly allocated blocks to avoid exposing stale data.
* If BH_New is set, we know that the block was newly allocated above
* and that it has not been fully zeroed and marked dirty yet.
*/
nr_pages = u;
u = 0;
end = bh_cpos << vol->cluster_size_bits;
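	/*
	 * A sketch of the cut-off: @bh_cpos is the cluster on which we
	 * failed above, so @end is the first byte the write never reached
	 * and buffers in the last page at or beyond it are skipped.
	 */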
do {
page = pages[u];
bh = head = page_buffers(page);
do {
if (u == nr_pages &&
((s64)page->index << PAGE_SHIFT) +
bh_offset(bh) >= end)
break;
if (!buffer_new(bh))
continue;
clear_buffer_new(bh);
if (!buffer_uptodate(bh)) {
if (PageUptodate(page))
set_buffer_uptodate(bh);
else {
zero_user(page, bh_offset(bh),
blocksize);
set_buffer_uptodate(bh);
}
}
mark_buffer_dirty(bh);
} while ((bh = bh->b_this_page) != head);
} while (++u <= nr_pages);
ntfs_error(vol->sb, "Failed. Returning error code %i.", err);
return err;
}
static inline void ntfs_flush_dcache_pages(struct page **pages,
unsigned nr_pages)
{
BUG_ON(!nr_pages);
/*
* Warning: Do not do the decrement at the same time as the call to
	 * flush_dcache_page() because it is a no-op macro on i386 and hence
	 * the decrement would never happen, so the loop would never terminate.
*/
do {
--nr_pages;
flush_dcache_page(pages[nr_pages]);
} while (nr_pages > 0);
}
/**
* ntfs_commit_pages_after_non_resident_write - commit the received data
* @pages: array of destination pages
* @nr_pages: number of pages in @pages
* @pos: byte position in file at which the write begins
* @bytes: number of bytes to be written
*
* See description of ntfs_commit_pages_after_write(), below.
*/
static inline int ntfs_commit_pages_after_non_resident_write(
struct page **pages, const unsigned nr_pages,
s64 pos, size_t bytes)
{
s64 end, initialized_size;
struct inode *vi;
ntfs_inode *ni, *base_ni;
struct buffer_head *bh, *head;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *m;
ATTR_RECORD *a;
unsigned long flags;
unsigned blocksize, u;
int err;
vi = pages[0]->mapping->host;
ni = NTFS_I(vi);
blocksize = vi->i_sb->s_blocksize;
end = pos + bytes;
u = 0;
do {
s64 bh_pos;
struct page *page;
bool partial;
page = pages[u];
bh_pos = (s64)page->index << PAGE_SHIFT;
bh = head = page_buffers(page);
partial = false;
do {
s64 bh_end;
bh_end = bh_pos + blocksize;
if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh))
partial = true;
} else {
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
}
} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
/*
* If all buffers are now uptodate but the page is not, set the
* page uptodate.
*/
if (!partial && !PageUptodate(page))
SetPageUptodate(page);
} while (++u < nr_pages);
/*
* Finally, if we do not need to update initialized_size or i_size we
* are finished.
*/
read_lock_irqsave(&ni->size_lock, flags);
initialized_size = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (end <= initialized_size) {
ntfs_debug("Done.");
return 0;
}
/*
* Update initialized_size/i_size as appropriate, both in the inode and
* the mft record.
*/
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
/* Map, pin, and lock the mft record. */
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
ctx = NULL;
goto err_out;
}
BUG_ON(!NInoNonResident(ni));
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
a = ctx->attr;
BUG_ON(!a->non_resident);
write_lock_irqsave(&ni->size_lock, flags);
BUG_ON(end > ni->allocated_size);
ni->initialized_size = end;
a->data.non_resident.initialized_size = cpu_to_sle64(end);
if (end > i_size_read(vi)) {
i_size_write(vi, end);
a->data.non_resident.data_size =
a->data.non_resident.initialized_size;
}
write_unlock_irqrestore(&ni->size_lock, flags);
/* Mark the mft record dirty, so it gets written back. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
ntfs_debug("Done.");
return 0;
err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
"code %i).", err);
if (err != -ENOMEM)
NVolSetErrors(ni->vol);
return err;
}
/**
* ntfs_commit_pages_after_write - commit the received data
* @pages: array of destination pages
* @nr_pages: number of pages in @pages
* @pos: byte position in file at which the write begins
* @bytes: number of bytes to be written
*
* This is called from ntfs_file_buffered_write() with i_mutex held on the inode
* (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are
* locked but not kmap()ped. The source data has already been copied into the
* @page. ntfs_prepare_pages_for_non_resident_write() has been called before
* the data was copied (for non-resident attributes only) and it returned
* success.
*
* Need to set uptodate and mark dirty all buffers within the boundary of the
* write. If all buffers in a page are uptodate we set the page uptodate, too.
*
* Setting the buffers dirty ensures that they get written out later when
* ntfs_writepage() is invoked by the VM.
*
* Finally, we need to update i_size and initialized_size as appropriate both
* in the inode and the mft record.
*
* This is modelled after fs/buffer.c::generic_commit_write(), which marks
* buffers uptodate and dirty, sets the page uptodate if all buffers in the
* page are uptodate, and updates i_size if the end of io is beyond i_size. In
* that case, it also marks the inode dirty.
*
* If things have gone as outlined in
* ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
* content modifications here for non-resident attributes. For resident
* attributes we need to do the uptodate bringing here which we combine with
* the copying into the mft record which means we save one atomic kmap.
*
* Return 0 on success or -errno on error.
*/
static int ntfs_commit_pages_after_write(struct page **pages,
const unsigned nr_pages, s64 pos, size_t bytes)
{
s64 end, initialized_size;
loff_t i_size;
struct inode *vi;
ntfs_inode *ni, *base_ni;
struct page *page;
ntfs_attr_search_ctx *ctx;
MFT_RECORD *m;
ATTR_RECORD *a;
char *kattr, *kaddr;
unsigned long flags;
u32 attr_len;
int err;
BUG_ON(!nr_pages);
BUG_ON(!pages);
page = pages[0];
BUG_ON(!page);
vi = page->mapping->host;
ni = NTFS_I(vi);
ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
vi->i_ino, ni->type, page->index, nr_pages,
(long long)pos, bytes);
if (NInoNonResident(ni))
return ntfs_commit_pages_after_non_resident_write(pages,
nr_pages, pos, bytes);
BUG_ON(nr_pages > 1);
/*
* Attribute is resident, implying it is not compressed, encrypted, or
* sparse.
*/
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
BUG_ON(NInoNonResident(ni));
/* Map, pin, and lock the mft record. */
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
ctx = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
a = ctx->attr;
BUG_ON(a->non_resident);
/* The total length of the attribute value. */
attr_len = le32_to_cpu(a->data.resident.value_length);
i_size = i_size_read(vi);
BUG_ON(attr_len != i_size);
BUG_ON(pos > attr_len);
end = pos + bytes;
BUG_ON(end > le32_to_cpu(a->length) -
le16_to_cpu(a->data.resident.value_offset));
kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
kaddr = kmap_atomic(page);
/* Copy the received data from the page to the mft record. */
memcpy(kattr + pos, kaddr + pos, bytes);
/* Update the attribute length if necessary. */
if (end > attr_len) {
attr_len = end;
a->data.resident.value_length = cpu_to_le32(attr_len);
}
/*
* If the page is not uptodate, bring the out of bounds area(s)
* uptodate by copying data from the mft record to the page.
*/
if (!PageUptodate(page)) {
if (pos > 0)
memcpy(kaddr, kattr, pos);
if (end < attr_len)
memcpy(kaddr + end, kattr + end, attr_len - end);
/* Zero the region outside the end of the attribute value. */
memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len);
flush_dcache_page(page);
SetPageUptodate(page);
}
kunmap_atomic(kaddr);
/* Update initialized_size/i_size if necessary. */
read_lock_irqsave(&ni->size_lock, flags);
initialized_size = ni->initialized_size;
BUG_ON(end > ni->allocated_size);
read_unlock_irqrestore(&ni->size_lock, flags);
BUG_ON(initialized_size != i_size);
if (end > initialized_size) {
write_lock_irqsave(&ni->size_lock, flags);
ni->initialized_size = end;
i_size_write(vi, end);
write_unlock_irqrestore(&ni->size_lock, flags);
}
/* Mark the mft record dirty, so it gets written back. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
ntfs_debug("Done.");
return 0;
err_out:
if (err == -ENOMEM) {
ntfs_warning(vi->i_sb, "Error allocating memory required to "
"commit the write.");
if (PageUptodate(page)) {
ntfs_warning(vi->i_sb, "Page is uptodate, setting "
"dirty so the write will be retried "
"later on by the VM.");
/*
* Put the page on mapping->dirty_pages, but leave its
* buffers' dirty state as-is.
*/
__set_page_dirty_nobuffers(page);
err = 0;
} else
ntfs_error(vi->i_sb, "Page is not uptodate. Written "
"data has been lost.");
} else {
ntfs_error(vi->i_sb, "Resident attribute commit write failed "
"with error %i.", err);
NVolSetErrors(ni->vol);
}
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
return err;
}
/*
* Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied. If a fault is encountered then zero the rest of
 * the pages out to (ofs + bytes) and return the number of bytes which were
 * copied.
*/
static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
unsigned ofs, struct iov_iter *i, size_t bytes)
{
struct page **last_page = pages + nr_pages;
size_t total = 0;
unsigned len, copied;
do {
len = PAGE_SIZE - ofs;
if (len > bytes)
len = bytes;
copied = copy_page_from_iter_atomic(*pages, ofs, len, i);
total += copied;
bytes -= copied;
if (!bytes)
break;
if (copied < len)
goto err;
ofs = 0;
} while (++pages < last_page);
out:
return total;
err:
/* Zero the rest of the target like __copy_from_user(). */
len = PAGE_SIZE - copied;
do {
if (len > bytes)
len = bytes;
zero_user(*pages, copied, len);
bytes -= len;
copied = 0;
len = PAGE_SIZE;
} while (++pages < last_page);
goto out;
}
/**
* ntfs_perform_write - perform buffered write to a file
* @file: file to write to
* @i: iov_iter with data to write
* @pos: byte offset in file at which to begin writing to
*/
static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
loff_t pos)
{
struct address_space *mapping = file->f_mapping;
struct inode *vi = mapping->host;
ntfs_inode *ni = NTFS_I(vi);
ntfs_volume *vol = ni->vol;
struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
struct page *cached_page = NULL;
VCN last_vcn;
LCN lcn;
size_t bytes;
ssize_t status, written = 0;
unsigned nr_pages;
ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
"0x%llx, count 0x%lx.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
(unsigned long long)pos,
(unsigned long)iov_iter_count(i));
/*
* If a previous ntfs_truncate() failed, repeat it and abort if it
* fails again.
*/
if (unlikely(NInoTruncateFailed(ni))) {
int err;
inode_dio_wait(vi);
err = ntfs_truncate(vi);
if (err || NInoTruncateFailed(ni)) {
if (!err)
err = -EIO;
ntfs_error(vol->sb, "Cannot perform write to inode "
"0x%lx, attribute type 0x%x, because "
"ntfs_truncate() failed (error code "
"%i).", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
return err;
}
}
/*
* Determine the number of pages per cluster for non-resident
* attributes.
*/
nr_pages = 1;
if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni))
nr_pages = vol->cluster_size >> PAGE_SHIFT;
last_vcn = -1;
do {
VCN vcn;
pgoff_t start_idx;
unsigned ofs, do_pages, u;
size_t copied;
start_idx = pos >> PAGE_SHIFT;
ofs = pos & ~PAGE_MASK;
bytes = PAGE_SIZE - ofs;
do_pages = 1;
if (nr_pages > 1) {
vcn = pos >> vol->cluster_size_bits;
if (vcn != last_vcn) {
last_vcn = vcn;
/*
* Get the lcn of the vcn the write is in. If
* it is a hole, need to lock down all pages in
* the cluster.
*/
down_read(&ni->runlist.lock);
lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
vol->cluster_size_bits, false);
up_read(&ni->runlist.lock);
if (unlikely(lcn < LCN_HOLE)) {
if (lcn == LCN_ENOMEM)
status = -ENOMEM;
else {
status = -EIO;
ntfs_error(vol->sb, "Cannot "
"perform write to "
"inode 0x%lx, "
"attribute type 0x%x, "
"because the attribute "
"is corrupt.",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type));
}
break;
}
if (lcn == LCN_HOLE) {
start_idx = (pos & ~(s64)
vol->cluster_size_mask)
>> PAGE_SHIFT;
bytes = vol->cluster_size - (pos &
vol->cluster_size_mask);
do_pages = nr_pages;
}
}
}
if (bytes > iov_iter_count(i))
bytes = iov_iter_count(i);
again:
/*
* Bring in the user page(s) that we will copy from _first_.
* Otherwise there is a nasty deadlock on copying from the same
* page(s) as we are writing to, without it/them being marked
* up-to-date. Note, at present there is nothing to stop the
* pages being swapped out between us bringing them into memory
* and doing the actual copying.
*/
if (unlikely(fault_in_iov_iter_readable(i, bytes))) {
status = -EFAULT;
break;
}
/* Get and lock @do_pages starting at index @start_idx. */
status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
pages, &cached_page);
if (unlikely(status))
break;
/*
* For non-resident attributes, we need to fill any holes with
* actual clusters and ensure all bufferes are mapped. We also
* need to bring uptodate any buffers that are only partially
* being written to.
*/
if (NInoNonResident(ni)) {
status = ntfs_prepare_pages_for_non_resident_write(
pages, do_pages, pos, bytes);
if (unlikely(status)) {
do {
unlock_page(pages[--do_pages]);
put_page(pages[do_pages]);
} while (do_pages);
break;
}
}
u = (pos >> PAGE_SHIFT) - pages[0]->index;
copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
i, bytes);
ntfs_flush_dcache_pages(pages + u, do_pages - u);
status = 0;
if (likely(copied == bytes)) {
status = ntfs_commit_pages_after_write(pages, do_pages,
pos, bytes);
}
do {
unlock_page(pages[--do_pages]);
put_page(pages[do_pages]);
} while (do_pages);
if (unlikely(status < 0)) {
iov_iter_revert(i, copied);
break;
}
cond_resched();
if (unlikely(copied < bytes)) {
iov_iter_revert(i, copied);
if (copied)
bytes = copied;
else if (bytes > PAGE_SIZE - ofs)
bytes = PAGE_SIZE - ofs;
goto again;
}
pos += copied;
written += copied;
balance_dirty_pages_ratelimited(mapping);
if (fatal_signal_pending(current)) {
status = -EINTR;
break;
}
} while (iov_iter_count(i));
if (cached_page)
put_page(cached_page);
ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
written ? "written" : "status", (unsigned long)written,
(long)status);
return written ? written : status;
}
/**
* ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock()
* @iocb: IO state structure
* @from: iov_iter with data to write
*
 * Basically the same as generic_file_write_iter() except that it ends up
 * calling ntfs_perform_write() instead of generic_perform_write() and that
* O_DIRECT is not implemented.
*/
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *vi = file_inode(file);
ssize_t written = 0;
ssize_t err;
inode_lock(vi);
/* We can write back this queue in page reclaim. */
err = ntfs_prepare_file_for_write(iocb, from);
if (iov_iter_count(from) && !err)
written = ntfs_perform_write(file, from, iocb->ki_pos);
inode_unlock(vi);
iocb->ki_pos += written;
if (likely(written > 0))
written = generic_write_sync(iocb, written);
return written ? written : err;
}
/**
* ntfs_file_fsync - sync a file to disk
* @filp: file to be synced
* @datasync: if non-zero only flush user data and not metadata
*
* Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync
* system calls. This function is inspired by fs/buffer.c::file_fsync().
*
* If @datasync is false, write the mft record and all associated extent mft
* records as well as the $DATA attribute and then sync the block device.
*
* If @datasync is true and the attribute is non-resident, we skip the writing
* of the mft record and all associated extent mft records (this might still
* happen due to the write_inode_now() call).
*
* Also, if @datasync is true, we do not wait on the inode to be written out
* but we always wait on the page cache pages to be written out.
*
* Locking: Caller must hold i_mutex on the inode.
*
* TODO: We should probably also write all attribute/index inodes associated
* with this inode but since we have no simple way of getting to them we ignore
* this problem for now.
*/
static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
int datasync)
{
struct inode *vi = filp->f_mapping->host;
int err, ret = 0;
ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
err = file_write_and_wait_range(filp, start, end);
if (err)
return err;
inode_lock(vi);
BUG_ON(S_ISDIR(vi->i_mode));
if (!datasync || !NInoNonResident(NTFS_I(vi)))
ret = __ntfs_write_inode(vi, 1);
write_inode_now(vi, !datasync);
/*
* NOTE: If we were to use mapping->private_list (see ext2 and
* fs/buffer.c) for dirty blocks then we could optimize the below to be
* sync_mapping_buffers(vi->i_mapping).
*/
err = sync_blockdev(vi->i_sb->s_bdev);
if (unlikely(err && !ret))
ret = err;
if (likely(!ret))
ntfs_debug("Done.");
else
ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error "
"%u.", datasync ? "data" : "", vi->i_ino, -ret);
inode_unlock(vi);
return ret;
}
#endif /* NTFS_RW */
const struct file_operations ntfs_file_ops = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
#ifdef NTFS_RW
.write_iter = ntfs_file_write_iter,
.fsync = ntfs_file_fsync,
#endif /* NTFS_RW */
.mmap = generic_file_mmap,
.open = ntfs_file_open,
.splice_read = filemap_splice_read,
};
const struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
.setattr = ntfs_setattr,
#endif /* NTFS_RW */
};
const struct file_operations ntfs_empty_file_ops = {};
const struct inode_operations ntfs_empty_inode_ops = {};
| linux-master | fs/ntfs/file.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2006 Anton Altaparmakov
*/
#include <linux/slab.h>
#include "types.h"
#include "debug.h"
#include "ntfs.h"
/*
* IMPORTANT
* =========
*
* All these routines assume that the Unicode characters are in little endian
* encoding inside the strings!!!
*/
/*
* This is used by the name collation functions to quickly determine what
* characters are (in)valid.
*/
static const u8 legal_ansi_char_array[0x40] = {
0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
0x17, 0x07, 0x18, 0x17, 0x17, 0x17, 0x17, 0x17,
0x17, 0x17, 0x18, 0x16, 0x16, 0x17, 0x07, 0x00,
0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17,
0x17, 0x17, 0x04, 0x16, 0x18, 0x16, 0x18, 0x18,
};
/**
* ntfs_are_names_equal - compare two Unicode names for equality
* @s1: name to compare to @s2
* @s1_len: length in Unicode characters of @s1
* @s2: name to compare to @s1
* @s2_len: length in Unicode characters of @s2
* @ic: ignore case bool
* @upcase: upcase table (only if @ic == IGNORE_CASE)
* @upcase_size: length in Unicode characters of @upcase (if present)
*
* Compare the names @s1 and @s2 and return 'true' (1) if the names are
* identical, or 'false' (0) if they are not identical. If @ic is IGNORE_CASE,
 * the @upcase table is used to perform a case insensitive comparison.
*/
bool ntfs_are_names_equal(const ntfschar *s1, size_t s1_len,
const ntfschar *s2, size_t s2_len, const IGNORE_CASE_BOOL ic,
const ntfschar *upcase, const u32 upcase_size)
{
if (s1_len != s2_len)
return false;
if (ic == CASE_SENSITIVE)
return !ntfs_ucsncmp(s1, s2, s1_len);
return !ntfs_ucsncasecmp(s1, s2, s1_len, upcase, upcase_size);
}
/**
* ntfs_collate_names - collate two Unicode names
* @name1: first Unicode name to compare
* @name2: second Unicode name to compare
* @err_val: if @name1 contains an invalid character return this value
* @ic: either CASE_SENSITIVE or IGNORE_CASE
* @upcase: upcase table (ignored if @ic is CASE_SENSITIVE)
* @upcase_len: upcase table size (ignored if @ic is CASE_SENSITIVE)
*
* ntfs_collate_names collates two Unicode names and returns:
*
* -1 if the first name collates before the second one,
* 0 if the names match,
* 1 if the second name collates before the first one, or
* @err_val if an invalid character is found in @name1 during the comparison.
*
* The following characters are considered invalid: '"', '*', '<', '>' and '?'.
*/
int ntfs_collate_names(const ntfschar *name1, const u32 name1_len,
const ntfschar *name2, const u32 name2_len,
const int err_val, const IGNORE_CASE_BOOL ic,
const ntfschar *upcase, const u32 upcase_len)
{
u32 cnt, min_len;
u16 c1, c2;
min_len = name1_len;
if (name1_len > name2_len)
min_len = name2_len;
for (cnt = 0; cnt < min_len; ++cnt) {
c1 = le16_to_cpu(*name1++);
c2 = le16_to_cpu(*name2++);
if (ic) {
if (c1 < upcase_len)
c1 = le16_to_cpu(upcase[c1]);
if (c2 < upcase_len)
c2 = le16_to_cpu(upcase[c2]);
}
if (c1 < 64 && legal_ansi_char_array[c1] & 8)
return err_val;
if (c1 < c2)
return -1;
if (c1 > c2)
return 1;
}
if (name1_len < name2_len)
return -1;
if (name1_len == name2_len)
return 0;
/* name1_len > name2_len */
c1 = le16_to_cpu(*name1);
if (c1 < 64 && legal_ansi_char_array[c1] & 8)
return err_val;
return 1;
}
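/*
 * Illustrative sketch (not part of the original file): how the collation
 * result maps to concrete inputs. The two names below are hypothetical.
 */
#if 0
static int example_collate_names(const ntfschar *upcase, const u32 upcase_len)
{
	static const ntfschar n1[] = { cpu_to_le16('a'), cpu_to_le16('b') };
	static const ntfschar n2[] = { cpu_to_le16('A'), cpu_to_le16('c') };

	/*
	 * With IGNORE_CASE, 'a' upcases to 'A' so the first characters
	 * match; 'B' < 'C' then makes the result -1 (n1 collates first).
	 */
	return ntfs_collate_names(n1, 2, n2, 2, -EINVAL, IGNORE_CASE,
			upcase, upcase_len);
}
#endif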
/**
* ntfs_ucsncmp - compare two little endian Unicode strings
* @s1: first string
* @s2: second string
* @n: maximum unicode characters to compare
*
 * Compare the first @n characters of the Unicode strings @s1 and @s2.
 * The strings are in little endian format and the appropriate le16_to_cpu()
 * conversion is performed on non-little endian machines.
*
* The function returns an integer less than, equal to, or greater than zero
* if @s1 (or the first @n Unicode characters thereof) is found, respectively,
* to be less than, to match, or be greater than @s2.
*/
int ntfs_ucsncmp(const ntfschar *s1, const ntfschar *s2, size_t n)
{
u16 c1, c2;
size_t i;
for (i = 0; i < n; ++i) {
c1 = le16_to_cpu(s1[i]);
c2 = le16_to_cpu(s2[i]);
if (c1 < c2)
return -1;
if (c1 > c2)
return 1;
if (!c1)
break;
}
return 0;
}
/**
* ntfs_ucsncasecmp - compare two little endian Unicode strings, ignoring case
* @s1: first string
* @s2: second string
* @n: maximum unicode characters to compare
* @upcase: upcase table
* @upcase_size: upcase table size in Unicode characters
*
* Compare the first @n characters of the Unicode strings @s1 and @s2,
 * ignoring case. The strings are in little endian format and the appropriate
 * le16_to_cpu() conversion is performed on non-little endian machines.
*
* Each character is uppercased using the @upcase table before the comparison.
*
* The function returns an integer less than, equal to, or greater than zero
* if @s1 (or the first @n Unicode characters thereof) is found, respectively,
* to be less than, to match, or be greater than @s2.
*/
int ntfs_ucsncasecmp(const ntfschar *s1, const ntfschar *s2, size_t n,
const ntfschar *upcase, const u32 upcase_size)
{
size_t i;
u16 c1, c2;
for (i = 0; i < n; ++i) {
if ((c1 = le16_to_cpu(s1[i])) < upcase_size)
c1 = le16_to_cpu(upcase[c1]);
if ((c2 = le16_to_cpu(s2[i])) < upcase_size)
c2 = le16_to_cpu(upcase[c2]);
if (c1 < c2)
return -1;
if (c1 > c2)
return 1;
if (!c1)
break;
}
return 0;
}
void ntfs_upcase_name(ntfschar *name, u32 name_len, const ntfschar *upcase,
const u32 upcase_len)
{
u32 i;
u16 u;
for (i = 0; i < name_len; i++)
if ((u = le16_to_cpu(name[i])) < upcase_len)
name[i] = upcase[u];
}
void ntfs_file_upcase_value(FILE_NAME_ATTR *file_name_attr,
const ntfschar *upcase, const u32 upcase_len)
{
ntfs_upcase_name((ntfschar*)&file_name_attr->file_name,
file_name_attr->file_name_length, upcase, upcase_len);
}
int ntfs_file_compare_values(FILE_NAME_ATTR *file_name_attr1,
FILE_NAME_ATTR *file_name_attr2,
const int err_val, const IGNORE_CASE_BOOL ic,
const ntfschar *upcase, const u32 upcase_len)
{
return ntfs_collate_names((ntfschar*)&file_name_attr1->file_name,
file_name_attr1->file_name_length,
(ntfschar*)&file_name_attr2->file_name,
file_name_attr2->file_name_length,
err_val, ic, upcase, upcase_len);
}
/**
* ntfs_nlstoucs - convert NLS string to little endian Unicode string
* @vol: ntfs volume which we are working with
* @ins: input NLS string buffer
* @ins_len: length of input string in bytes
* @outs: on return contains the allocated output Unicode string buffer
*
* Convert the input string @ins, which is in whatever format the loaded NLS
* map dictates, into a little endian, 2-byte Unicode string.
*
* This function allocates the string and the caller is responsible for
* calling kmem_cache_free(ntfs_name_cache, *@outs); when finished with it.
*
* On success the function returns the number of Unicode characters written to
* the output string *@outs (>= 0), not counting the terminating Unicode NULL
* character. *@outs is set to the allocated output string buffer.
*
* On error, a negative number corresponding to the error code is returned. In
 * that case the output string is not allocated and the contents of *@outs
 * are undefined.
*
* This might look a bit odd due to fast path optimization...
*/
int ntfs_nlstoucs(const ntfs_volume *vol, const char *ins,
const int ins_len, ntfschar **outs)
{
struct nls_table *nls = vol->nls_map;
ntfschar *ucs;
wchar_t wc;
int i, o, wc_len;
/* We do not trust outside sources. */
if (likely(ins)) {
ucs = kmem_cache_alloc(ntfs_name_cache, GFP_NOFS);
if (likely(ucs)) {
for (i = o = 0; i < ins_len; i += wc_len) {
wc_len = nls->char2uni(ins + i, ins_len - i,
&wc);
if (likely(wc_len >= 0 &&
o < NTFS_MAX_NAME_LEN)) {
if (likely(wc)) {
ucs[o++] = cpu_to_le16(wc);
continue;
} /* else if (!wc) */
break;
} /* else if (wc_len < 0 ||
o >= NTFS_MAX_NAME_LEN) */
goto name_err;
}
ucs[o] = 0;
*outs = ucs;
return o;
} /* else if (!ucs) */
ntfs_error(vol->sb, "Failed to allocate buffer for converted "
"name from ntfs_name_cache.");
return -ENOMEM;
} /* else if (!ins) */
ntfs_error(vol->sb, "Received NULL pointer.");
return -EINVAL;
name_err:
kmem_cache_free(ntfs_name_cache, ucs);
if (wc_len < 0) {
ntfs_error(vol->sb, "Name using character set %s contains "
"characters that cannot be converted to "
"Unicode.", nls->charset);
i = -EILSEQ;
} else /* if (o >= NTFS_MAX_NAME_LEN) */ {
ntfs_error(vol->sb, "Name is too long (maximum length for a "
"name on NTFS is %d Unicode characters.",
NTFS_MAX_NAME_LEN);
i = -ENAMETOOLONG;
}
return i;
}
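/*
 * Illustrative sketch (not part of the original file): converting an NLS
 * name to Unicode and releasing the buffer afterwards. The file name used
 * here is hypothetical.
 */
#if 0
static int example_nlstoucs(const ntfs_volume *vol)
{
	ntfschar *uname;
	int uname_len;

	uname_len = ntfs_nlstoucs(vol, "example.txt", 11, &uname);
	if (uname_len < 0)
		return uname_len; /* -EILSEQ, -ENAMETOOLONG or -ENOMEM */
	/* ... use the uname_len characters in uname ... */
	kmem_cache_free(ntfs_name_cache, uname);
	return 0;
}
#endif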
/**
* ntfs_ucstonls - convert little endian Unicode string to NLS string
* @vol: ntfs volume which we are working with
* @ins: input Unicode string buffer
* @ins_len: length of input string in Unicode characters
* @outs: on return contains the (allocated) output NLS string buffer
* @outs_len: length of output string buffer in bytes
*
* Convert the input little endian, 2-byte Unicode string @ins, of length
* @ins_len into the string format dictated by the loaded NLS.
*
* If *@outs is NULL, this function allocates the string and the caller is
* responsible for calling kfree(*@outs); when finished with it. In this case
* @outs_len is ignored and can be 0.
*
* On success the function returns the number of bytes written to the output
* string *@outs (>= 0), not counting the terminating NULL byte. If the output
* string buffer was allocated, *@outs is set to it.
*
* On error, a negative number corresponding to the error code is returned. In
* that case the output string is not allocated. The contents of *@outs are
* then undefined.
*
* This might look a bit odd due to fast path optimization...
*/
int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins,
const int ins_len, unsigned char **outs, int outs_len)
{
struct nls_table *nls = vol->nls_map;
unsigned char *ns;
int i, o, ns_len, wc;
/* We don't trust outside sources. */
if (ins) {
ns = *outs;
ns_len = outs_len;
if (ns && !ns_len) {
wc = -ENAMETOOLONG;
goto conversion_err;
}
if (!ns) {
ns_len = ins_len * NLS_MAX_CHARSET_SIZE;
ns = kmalloc(ns_len + 1, GFP_NOFS);
if (!ns)
goto mem_err_out;
}
for (i = o = 0; i < ins_len; i++) {
retry: wc = nls->uni2char(le16_to_cpu(ins[i]), ns + o,
ns_len - o);
if (wc > 0) {
o += wc;
continue;
} else if (!wc)
break;
else if (wc == -ENAMETOOLONG && ns != *outs) {
unsigned char *tc;
/* Grow in multiples of 64 bytes. */
tc = kmalloc((ns_len + 64) &
~63, GFP_NOFS);
if (tc) {
memcpy(tc, ns, ns_len);
ns_len = ((ns_len + 64) & ~63) - 1;
kfree(ns);
ns = tc;
goto retry;
} /* No memory so goto conversion_error; */
} /* wc < 0, real error. */
goto conversion_err;
}
ns[o] = 0;
*outs = ns;
return o;
} /* else (!ins) */
ntfs_error(vol->sb, "Received NULL pointer.");
return -EINVAL;
conversion_err:
ntfs_error(vol->sb, "Unicode name contains characters that cannot be "
"converted to character set %s. You might want to "
"try to use the mount option nls=utf8.", nls->charset);
if (ns != *outs)
kfree(ns);
if (wc != -ENAMETOOLONG)
wc = -EILSEQ;
return wc;
mem_err_out:
ntfs_error(vol->sb, "Failed to allocate name!");
return -ENOMEM;
}
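/*
 * Illustrative sketch (not part of the original file): letting
 * ntfs_ucstonls() allocate the output buffer by passing in *@outs == NULL.
 */
#if 0
static int example_ucstonls(const ntfs_volume *vol, const ntfschar *uname,
		const int uname_len)
{
	unsigned char *name = NULL;
	int name_len;

	name_len = ntfs_ucstonls(vol, uname, uname_len, &name, 0);
	if (name_len < 0)
		return name_len;
	/* ... use name, which is NULL terminated ... */
	kfree(name);
	return 0;
}
#endif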
| linux-master | fs/ntfs/unistr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* usnjrnl.h - NTFS kernel transaction log ($UsnJrnl) handling. Part of the
* Linux-NTFS project.
*
* Copyright (c) 2005 Anton Altaparmakov
*/
#ifdef NTFS_RW
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include "aops.h"
#include "debug.h"
#include "endian.h"
#include "time.h"
#include "types.h"
#include "usnjrnl.h"
#include "volume.h"
/**
* ntfs_stamp_usnjrnl - stamp the transaction log ($UsnJrnl) on an ntfs volume
* @vol: ntfs volume on which to stamp the transaction log
*
* Stamp the transaction log ($UsnJrnl) on the ntfs volume @vol and return
* 'true' on success and 'false' on error.
*
* This function assumes that the transaction log has already been loaded and
* consistency checked by a call to fs/ntfs/super.c::load_and_init_usnjrnl().
*/
bool ntfs_stamp_usnjrnl(ntfs_volume *vol)
{
ntfs_debug("Entering.");
if (likely(!NVolUsnJrnlStamped(vol))) {
sle64 stamp;
struct page *page;
USN_HEADER *uh;
page = ntfs_map_page(vol->usnjrnl_max_ino->i_mapping, 0);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read from "
"$UsnJrnl/$DATA/$Max attribute.");
return false;
}
uh = (USN_HEADER*)page_address(page);
stamp = get_current_ntfs_time();
ntfs_debug("Stamping transaction log ($UsnJrnl): old "
"journal_id 0x%llx, old lowest_valid_usn "
"0x%llx, new journal_id 0x%llx, new "
"lowest_valid_usn 0x%llx.",
(long long)sle64_to_cpu(uh->journal_id),
(long long)sle64_to_cpu(uh->lowest_valid_usn),
(long long)sle64_to_cpu(stamp),
i_size_read(vol->usnjrnl_j_ino));
uh->lowest_valid_usn =
cpu_to_sle64(i_size_read(vol->usnjrnl_j_ino));
uh->journal_id = stamp;
flush_dcache_page(page);
set_page_dirty(page);
ntfs_unmap_page(page);
/* Set the flag so we do not have to do it again on remount. */
NVolSetUsnJrnlStamped(vol);
}
ntfs_debug("Done.");
return true;
}
#endif /* NTFS_RW */
| linux-master | fs/ntfs/usnjrnl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* mst.c - NTFS multi sector transfer protection handling code. Part of the
* Linux-NTFS project.
*
* Copyright (c) 2001-2004 Anton Altaparmakov
*/
#include "ntfs.h"
/**
* post_read_mst_fixup - deprotect multi sector transfer protected data
* @b: pointer to the data to deprotect
* @size: size in bytes of @b
*
* Perform the necessary post read multi sector transfer fixup and detect the
* presence of incomplete multi sector transfers. - In that case, overwrite the
* magic of the ntfs record header being processed with "BAAD" (in memory only!)
* and abort processing.
*
* Return 0 on success and -EINVAL on error ("BAAD" magic will be present).
*
* NOTE: We consider the absence / invalidity of an update sequence array to
* mean that the structure is not protected at all and hence doesn't need to
* be fixed up. Thus, we return success and not failure in this case. This is
* in contrast to pre_write_mst_fixup(), see below.
*/
int post_read_mst_fixup(NTFS_RECORD *b, const u32 size)
{
u16 usa_ofs, usa_count, usn;
u16 *usa_pos, *data_pos;
/* Setup the variables. */
usa_ofs = le16_to_cpu(b->usa_ofs);
/* Decrement usa_count to get number of fixups. */
usa_count = le16_to_cpu(b->usa_count) - 1;
/* Size and alignment checks. */
if ( size & (NTFS_BLOCK_SIZE - 1) ||
usa_ofs & 1 ||
usa_ofs + (usa_count * 2) > size ||
(size >> NTFS_BLOCK_SIZE_BITS) != usa_count)
return 0;
/* Position of usn in update sequence array. */
usa_pos = (u16*)b + usa_ofs/sizeof(u16);
/*
* The update sequence number which has to be equal to each of the
* u16 values before they are fixed up. Note no need to care for
* endianness since we are comparing and moving data for on disk
* structures which means the data is consistent. - If it is
 * consistently the wrong endianness it doesn't make any difference.
*/
usn = *usa_pos;
/*
* Position in protected data of first u16 that needs fixing up.
*/
data_pos = (u16*)b + NTFS_BLOCK_SIZE/sizeof(u16) - 1;
/*
* Check for incomplete multi sector transfer(s).
*/
while (usa_count--) {
if (*data_pos != usn) {
/*
* Incomplete multi sector transfer detected! )-:
* Set the magic to "BAAD" and return failure.
* Note that magic_BAAD is already converted to le32.
*/
b->magic = magic_BAAD;
return -EINVAL;
}
data_pos += NTFS_BLOCK_SIZE/sizeof(u16);
}
/* Re-setup the variables. */
usa_count = le16_to_cpu(b->usa_count) - 1;
data_pos = (u16*)b + NTFS_BLOCK_SIZE/sizeof(u16) - 1;
/* Fixup all sectors. */
while (usa_count--) {
/*
* Increment position in usa and restore original data from
* the usa into the data buffer.
*/
*data_pos = *(++usa_pos);
/* Increment position in data as well. */
data_pos += NTFS_BLOCK_SIZE/sizeof(u16);
}
return 0;
}
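/*
 * Illustrative sketch (not part of the original file): typical read side
 * use of the fixup. @rec is assumed to point at a just-read, MST protected
 * record of @rec_size bytes.
 */
#if 0
static int example_post_read(NTFS_RECORD *rec, const u32 rec_size)
{
	if (post_read_mst_fixup(rec, rec_size)) {
		/* Incomplete multi sector transfer; magic is now "BAAD". */
		return -EIO;
	}
	/* The record contents are deprotected and safe to use. */
	return 0;
}
#endif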
/**
* pre_write_mst_fixup - apply multi sector transfer protection
* @b: pointer to the data to protect
* @size: size in bytes of @b
*
* Perform the necessary pre write multi sector transfer fixup on the data
* pointer to by @b of @size.
*
* Return 0 if fixup applied (success) or -EINVAL if no fixup was performed
* (assumed not needed). This is in contrast to post_read_mst_fixup() above.
*
* NOTE: We consider the absence / invalidity of an update sequence array to
* mean that the structure is not subject to protection and hence doesn't need
* to be fixed up. This means that you have to create a valid update sequence
* array header in the ntfs record before calling this function, otherwise it
* will fail (the header needs to contain the position of the update sequence
* array together with the number of elements in the array). You also need to
* initialise the update sequence number before calling this function
* otherwise a random word will be used (whatever was in the record at that
* position at that time).
*/
int pre_write_mst_fixup(NTFS_RECORD *b, const u32 size)
{
le16 *usa_pos, *data_pos;
u16 usa_ofs, usa_count, usn;
le16 le_usn;
/* Sanity check + only fixup if it makes sense. */
if (!b || ntfs_is_baad_record(b->magic) ||
ntfs_is_hole_record(b->magic))
return -EINVAL;
/* Setup the variables. */
usa_ofs = le16_to_cpu(b->usa_ofs);
/* Decrement usa_count to get number of fixups. */
usa_count = le16_to_cpu(b->usa_count) - 1;
/* Size and alignment checks. */
if ( size & (NTFS_BLOCK_SIZE - 1) ||
usa_ofs & 1 ||
usa_ofs + (usa_count * 2) > size ||
(size >> NTFS_BLOCK_SIZE_BITS) != usa_count)
return -EINVAL;
/* Position of usn in update sequence array. */
usa_pos = (le16*)((u8*)b + usa_ofs);
/*
* Cyclically increment the update sequence number
* (skipping 0 and -1, i.e. 0xffff).
*/
usn = le16_to_cpup(usa_pos) + 1;
if (usn == 0xffff || !usn)
usn = 1;
le_usn = cpu_to_le16(usn);
*usa_pos = le_usn;
/* Position in data of first u16 that needs fixing up. */
data_pos = (le16*)b + NTFS_BLOCK_SIZE/sizeof(le16) - 1;
/* Fixup all sectors. */
while (usa_count--) {
/*
* Increment the position in the usa and save the
* original data from the data buffer into the usa.
*/
*(++usa_pos) = *data_pos;
/* Apply fixup to data. */
*data_pos = le_usn;
/* Increment position in data as well. */
data_pos += NTFS_BLOCK_SIZE/sizeof(le16);
}
return 0;
}
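/*
 * Illustrative sketch (not part of the original file): the write side
 * pairs pre_write_mst_fixup() with post_write_mst_fixup() so that the
 * in-memory copy is deprotected again once the protected data has been
 * handed to the block layer.
 */
#if 0
static int example_mst_write(NTFS_RECORD *rec, const u32 rec_size)
{
	int err;

	err = pre_write_mst_fixup(rec, rec_size);
	if (err)
		return err;
	/* ... submit the now protected record for writing ... */
	post_write_mst_fixup(rec);
	return 0;
}
#endif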
/**
* post_write_mst_fixup - fast deprotect multi sector transfer protected data
* @b: pointer to the data to deprotect
*
* Perform the necessary post write multi sector transfer fixup, not checking
* for any errors, because we assume we have just used pre_write_mst_fixup(),
* thus the data will be fine or we would never have gotten here.
*/
void post_write_mst_fixup(NTFS_RECORD *b)
{
le16 *usa_pos, *data_pos;
u16 usa_ofs = le16_to_cpu(b->usa_ofs);
u16 usa_count = le16_to_cpu(b->usa_count) - 1;
/* Position of usn in update sequence array. */
usa_pos = (le16*)b + usa_ofs/sizeof(le16);
/* Position in protected data of first u16 that needs fixing up. */
data_pos = (le16*)b + NTFS_BLOCK_SIZE/sizeof(le16) - 1;
/* Fixup all sectors. */
while (usa_count--) {
/*
* Increment position in usa and restore original data from
* the usa into the data buffer.
*/
*data_pos = *(++usa_pos);
/* Increment position in data as well. */
data_pos += NTFS_BLOCK_SIZE/sizeof(le16);
}
}
| linux-master | fs/ntfs/mst.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* upcase.c - Generate the full NTFS Unicode upcase table in little endian.
* Part of the Linux-NTFS project.
*
* Copyright (c) 2001 Richard Russon <[email protected]>
* Copyright (c) 2001-2006 Anton Altaparmakov
*/
#include "malloc.h"
#include "ntfs.h"
ntfschar *generate_default_upcase(void)
{
static const int uc_run_table[][3] = { /* Start, End, Add */
{0x0061, 0x007B, -32}, {0x0451, 0x045D, -80}, {0x1F70, 0x1F72, 74},
{0x00E0, 0x00F7, -32}, {0x045E, 0x0460, -80}, {0x1F72, 0x1F76, 86},
{0x00F8, 0x00FF, -32}, {0x0561, 0x0587, -48}, {0x1F76, 0x1F78, 100},
{0x0256, 0x0258, -205}, {0x1F00, 0x1F08, 8}, {0x1F78, 0x1F7A, 128},
{0x028A, 0x028C, -217}, {0x1F10, 0x1F16, 8}, {0x1F7A, 0x1F7C, 112},
{0x03AC, 0x03AD, -38}, {0x1F20, 0x1F28, 8}, {0x1F7C, 0x1F7E, 126},
{0x03AD, 0x03B0, -37}, {0x1F30, 0x1F38, 8}, {0x1FB0, 0x1FB2, 8},
{0x03B1, 0x03C2, -32}, {0x1F40, 0x1F46, 8}, {0x1FD0, 0x1FD2, 8},
{0x03C2, 0x03C3, -31}, {0x1F51, 0x1F52, 8}, {0x1FE0, 0x1FE2, 8},
{0x03C3, 0x03CC, -32}, {0x1F53, 0x1F54, 8}, {0x1FE5, 0x1FE6, 7},
{0x03CC, 0x03CD, -64}, {0x1F55, 0x1F56, 8}, {0x2170, 0x2180, -16},
{0x03CD, 0x03CF, -63}, {0x1F57, 0x1F58, 8}, {0x24D0, 0x24EA, -26},
{0x0430, 0x0450, -32}, {0x1F60, 0x1F68, 8}, {0xFF41, 0xFF5B, -32},
{0}
};
static const int uc_dup_table[][2] = { /* Start, End */
{0x0100, 0x012F}, {0x01A0, 0x01A6}, {0x03E2, 0x03EF}, {0x04CB, 0x04CC},
{0x0132, 0x0137}, {0x01B3, 0x01B7}, {0x0460, 0x0481}, {0x04D0, 0x04EB},
{0x0139, 0x0149}, {0x01CD, 0x01DD}, {0x0490, 0x04BF}, {0x04EE, 0x04F5},
{0x014A, 0x0178}, {0x01DE, 0x01EF}, {0x04BF, 0x04BF}, {0x04F8, 0x04F9},
{0x0179, 0x017E}, {0x01F4, 0x01F5}, {0x04C1, 0x04C4}, {0x1E00, 0x1E95},
{0x018B, 0x018B}, {0x01FA, 0x0218}, {0x04C7, 0x04C8}, {0x1EA0, 0x1EF9},
{0}
};
static const int uc_word_table[][2] = { /* Offset, Value */
{0x00FF, 0x0178}, {0x01AD, 0x01AC}, {0x01F3, 0x01F1}, {0x0269, 0x0196},
{0x0183, 0x0182}, {0x01B0, 0x01AF}, {0x0253, 0x0181}, {0x026F, 0x019C},
{0x0185, 0x0184}, {0x01B9, 0x01B8}, {0x0254, 0x0186}, {0x0272, 0x019D},
{0x0188, 0x0187}, {0x01BD, 0x01BC}, {0x0259, 0x018F}, {0x0275, 0x019F},
{0x018C, 0x018B}, {0x01C6, 0x01C4}, {0x025B, 0x0190}, {0x0283, 0x01A9},
{0x0192, 0x0191}, {0x01C9, 0x01C7}, {0x0260, 0x0193}, {0x0288, 0x01AE},
{0x0199, 0x0198}, {0x01CC, 0x01CA}, {0x0263, 0x0194}, {0x0292, 0x01B7},
{0x01A8, 0x01A7}, {0x01DD, 0x018E}, {0x0268, 0x0197},
{0}
};
int i, r;
ntfschar *uc;
uc = ntfs_malloc_nofs(default_upcase_len * sizeof(ntfschar));
if (!uc)
return uc;
memset(uc, 0, default_upcase_len * sizeof(ntfschar));
/* Generate the little endian Unicode upcase table used by ntfs. */
for (i = 0; i < default_upcase_len; i++)
uc[i] = cpu_to_le16(i);
for (r = 0; uc_run_table[r][0]; r++)
for (i = uc_run_table[r][0]; i < uc_run_table[r][1]; i++)
le16_add_cpu(&uc[i], uc_run_table[r][2]);
for (r = 0; uc_dup_table[r][0]; r++)
for (i = uc_dup_table[r][0]; i < uc_dup_table[r][1]; i += 2)
le16_add_cpu(&uc[i + 1], -1);
for (r = 0; uc_word_table[r][0]; r++)
uc[uc_word_table[r][0]] = cpu_to_le16(uc_word_table[r][1]);
return uc;
}
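/*
 * Illustrative sketch (not part of the original file): upcasing a single
 * character with the generated table; characters beyond the table are
 * left unchanged.
 */
#if 0
static ntfschar example_upcase_char(const ntfschar *uc, const u32 c)
{
	/* The default table covers the whole 16-bit character range. */
	return c < default_upcase_len ? uc[c] : cpu_to_le16((u16)c);
}
#endif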
| linux-master | fs/ntfs/upcase.c |
#include <linux/module.h>
#include "hostfs.h"
EXPORT_SYMBOL_GPL(stat_file);
EXPORT_SYMBOL_GPL(access_file);
EXPORT_SYMBOL_GPL(open_file);
EXPORT_SYMBOL_GPL(open_dir);
EXPORT_SYMBOL_GPL(seek_dir);
EXPORT_SYMBOL_GPL(read_dir);
EXPORT_SYMBOL_GPL(read_file);
EXPORT_SYMBOL_GPL(write_file);
EXPORT_SYMBOL_GPL(lseek_file);
EXPORT_SYMBOL_GPL(fsync_file);
EXPORT_SYMBOL_GPL(replace_file);
EXPORT_SYMBOL_GPL(close_file);
EXPORT_SYMBOL_GPL(close_dir);
EXPORT_SYMBOL_GPL(file_create);
EXPORT_SYMBOL_GPL(set_attr);
EXPORT_SYMBOL_GPL(make_symlink);
EXPORT_SYMBOL_GPL(unlink_file);
EXPORT_SYMBOL_GPL(do_mkdir);
EXPORT_SYMBOL_GPL(hostfs_do_rmdir);
EXPORT_SYMBOL_GPL(do_mknod);
EXPORT_SYMBOL_GPL(link_file);
EXPORT_SYMBOL_GPL(hostfs_do_readlink);
EXPORT_SYMBOL_GPL(rename_file);
EXPORT_SYMBOL_GPL(rename2_file);
EXPORT_SYMBOL_GPL(do_statfs);
| linux-master | fs/hostfs/hostfs_user_exp.c |
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*
* Ported the filesystem routines to 2.5.
* 2003-02-10 Petr Baudis <[email protected]>
*/
#include <linux/fs.h>
#include <linux/magic.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/statfs.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include "hostfs.h"
#include <init.h>
#include <kern.h>
struct hostfs_inode_info {
int fd;
fmode_t mode;
struct inode vfs_inode;
struct mutex open_mutex;
dev_t dev;
};
static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
{
return list_entry(inode, struct hostfs_inode_info, vfs_inode);
}
#define FILE_HOSTFS_I(file) HOSTFS_I(file_inode(file))
static struct kmem_cache *hostfs_inode_cache;
/* Changed in hostfs_args before the kernel starts running */
static char *root_ino = "";
static int append = 0;
static const struct inode_operations hostfs_iops;
static const struct inode_operations hostfs_dir_iops;
static const struct inode_operations hostfs_link_iops;
#ifndef MODULE
static int __init hostfs_args(char *options, int *add)
{
char *ptr;
ptr = strchr(options, ',');
if (ptr != NULL)
*ptr++ = '\0';
if (*options != '\0')
root_ino = options;
options = ptr;
while (options) {
ptr = strchr(options, ',');
if (ptr != NULL)
*ptr++ = '\0';
if (*options != '\0') {
if (!strcmp(options, "append"))
append = 1;
else printf("hostfs_args - unsupported option - %s\n",
options);
}
options = ptr;
}
return 0;
}
__uml_setup("hostfs=", hostfs_args,
"hostfs=<root dir>,<flags>,...\n"
" This is used to set hostfs parameters. The root directory argument\n"
" is used to confine all hostfs mounts to within the specified directory\n"
" tree on the host. If this isn't specified, then a user inside UML can\n"
" mount anything on the host that's accessible to the user that's running\n"
" it.\n"
" The only flag currently supported is 'append', which specifies that all\n"
" files opened by hostfs will be opened in append mode.\n\n"
);
#endif
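/*
 * Build the full host path for @dentry into @name, a PATH_MAX sized
 * buffer. Illustrative example (not from the original source): with
 * s_fs_info "/host" and a dentry path of "/dir/file", dentry_path_raw()
 * writes "/dir/file" at the very end of the buffer, the host root is
 * copied to the front, and the two are joined to give "/host/dir/file".
 */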
static char *__dentry_name(struct dentry *dentry, char *name)
{
char *p = dentry_path_raw(dentry, name, PATH_MAX);
char *root;
size_t len;
root = dentry->d_sb->s_fs_info;
len = strlen(root);
if (IS_ERR(p)) {
__putname(name);
return NULL;
}
/*
* This function relies on the fact that dentry_path_raw() will place
* the path name at the end of the provided buffer.
*/
BUG_ON(p + strlen(p) + 1 != name + PATH_MAX);
strscpy(name, root, PATH_MAX);
if (len > p - name) {
__putname(name);
return NULL;
}
if (p > name + len)
strcpy(name + len, p);
return name;
}
static char *dentry_name(struct dentry *dentry)
{
char *name = __getname();
if (!name)
return NULL;
return __dentry_name(dentry, name);
}
static char *inode_name(struct inode *ino)
{
struct dentry *dentry;
char *name;
dentry = d_find_alias(ino);
if (!dentry)
return NULL;
name = dentry_name(dentry);
dput(dentry);
return name;
}
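/*
 * Resolve the target of a host symlink relative to the link's own
 * directory. Illustrative example (not from the original source): for a
 * link "/host/a/b" whose target reads "c", the resolved path is
 * "/host/a/c"; an absolute target is returned unchanged.
 */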
static char *follow_link(char *link)
{
char *name, *resolved, *end;
int n;
name = kmalloc(PATH_MAX, GFP_KERNEL);
if (!name) {
n = -ENOMEM;
goto out_free;
}
n = hostfs_do_readlink(link, name, PATH_MAX);
if (n < 0)
goto out_free;
else if (n == PATH_MAX) {
n = -E2BIG;
goto out_free;
}
if (*name == '/')
return name;
end = strrchr(link, '/');
if (end == NULL)
return name;
*(end + 1) = '\0';
resolved = kasprintf(GFP_KERNEL, "%s%s", link, name);
if (resolved == NULL) {
n = -ENOMEM;
goto out_free;
}
kfree(name);
return resolved;
out_free:
kfree(name);
return ERR_PTR(n);
}
static int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
{
/*
* do_statfs uses struct statfs64 internally, but the linux kernel
* struct statfs still has 32-bit versions for most of these fields,
* so we convert them here
*/
int err;
long long f_blocks;
long long f_bfree;
long long f_bavail;
long long f_files;
long long f_ffree;
err = do_statfs(dentry->d_sb->s_fs_info,
&sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
&f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
&sf->f_namelen);
if (err)
return err;
sf->f_blocks = f_blocks;
sf->f_bfree = f_bfree;
sf->f_bavail = f_bavail;
sf->f_files = f_files;
sf->f_ffree = f_ffree;
sf->f_type = HOSTFS_SUPER_MAGIC;
return 0;
}
static struct inode *hostfs_alloc_inode(struct super_block *sb)
{
struct hostfs_inode_info *hi;
hi = alloc_inode_sb(sb, hostfs_inode_cache, GFP_KERNEL_ACCOUNT);
if (hi == NULL)
return NULL;
hi->fd = -1;
hi->mode = 0;
hi->dev = 0;
inode_init_once(&hi->vfs_inode);
mutex_init(&hi->open_mutex);
return &hi->vfs_inode;
}
static void hostfs_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
if (HOSTFS_I(inode)->fd != -1) {
close_file(&HOSTFS_I(inode)->fd);
HOSTFS_I(inode)->fd = -1;
HOSTFS_I(inode)->dev = 0;
}
}
static void hostfs_free_inode(struct inode *inode)
{
kmem_cache_free(hostfs_inode_cache, HOSTFS_I(inode));
}
static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
{
const char *root_path = root->d_sb->s_fs_info;
size_t offset = strlen(root_ino) + 1;
if (strlen(root_path) > offset)
seq_show_option(seq, root_path + offset, NULL);
if (append)
seq_puts(seq, ",append");
return 0;
}
static const struct super_operations hostfs_sbops = {
.alloc_inode = hostfs_alloc_inode,
.free_inode = hostfs_free_inode,
.drop_inode = generic_delete_inode,
.evict_inode = hostfs_evict_inode,
.statfs = hostfs_statfs,
.show_options = hostfs_show_options,
};
static int hostfs_readdir(struct file *file, struct dir_context *ctx)
{
void *dir;
char *name;
unsigned long long next, ino;
int error, len;
unsigned int type;
name = dentry_name(file->f_path.dentry);
if (name == NULL)
return -ENOMEM;
dir = open_dir(name, &error);
__putname(name);
if (dir == NULL)
return -error;
next = ctx->pos;
seek_dir(dir, next);
while ((name = read_dir(dir, &next, &ino, &len, &type)) != NULL) {
if (!dir_emit(ctx, name, len, ino, type))
break;
ctx->pos = next;
}
close_dir(dir);
return 0;
}
static int hostfs_open(struct inode *ino, struct file *file)
{
char *name;
fmode_t mode;
int err;
int r, w, fd;
mode = file->f_mode & (FMODE_READ | FMODE_WRITE);
if ((mode & HOSTFS_I(ino)->mode) == mode)
return 0;
mode |= HOSTFS_I(ino)->mode;
retry:
r = w = 0;
if (mode & FMODE_READ)
r = 1;
if (mode & FMODE_WRITE)
r = w = 1;
name = dentry_name(file_dentry(file));
if (name == NULL)
return -ENOMEM;
fd = open_file(name, r, w, append);
__putname(name);
if (fd < 0)
return fd;
mutex_lock(&HOSTFS_I(ino)->open_mutex);
/* somebody else had handled it first? */
if ((mode & HOSTFS_I(ino)->mode) == mode) {
mutex_unlock(&HOSTFS_I(ino)->open_mutex);
close_file(&fd);
return 0;
}
if ((mode | HOSTFS_I(ino)->mode) != mode) {
mode |= HOSTFS_I(ino)->mode;
mutex_unlock(&HOSTFS_I(ino)->open_mutex);
close_file(&fd);
goto retry;
}
if (HOSTFS_I(ino)->fd == -1) {
HOSTFS_I(ino)->fd = fd;
} else {
err = replace_file(fd, HOSTFS_I(ino)->fd);
close_file(&fd);
if (err < 0) {
mutex_unlock(&HOSTFS_I(ino)->open_mutex);
return err;
}
}
HOSTFS_I(ino)->mode = mode;
mutex_unlock(&HOSTFS_I(ino)->open_mutex);
return 0;
}
static int hostfs_file_release(struct inode *inode, struct file *file)
{
filemap_write_and_wait(inode->i_mapping);
return 0;
}
static int hostfs_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
struct inode *inode = file->f_mapping->host;
int ret;
ret = file_write_and_wait_range(file, start, end);
if (ret)
return ret;
inode_lock(inode);
ret = fsync_file(HOSTFS_I(inode)->fd, datasync);
inode_unlock(inode);
return ret;
}
static const struct file_operations hostfs_file_fops = {
.llseek = generic_file_llseek,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.open = hostfs_open,
.release = hostfs_file_release,
.fsync = hostfs_fsync,
};
static const struct file_operations hostfs_dir_fops = {
.llseek = generic_file_llseek,
.iterate_shared = hostfs_readdir,
.read = generic_read_dir,
.open = hostfs_open,
.fsync = hostfs_fsync,
};
static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
char *buffer;
loff_t base = page_offset(page);
int count = PAGE_SIZE;
int end_index = inode->i_size >> PAGE_SHIFT;
int err;
if (page->index >= end_index)
count = inode->i_size & (PAGE_SIZE-1);
buffer = kmap_local_page(page);
err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count);
if (err != count) {
if (err >= 0)
err = -EIO;
mapping_set_error(mapping, err);
goto out;
}
if (base > inode->i_size)
inode->i_size = base;
err = 0;
out:
kunmap_local(buffer);
unlock_page(page);
return err;
}
static int hostfs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
char *buffer;
loff_t start = page_offset(page);
int bytes_read, ret = 0;
buffer = kmap_local_page(page);
bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer,
PAGE_SIZE);
if (bytes_read < 0) {
ClearPageUptodate(page);
SetPageError(page);
ret = bytes_read;
goto out;
}
memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read);
ClearPageError(page);
SetPageUptodate(page);
out:
flush_dcache_page(page);
kunmap_local(buffer);
unlock_page(page);
return ret;
}
static int hostfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
*pagep = grab_cache_page_write_begin(mapping, index);
if (!*pagep)
return -ENOMEM;
return 0;
}
static int hostfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
void *buffer;
unsigned from = pos & (PAGE_SIZE - 1);
int err;
buffer = kmap_local_page(page);
err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied);
kunmap_local(buffer);
if (!PageUptodate(page) && err == PAGE_SIZE)
SetPageUptodate(page);
/*
* If err > 0, write_file has added err to pos, so we are comparing
* i_size against the last byte written.
*/
if (err > 0 && (pos > inode->i_size))
inode->i_size = pos;
unlock_page(page);
put_page(page);
return err;
}
static const struct address_space_operations hostfs_aops = {
.writepage = hostfs_writepage,
.read_folio = hostfs_read_folio,
.dirty_folio = filemap_dirty_folio,
.write_begin = hostfs_write_begin,
.write_end = hostfs_write_end,
};
static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st)
{
set_nlink(ino, st->nlink);
i_uid_write(ino, st->uid);
i_gid_write(ino, st->gid);
ino->i_atime =
(struct timespec64){ st->atime.tv_sec, st->atime.tv_nsec };
ino->i_mtime =
(struct timespec64){ st->mtime.tv_sec, st->mtime.tv_nsec };
inode_set_ctime(ino, st->ctime.tv_sec, st->ctime.tv_nsec);
ino->i_size = st->size;
ino->i_blocks = st->blocks;
return 0;
}
static int hostfs_inode_set(struct inode *ino, void *data)
{
struct hostfs_stat *st = data;
dev_t rdev;
	/* Reencode maj and min with the kernel encoding. */
rdev = MKDEV(st->maj, st->min);
switch (st->mode & S_IFMT) {
case S_IFLNK:
ino->i_op = &hostfs_link_iops;
break;
case S_IFDIR:
ino->i_op = &hostfs_dir_iops;
ino->i_fop = &hostfs_dir_fops;
break;
case S_IFCHR:
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
init_special_inode(ino, st->mode & S_IFMT, rdev);
ino->i_op = &hostfs_iops;
break;
case S_IFREG:
ino->i_op = &hostfs_iops;
ino->i_fop = &hostfs_file_fops;
ino->i_mapping->a_ops = &hostfs_aops;
break;
default:
return -EIO;
}
HOSTFS_I(ino)->dev = st->dev;
ino->i_ino = st->ino;
ino->i_mode = st->mode;
return hostfs_inode_update(ino, st);
}
static int hostfs_inode_test(struct inode *inode, void *data)
{
const struct hostfs_stat *st = data;
return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == st->dev;
}
static struct inode *hostfs_iget(struct super_block *sb, char *name)
{
struct inode *inode;
struct hostfs_stat st;
int err = stat_file(name, &st, -1);
if (err)
return ERR_PTR(err);
inode = iget5_locked(sb, st.ino, hostfs_inode_test, hostfs_inode_set,
&st);
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
unlock_new_inode(inode);
} else {
spin_lock(&inode->i_lock);
hostfs_inode_update(inode, &st);
spin_unlock(&inode->i_lock);
}
return inode;
}
static int hostfs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct inode *inode;
char *name;
int fd;
name = dentry_name(dentry);
if (name == NULL)
return -ENOMEM;
fd = file_create(name, mode & 0777);
if (fd < 0) {
__putname(name);
return fd;
}
inode = hostfs_iget(dir->i_sb, name);
__putname(name);
if (IS_ERR(inode))
return PTR_ERR(inode);
HOSTFS_I(inode)->fd = fd;
HOSTFS_I(inode)->mode = FMODE_READ | FMODE_WRITE;
d_instantiate(dentry, inode);
return 0;
}
static struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode = NULL;
char *name;
name = dentry_name(dentry);
if (name == NULL)
return ERR_PTR(-ENOMEM);
inode = hostfs_iget(ino->i_sb, name);
__putname(name);
if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -ENOENT)
inode = NULL;
else
return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
}
static int hostfs_link(struct dentry *to, struct inode *ino,
struct dentry *from)
{
char *from_name, *to_name;
int err;
if ((from_name = dentry_name(from)) == NULL)
return -ENOMEM;
to_name = dentry_name(to);
if (to_name == NULL) {
__putname(from_name);
return -ENOMEM;
}
err = link_file(to_name, from_name);
__putname(from_name);
__putname(to_name);
return err;
}
static int hostfs_unlink(struct inode *ino, struct dentry *dentry)
{
char *file;
int err;
if (append)
return -EPERM;
if ((file = dentry_name(dentry)) == NULL)
return -ENOMEM;
err = unlink_file(file);
__putname(file);
return err;
}
static int hostfs_symlink(struct mnt_idmap *idmap, struct inode *ino,
struct dentry *dentry, const char *to)
{
char *file;
int err;
if ((file = dentry_name(dentry)) == NULL)
return -ENOMEM;
err = make_symlink(file, to);
__putname(file);
return err;
}
static int hostfs_mkdir(struct mnt_idmap *idmap, struct inode *ino,
struct dentry *dentry, umode_t mode)
{
char *file;
int err;
if ((file = dentry_name(dentry)) == NULL)
return -ENOMEM;
err = do_mkdir(file, mode);
__putname(file);
return err;
}
static int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
{
char *file;
int err;
if ((file = dentry_name(dentry)) == NULL)
return -ENOMEM;
err = hostfs_do_rmdir(file);
__putname(file);
return err;
}
static int hostfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
char *name;
int err;
name = dentry_name(dentry);
if (name == NULL)
return -ENOMEM;
err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
if (err) {
__putname(name);
return err;
}
inode = hostfs_iget(dir->i_sb, name);
__putname(name);
if (IS_ERR(inode))
return PTR_ERR(inode);
d_instantiate(dentry, inode);
return 0;
}
static int hostfs_rename2(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
char *old_name, *new_name;
int err;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
return -EINVAL;
old_name = dentry_name(old_dentry);
if (old_name == NULL)
return -ENOMEM;
new_name = dentry_name(new_dentry);
if (new_name == NULL) {
__putname(old_name);
return -ENOMEM;
}
if (!flags)
err = rename_file(old_name, new_name);
else
err = rename2_file(old_name, new_name, flags);
__putname(old_name);
__putname(new_name);
return err;
}
static int hostfs_permission(struct mnt_idmap *idmap,
struct inode *ino, int desired)
{
char *name;
int r = 0, w = 0, x = 0, err;
if (desired & MAY_NOT_BLOCK)
return -ECHILD;
if (desired & MAY_READ) r = 1;
if (desired & MAY_WRITE) w = 1;
if (desired & MAY_EXEC) x = 1;
name = inode_name(ino);
if (name == NULL)
return -ENOMEM;
if (S_ISCHR(ino->i_mode) || S_ISBLK(ino->i_mode) ||
S_ISFIFO(ino->i_mode) || S_ISSOCK(ino->i_mode))
err = 0;
else
err = access_file(name, r, w, x);
__putname(name);
if (!err)
err = generic_permission(&nop_mnt_idmap, ino, desired);
return err;
}
static int hostfs_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct hostfs_iattr attrs;
char *name;
int err;
int fd = HOSTFS_I(inode)->fd;
err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (err)
return err;
if (append)
attr->ia_valid &= ~ATTR_SIZE;
attrs.ia_valid = 0;
if (attr->ia_valid & ATTR_MODE) {
attrs.ia_valid |= HOSTFS_ATTR_MODE;
attrs.ia_mode = attr->ia_mode;
}
if (attr->ia_valid & ATTR_UID) {
attrs.ia_valid |= HOSTFS_ATTR_UID;
attrs.ia_uid = from_kuid(&init_user_ns, attr->ia_uid);
}
if (attr->ia_valid & ATTR_GID) {
attrs.ia_valid |= HOSTFS_ATTR_GID;
attrs.ia_gid = from_kgid(&init_user_ns, attr->ia_gid);
}
if (attr->ia_valid & ATTR_SIZE) {
attrs.ia_valid |= HOSTFS_ATTR_SIZE;
attrs.ia_size = attr->ia_size;
}
if (attr->ia_valid & ATTR_ATIME) {
attrs.ia_valid |= HOSTFS_ATTR_ATIME;
attrs.ia_atime = (struct hostfs_timespec)
{ attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec };
}
if (attr->ia_valid & ATTR_MTIME) {
attrs.ia_valid |= HOSTFS_ATTR_MTIME;
attrs.ia_mtime = (struct hostfs_timespec)
{ attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec };
}
if (attr->ia_valid & ATTR_CTIME) {
attrs.ia_valid |= HOSTFS_ATTR_CTIME;
attrs.ia_ctime = (struct hostfs_timespec)
{ attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec };
}
if (attr->ia_valid & ATTR_ATIME_SET) {
attrs.ia_valid |= HOSTFS_ATTR_ATIME_SET;
}
if (attr->ia_valid & ATTR_MTIME_SET) {
attrs.ia_valid |= HOSTFS_ATTR_MTIME_SET;
}
name = dentry_name(dentry);
if (name == NULL)
return -ENOMEM;
err = set_attr(name, &attrs, fd);
__putname(name);
if (err)
return err;
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode))
truncate_setsize(inode, attr->ia_size);
setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
static const struct inode_operations hostfs_iops = {
.permission = hostfs_permission,
.setattr = hostfs_setattr,
};
static const struct inode_operations hostfs_dir_iops = {
.create = hostfs_create,
.lookup = hostfs_lookup,
.link = hostfs_link,
.unlink = hostfs_unlink,
.symlink = hostfs_symlink,
.mkdir = hostfs_mkdir,
.rmdir = hostfs_rmdir,
.mknod = hostfs_mknod,
.rename = hostfs_rename2,
.permission = hostfs_permission,
.setattr = hostfs_setattr,
};
static const char *hostfs_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
char *link;
if (!dentry)
return ERR_PTR(-ECHILD);
link = kmalloc(PATH_MAX, GFP_KERNEL);
if (link) {
char *path = dentry_name(dentry);
int err = -ENOMEM;
if (path) {
err = hostfs_do_readlink(path, link, PATH_MAX);
if (err == PATH_MAX)
err = -E2BIG;
__putname(path);
}
if (err < 0) {
kfree(link);
return ERR_PTR(err);
}
} else {
return ERR_PTR(-ENOMEM);
}
set_delayed_call(done, kfree_link, link);
return link;
}
static const struct inode_operations hostfs_link_iops = {
.get_link = hostfs_get_link,
};
static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
{
struct inode *root_inode;
char *host_root_path, *req_root = d;
int err;
sb->s_blocksize = 1024;
sb->s_blocksize_bits = 10;
sb->s_magic = HOSTFS_SUPER_MAGIC;
sb->s_op = &hostfs_sbops;
sb->s_d_op = &simple_dentry_operations;
sb->s_maxbytes = MAX_LFS_FILESIZE;
err = super_setup_bdi(sb);
if (err)
return err;
/* NULL is printed as '(null)' by printf(): avoid that. */
if (req_root == NULL)
req_root = "";
sb->s_fs_info = host_root_path =
kasprintf(GFP_KERNEL, "%s/%s", root_ino, req_root);
if (host_root_path == NULL)
return -ENOMEM;
root_inode = hostfs_iget(sb, host_root_path);
if (IS_ERR(root_inode))
return PTR_ERR(root_inode);
if (S_ISLNK(root_inode->i_mode)) {
char *name;
iput(root_inode);
name = follow_link(host_root_path);
if (IS_ERR(name))
return PTR_ERR(name);
root_inode = hostfs_iget(sb, name);
kfree(name);
if (IS_ERR(root_inode))
return PTR_ERR(root_inode);
}
sb->s_root = d_make_root(root_inode);
if (sb->s_root == NULL)
return -ENOMEM;
return 0;
}
static struct dentry *hostfs_read_sb(struct file_system_type *type,
int flags, const char *dev_name,
void *data)
{
return mount_nodev(type, flags, data, hostfs_fill_sb_common);
}
static void hostfs_kill_sb(struct super_block *s)
{
kill_anon_super(s);
kfree(s->s_fs_info);
}
static struct file_system_type hostfs_type = {
.owner = THIS_MODULE,
.name = "hostfs",
.mount = hostfs_read_sb,
.kill_sb = hostfs_kill_sb,
.fs_flags = 0,
};
MODULE_ALIAS_FS("hostfs");
static int __init init_hostfs(void)
{
hostfs_inode_cache = KMEM_CACHE(hostfs_inode_info, 0);
if (!hostfs_inode_cache)
return -ENOMEM;
return register_filesystem(&hostfs_type);
}
static void __exit exit_hostfs(void)
{
unregister_filesystem(&hostfs_type);
kmem_cache_destroy(hostfs_inode_cache);
}
module_init(init_hostfs)
module_exit(exit_hostfs)
MODULE_LICENSE("GPL");
| linux-master | fs/hostfs/hostfs_kern.c |
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <stdio.h>
#include <stddef.h>
#include <unistd.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/syscall.h>
#include "hostfs.h"
#include <utime.h>
static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
{
p->ino = buf->st_ino;
p->mode = buf->st_mode;
p->nlink = buf->st_nlink;
p->uid = buf->st_uid;
p->gid = buf->st_gid;
p->size = buf->st_size;
p->atime.tv_sec = buf->st_atime;
p->atime.tv_nsec = 0;
p->ctime.tv_sec = buf->st_ctime;
p->ctime.tv_nsec = 0;
p->mtime.tv_sec = buf->st_mtime;
p->mtime.tv_nsec = 0;
p->blksize = buf->st_blksize;
p->blocks = buf->st_blocks;
p->maj = os_major(buf->st_rdev);
p->min = os_minor(buf->st_rdev);
p->dev = buf->st_dev;
}
int stat_file(const char *path, struct hostfs_stat *p, int fd)
{
struct stat64 buf;
if (fd >= 0) {
if (fstat64(fd, &buf) < 0)
return -errno;
} else if (lstat64(path, &buf) < 0) {
return -errno;
}
stat64_to_hostfs(&buf, p);
return 0;
}
int access_file(char *path, int r, int w, int x)
{
int mode = 0;
if (r)
mode = R_OK;
if (w)
mode |= W_OK;
if (x)
mode |= X_OK;
if (access(path, mode) != 0)
return -errno;
else return 0;
}
int open_file(char *path, int r, int w, int append)
{
int mode = 0, fd;
if (r && !w)
mode = O_RDONLY;
else if (!r && w)
mode = O_WRONLY;
else if (r && w)
mode = O_RDWR;
else panic("Impossible mode in open_file");
if (append)
mode |= O_APPEND;
fd = open64(path, mode);
if (fd < 0)
return -errno;
else return fd;
}
void *open_dir(char *path, int *err_out)
{
DIR *dir;
dir = opendir(path);
*err_out = errno;
return dir;
}
void seek_dir(void *stream, unsigned long long pos)
{
DIR *dir = stream;
seekdir(dir, pos);
}
char *read_dir(void *stream, unsigned long long *pos_out,
unsigned long long *ino_out, int *len_out,
unsigned int *type_out)
{
DIR *dir = stream;
struct dirent *ent;
ent = readdir(dir);
if (ent == NULL)
return NULL;
*len_out = strlen(ent->d_name);
*ino_out = ent->d_ino;
*type_out = ent->d_type;
*pos_out = ent->d_off;
return ent->d_name;
}
int read_file(int fd, unsigned long long *offset, char *buf, int len)
{
int n;
n = pread64(fd, buf, len, *offset);
if (n < 0)
return -errno;
*offset += n;
return n;
}
int write_file(int fd, unsigned long long *offset, const char *buf, int len)
{
int n;
n = pwrite64(fd, buf, len, *offset);
if (n < 0)
return -errno;
*offset += n;
return n;
}
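/*
 * Note that read_file() and write_file() above use pread64()/pwrite64()
 * with a caller-tracked offset that they advance themselves, so the
 * host fd's own file position is never consulted or modified.
 */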
int lseek_file(int fd, long long offset, int whence)
{
	long long ret;
	ret = lseek64(fd, offset, whence);
if (ret < 0)
return -errno;
return 0;
}
int fsync_file(int fd, int datasync)
{
int ret;
if (datasync)
ret = fdatasync(fd);
else
ret = fsync(fd);
if (ret < 0)
return -errno;
return 0;
}
int replace_file(int oldfd, int fd)
{
return dup2(oldfd, fd);
}
void close_file(void *stream)
{
close(*((int *) stream));
}
void close_dir(void *stream)
{
closedir(stream);
}
int file_create(char *name, int mode)
{
int fd;
fd = open64(name, O_CREAT | O_RDWR, mode);
if (fd < 0)
return -errno;
return fd;
}
int set_attr(const char *file, struct hostfs_iattr *attrs, int fd)
{
struct hostfs_stat st;
struct timeval times[2];
int err, ma;
if (attrs->ia_valid & HOSTFS_ATTR_MODE) {
if (fd >= 0) {
if (fchmod(fd, attrs->ia_mode) != 0)
return -errno;
} else if (chmod(file, attrs->ia_mode) != 0) {
return -errno;
}
}
if (attrs->ia_valid & HOSTFS_ATTR_UID) {
if (fd >= 0) {
if (fchown(fd, attrs->ia_uid, -1))
return -errno;
} else if (chown(file, attrs->ia_uid, -1)) {
return -errno;
}
}
if (attrs->ia_valid & HOSTFS_ATTR_GID) {
if (fd >= 0) {
if (fchown(fd, -1, attrs->ia_gid))
return -errno;
} else if (chown(file, -1, attrs->ia_gid)) {
return -errno;
}
}
if (attrs->ia_valid & HOSTFS_ATTR_SIZE) {
if (fd >= 0) {
if (ftruncate(fd, attrs->ia_size))
return -errno;
} else if (truncate(file, attrs->ia_size)) {
return -errno;
}
}
/*
* Update accessed and/or modified time, in two parts: first set
* times according to the changes to perform, and then call futimes()
* or utimes() to apply them.
*/
ma = (HOSTFS_ATTR_ATIME_SET | HOSTFS_ATTR_MTIME_SET);
if (attrs->ia_valid & ma) {
err = stat_file(file, &st, fd);
if (err != 0)
return err;
times[0].tv_sec = st.atime.tv_sec;
times[0].tv_usec = st.atime.tv_nsec / 1000;
times[1].tv_sec = st.mtime.tv_sec;
times[1].tv_usec = st.mtime.tv_nsec / 1000;
if (attrs->ia_valid & HOSTFS_ATTR_ATIME_SET) {
times[0].tv_sec = attrs->ia_atime.tv_sec;
times[0].tv_usec = attrs->ia_atime.tv_nsec / 1000;
}
if (attrs->ia_valid & HOSTFS_ATTR_MTIME_SET) {
times[1].tv_sec = attrs->ia_mtime.tv_sec;
times[1].tv_usec = attrs->ia_mtime.tv_nsec / 1000;
}
if (fd >= 0) {
if (futimes(fd, times) != 0)
return -errno;
} else if (utimes(file, times) != 0) {
return -errno;
}
}
/* Note: ctime is not handled */
if (attrs->ia_valid & (HOSTFS_ATTR_ATIME | HOSTFS_ATTR_MTIME)) {
		err = stat_file(file, &st, fd);
		if (err != 0)
			return err;
		attrs->ia_atime = st.atime;
		attrs->ia_mtime = st.mtime;
}
return 0;
}
int make_symlink(const char *from, const char *to)
{
int err;
err = symlink(to, from);
if (err)
return -errno;
return 0;
}
int unlink_file(const char *file)
{
int err;
err = unlink(file);
if (err)
return -errno;
return 0;
}
int do_mkdir(const char *file, int mode)
{
int err;
err = mkdir(file, mode);
if (err)
return -errno;
return 0;
}
int hostfs_do_rmdir(const char *file)
{
int err;
err = rmdir(file);
if (err)
return -errno;
return 0;
}
int do_mknod(const char *file, int mode, unsigned int major, unsigned int minor)
{
int err;
err = mknod(file, mode, os_makedev(major, minor));
if (err)
return -errno;
return 0;
}
int link_file(const char *to, const char *from)
{
int err;
err = link(to, from);
if (err)
return -errno;
return 0;
}
int hostfs_do_readlink(char *file, char *buf, int size)
{
int n;
n = readlink(file, buf, size);
if (n < 0)
return -errno;
if (n < size)
buf[n] = '\0';
return n;
}
int rename_file(char *from, char *to)
{
int err;
err = rename(from, to);
if (err < 0)
return -errno;
return 0;
}
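/*
 * rename2_file() below invokes the renameat2 syscall directly, supplying
 * the x86 syscall numbers itself where an older <sys/syscall.h> lacks
 * SYS_renameat2. A host kernel without the syscall reports ENOSYS, which
 * is mapped to -EINVAL so that flagged renames read as unsupported.
 */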
int rename2_file(char *from, char *to, unsigned int flags)
{
int err;
#ifndef SYS_renameat2
# ifdef __x86_64__
# define SYS_renameat2 316
# endif
# ifdef __i386__
# define SYS_renameat2 353
# endif
#endif
#ifdef SYS_renameat2
err = syscall(SYS_renameat2, AT_FDCWD, from, AT_FDCWD, to, flags);
if (err < 0) {
if (errno != ENOSYS)
return -errno;
else
return -EINVAL;
}
return 0;
#else
return -EINVAL;
#endif
}
int do_statfs(char *root, long *bsize_out, long long *blocks_out,
long long *bfree_out, long long *bavail_out,
long long *files_out, long long *ffree_out,
void *fsid_out, int fsid_size, long *namelen_out)
{
struct statfs64 buf;
int err;
err = statfs64(root, &buf);
if (err < 0)
return -errno;
*bsize_out = buf.f_bsize;
*blocks_out = buf.f_blocks;
*bfree_out = buf.f_bfree;
*bavail_out = buf.f_bavail;
*files_out = buf.f_files;
*ffree_out = buf.f_ffree;
memcpy(fsid_out, &buf.f_fsid,
sizeof(buf.f_fsid) > fsid_size ? fsid_size :
sizeof(buf.f_fsid));
*namelen_out = buf.f_namelen;
return 0;
}
| linux-master | fs/hostfs/hostfs_user.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 1997-2003 Erez Zadok
* Copyright (C) 2001-2003 Stony Brook University
* Copyright (C) 2004-2006 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
* Michael C. Thompson <[email protected]>
*/
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/key.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/statfs.h>
#include <linux/magic.h>
#include "ecryptfs_kernel.h"
struct kmem_cache *ecryptfs_inode_info_cache;
/**
* ecryptfs_alloc_inode - allocate an ecryptfs inode
* @sb: Pointer to the ecryptfs super block
*
* Called to bring an inode into existence.
*
 * Only handle allocation; setting up structures should be done in
* ecryptfs_read_inode. This is because the kernel, between now and
* then, will 0 out the private data pointer.
*
* Returns a pointer to a newly allocated inode, NULL otherwise
*/
static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
{
struct ecryptfs_inode_info *inode_info;
struct inode *inode = NULL;
inode_info = alloc_inode_sb(sb, ecryptfs_inode_info_cache, GFP_KERNEL);
if (unlikely(!inode_info))
goto out;
if (ecryptfs_init_crypt_stat(&inode_info->crypt_stat)) {
kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
goto out;
}
mutex_init(&inode_info->lower_file_mutex);
atomic_set(&inode_info->lower_file_count, 0);
inode_info->lower_file = NULL;
inode = &inode_info->vfs_inode;
out:
return inode;
}
static void ecryptfs_free_inode(struct inode *inode)
{
struct ecryptfs_inode_info *inode_info;
inode_info = ecryptfs_inode_to_private(inode);
kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
}
/**
* ecryptfs_destroy_inode
* @inode: The ecryptfs inode
*
* This is used during the final destruction of the inode. All
* allocation of memory related to the inode, including allocated
* memory in the crypt_stat struct, will be released here.
* There should be no chance that this deallocation will be missed.
*/
static void ecryptfs_destroy_inode(struct inode *inode)
{
struct ecryptfs_inode_info *inode_info;
inode_info = ecryptfs_inode_to_private(inode);
BUG_ON(inode_info->lower_file);
ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
}
/**
* ecryptfs_statfs
* @dentry: The ecryptfs dentry
* @buf: The struct kstatfs to fill in with stats
*
* Get the filesystem statistics. Currently, we let this pass right through
* to the lower filesystem and take no action ourselves.
*/
static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
int rc;
if (!lower_dentry->d_sb->s_op->statfs)
return -ENOSYS;
rc = lower_dentry->d_sb->s_op->statfs(lower_dentry, buf);
if (rc)
return rc;
buf->f_type = ECRYPTFS_SUPER_MAGIC;
rc = ecryptfs_set_f_namelen(&buf->f_namelen, buf->f_namelen,
&ecryptfs_superblock_to_private(dentry->d_sb)->mount_crypt_stat);
return rc;
}
/**
* ecryptfs_evict_inode
* @inode: The ecryptfs inode
*
 * Called by iput() when the inode reference count reaches zero
 * and the inode is not hashed anywhere. Used to clear anything
 * that needs to be, before the inode is completely destroyed and put
 * on the inode free list. We use this to drop our reference to the
 * lower inode.
*/
static void ecryptfs_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
iput(ecryptfs_inode_to_lower(inode));
}
/*
* ecryptfs_show_options
*
* Prints the mount options for a given superblock.
* Returns zero; does not fail.
*/
static int ecryptfs_show_options(struct seq_file *m, struct dentry *root)
{
struct super_block *sb = root->d_sb;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&ecryptfs_superblock_to_private(sb)->mount_crypt_stat;
struct ecryptfs_global_auth_tok *walker;
mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
list_for_each_entry(walker,
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
if (walker->flags & ECRYPTFS_AUTH_TOK_FNEK)
seq_printf(m, ",ecryptfs_fnek_sig=%s", walker->sig);
else
seq_printf(m, ",ecryptfs_sig=%s", walker->sig);
}
mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
seq_printf(m, ",ecryptfs_cipher=%s",
mount_crypt_stat->global_default_cipher_name);
if (mount_crypt_stat->global_default_cipher_key_size)
seq_printf(m, ",ecryptfs_key_bytes=%zd",
mount_crypt_stat->global_default_cipher_key_size);
if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)
seq_printf(m, ",ecryptfs_passthrough");
if (mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED)
seq_printf(m, ",ecryptfs_xattr_metadata");
if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
seq_printf(m, ",ecryptfs_encrypted_view");
if (mount_crypt_stat->flags & ECRYPTFS_UNLINK_SIGS)
seq_printf(m, ",ecryptfs_unlink_sigs");
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY)
seq_printf(m, ",ecryptfs_mount_auth_tok_only");
return 0;
}
const struct super_operations ecryptfs_sops = {
.alloc_inode = ecryptfs_alloc_inode,
.destroy_inode = ecryptfs_destroy_inode,
.free_inode = ecryptfs_free_inode,
.statfs = ecryptfs_statfs,
.remount_fs = NULL,
.evict_inode = ecryptfs_evict_inode,
.show_options = ecryptfs_show_options
};
| linux-master | fs/ecryptfs/super.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 2004-2008 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
* Tyler Hicks <[email protected]>
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/nsproxy.h>
#include "ecryptfs_kernel.h"
static LIST_HEAD(ecryptfs_msg_ctx_free_list);
static LIST_HEAD(ecryptfs_msg_ctx_alloc_list);
static DEFINE_MUTEX(ecryptfs_msg_ctx_lists_mux);
static struct hlist_head *ecryptfs_daemon_hash;
DEFINE_MUTEX(ecryptfs_daemon_hash_mux);
static int ecryptfs_hash_bits;
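/* Hash-bucket index for the calling task's effective uid. */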
#define ecryptfs_current_euid_hash() \
	hash_long((unsigned long)from_kuid(&init_user_ns, current_euid()), ecryptfs_hash_bits)
static u32 ecryptfs_msg_counter;
static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
/**
* ecryptfs_acquire_free_msg_ctx
* @msg_ctx: The context that was acquired from the free list
*
* Acquires a context element from the free list and locks the mutex
* on the context. Sets the msg_ctx task to current. Returns zero on
* success; non-zero on error or upon failure to acquire a free
* context element. Must be called with ecryptfs_msg_ctx_lists_mux
* held.
*/
static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx)
{
struct list_head *p;
int rc;
if (list_empty(&ecryptfs_msg_ctx_free_list)) {
printk(KERN_WARNING "%s: The eCryptfs free "
"context list is empty. It may be helpful to "
"specify the ecryptfs_message_buf_len "
"parameter to be greater than the current "
"value of [%d]\n", __func__, ecryptfs_message_buf_len);
rc = -ENOMEM;
goto out;
}
list_for_each(p, &ecryptfs_msg_ctx_free_list) {
*msg_ctx = list_entry(p, struct ecryptfs_msg_ctx, node);
if (mutex_trylock(&(*msg_ctx)->mux)) {
(*msg_ctx)->task = current;
rc = 0;
goto out;
}
}
rc = -ENOMEM;
out:
return rc;
}
/**
* ecryptfs_msg_ctx_free_to_alloc
* @msg_ctx: The context to move from the free list to the alloc list
*
* Must be called with ecryptfs_msg_ctx_lists_mux held.
*/
static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx)
{
list_move(&msg_ctx->node, &ecryptfs_msg_ctx_alloc_list);
msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_PENDING;
msg_ctx->counter = ++ecryptfs_msg_counter;
}
/**
* ecryptfs_msg_ctx_alloc_to_free
* @msg_ctx: The context to move from the alloc list to the free list
*
* Must be called with ecryptfs_msg_ctx_lists_mux held.
*/
void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
{
list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list);
kfree(msg_ctx->msg);
msg_ctx->msg = NULL;
msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE;
}
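/*
 * Taken together, the two helpers above move a message context through
 * a small state machine: FREE (on ecryptfs_msg_ctx_free_list) ->
 * PENDING (on ecryptfs_msg_ctx_alloc_list, awaiting a response) ->
 * DONE (response copied in by ecryptfs_process_response) -> FREE.
 */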
/**
* ecryptfs_find_daemon_by_euid
* @daemon: If return value is zero, points to the desired daemon pointer
*
* Must be called with ecryptfs_daemon_hash_mux held.
*
* Search the hash list for the current effective user id.
*
* Returns zero if the user id exists in the list; non-zero otherwise.
*/
int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon)
{
int rc;
hlist_for_each_entry(*daemon,
&ecryptfs_daemon_hash[ecryptfs_current_euid_hash()],
euid_chain) {
if (uid_eq((*daemon)->file->f_cred->euid, current_euid())) {
rc = 0;
goto out;
}
}
rc = -EINVAL;
out:
return rc;
}
/**
* ecryptfs_spawn_daemon - Create and initialize a new daemon struct
* @daemon: Pointer to set to newly allocated daemon struct
* @file: File used when opening /dev/ecryptfs
*
 * Must be called ceremoniously while in possession of
 * ecryptfs_daemon_hash_mux
*
* Returns zero on success; non-zero otherwise
*/
int
ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file)
{
int rc = 0;
(*daemon) = kzalloc(sizeof(**daemon), GFP_KERNEL);
if (!(*daemon)) {
rc = -ENOMEM;
goto out;
}
(*daemon)->file = file;
mutex_init(&(*daemon)->mux);
INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue);
init_waitqueue_head(&(*daemon)->wait);
(*daemon)->num_queued_msg_ctx = 0;
hlist_add_head(&(*daemon)->euid_chain,
&ecryptfs_daemon_hash[ecryptfs_current_euid_hash()]);
out:
return rc;
}
/*
* ecryptfs_exorcise_daemon - Destroy the daemon struct
*
* Must be called ceremoniously while in possession of
* ecryptfs_daemon_hash_mux and the daemon's own mux.
*/
int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon)
{
struct ecryptfs_msg_ctx *msg_ctx, *msg_ctx_tmp;
int rc = 0;
mutex_lock(&daemon->mux);
if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ)
|| (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) {
rc = -EBUSY;
mutex_unlock(&daemon->mux);
goto out;
}
list_for_each_entry_safe(msg_ctx, msg_ctx_tmp,
&daemon->msg_ctx_out_queue, daemon_out_list) {
list_del(&msg_ctx->daemon_out_list);
daemon->num_queued_msg_ctx--;
printk(KERN_WARNING "%s: Warning: dropping message that is in "
"the out queue of a dying daemon\n", __func__);
ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
}
hlist_del(&daemon->euid_chain);
mutex_unlock(&daemon->mux);
kfree_sensitive(daemon);
out:
return rc;
}
/**
* ecryptfs_process_response
* @daemon: eCryptfs daemon object
* @msg: The ecryptfs message received; the caller should sanity check
* msg->data_len and free the memory
* @seq: The sequence number of the message; must match the sequence
* number for the existing message context waiting for this
* response
*
* Processes a response message after sending an operation request to
* userspace. Some other process is awaiting this response. Before
* sending out its first communications, the other process allocated a
* msg_ctx from the ecryptfs_msg_ctx_arr at a particular index. The
* response message contains this index so that we can copy over the
* response message into the msg_ctx that the process holds a
* reference to. The other process is going to wake up, check to see
* that msg_ctx->state == ECRYPTFS_MSG_CTX_STATE_DONE, and then
 * proceed to read off and process the response message.
 *
 * Returns zero upon delivery to the desired context element; non-zero
 * upon delivery failure or error.
*/
int ecryptfs_process_response(struct ecryptfs_daemon *daemon,
struct ecryptfs_message *msg, u32 seq)
{
struct ecryptfs_msg_ctx *msg_ctx;
size_t msg_size;
int rc;
if (msg->index >= ecryptfs_message_buf_len) {
rc = -EINVAL;
printk(KERN_ERR "%s: Attempt to reference "
"context buffer at index [%d]; maximum "
"allowable is [%d]\n", __func__, msg->index,
(ecryptfs_message_buf_len - 1));
goto out;
}
msg_ctx = &ecryptfs_msg_ctx_arr[msg->index];
mutex_lock(&msg_ctx->mux);
if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) {
rc = -EINVAL;
printk(KERN_WARNING "%s: Desired context element is not "
"pending a response\n", __func__);
goto unlock;
} else if (msg_ctx->counter != seq) {
rc = -EINVAL;
printk(KERN_WARNING "%s: Invalid message sequence; "
"expected [%d]; received [%d]\n", __func__,
msg_ctx->counter, seq);
goto unlock;
}
msg_size = (sizeof(*msg) + msg->data_len);
msg_ctx->msg = kmemdup(msg, msg_size, GFP_KERNEL);
if (!msg_ctx->msg) {
rc = -ENOMEM;
goto unlock;
}
msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_DONE;
wake_up_process(msg_ctx->task);
rc = 0;
unlock:
mutex_unlock(&msg_ctx->mux);
out:
return rc;
}
/**
* ecryptfs_send_message_locked
* @data: The data to send
* @data_len: The length of data
* @msg_type: Type of message
* @msg_ctx: The message context allocated for the send
*
* Must be called with ecryptfs_daemon_hash_mux held.
*
* Returns zero on success; non-zero otherwise
*/
static int
ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type,
struct ecryptfs_msg_ctx **msg_ctx)
{
struct ecryptfs_daemon *daemon;
int rc;
rc = ecryptfs_find_daemon_by_euid(&daemon);
if (rc) {
rc = -ENOTCONN;
goto out;
}
mutex_lock(&ecryptfs_msg_ctx_lists_mux);
rc = ecryptfs_acquire_free_msg_ctx(msg_ctx);
if (rc) {
mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
printk(KERN_WARNING "%s: Could not claim a free "
"context element\n", __func__);
goto out;
}
ecryptfs_msg_ctx_free_to_alloc(*msg_ctx);
mutex_unlock(&(*msg_ctx)->mux);
mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
rc = ecryptfs_send_miscdev(data, data_len, *msg_ctx, msg_type, 0,
daemon);
if (rc)
printk(KERN_ERR "%s: Error attempting to send message to "
"userspace daemon; rc = [%d]\n", __func__, rc);
out:
return rc;
}
/**
* ecryptfs_send_message
* @data: The data to send
* @data_len: The length of data
* @msg_ctx: The message context allocated for the send
*
* Grabs ecryptfs_daemon_hash_mux.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_send_message(char *data, int data_len,
struct ecryptfs_msg_ctx **msg_ctx)
{
int rc;
mutex_lock(&ecryptfs_daemon_hash_mux);
rc = ecryptfs_send_message_locked(data, data_len, ECRYPTFS_MSG_REQUEST,
msg_ctx);
mutex_unlock(&ecryptfs_daemon_hash_mux);
return rc;
}
/**
* ecryptfs_wait_for_response
* @msg_ctx: The context that was assigned when sending a message
* @msg: The incoming message from userspace; not set if rc != 0
*
 * Sleeps until awakened by ecryptfs_receive_message or until the amount
 * of time exceeds ecryptfs_message_wait_timeout. If zero is
 * returned, msg will point to a valid message from userspace; a
 * non-zero value is returned upon failure to receive a message or upon
 * error. The caller must free @msg on success.
*/
int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx,
struct ecryptfs_message **msg)
{
signed long timeout = ecryptfs_message_wait_timeout * HZ;
int rc = 0;
sleep:
timeout = schedule_timeout_interruptible(timeout);
mutex_lock(&ecryptfs_msg_ctx_lists_mux);
mutex_lock(&msg_ctx->mux);
if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_DONE) {
if (timeout) {
mutex_unlock(&msg_ctx->mux);
mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
goto sleep;
}
rc = -ENOMSG;
} else {
*msg = msg_ctx->msg;
msg_ctx->msg = NULL;
}
ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
mutex_unlock(&msg_ctx->mux);
mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
return rc;
}
int __init ecryptfs_init_messaging(void)
{
int i;
int rc = 0;
if (ecryptfs_number_of_users > ECRYPTFS_MAX_NUM_USERS) {
ecryptfs_number_of_users = ECRYPTFS_MAX_NUM_USERS;
printk(KERN_WARNING "%s: Specified number of users is "
"too large, defaulting to [%d] users\n", __func__,
ecryptfs_number_of_users);
}
mutex_lock(&ecryptfs_daemon_hash_mux);
ecryptfs_hash_bits = 1;
while (ecryptfs_number_of_users >> ecryptfs_hash_bits)
ecryptfs_hash_bits++;
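	/*
	 * The loop above picks the smallest power-of-two table with
	 * strictly more buckets than ecryptfs_number_of_users; e.g. 4
	 * users yields ecryptfs_hash_bits == 3, an 8-bucket table.
	 */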
ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head)
* (1 << ecryptfs_hash_bits)),
GFP_KERNEL);
if (!ecryptfs_daemon_hash) {
rc = -ENOMEM;
mutex_unlock(&ecryptfs_daemon_hash_mux);
goto out;
}
for (i = 0; i < (1 << ecryptfs_hash_bits); i++)
INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]);
mutex_unlock(&ecryptfs_daemon_hash_mux);
ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx)
* ecryptfs_message_buf_len),
GFP_KERNEL);
if (!ecryptfs_msg_ctx_arr) {
kfree(ecryptfs_daemon_hash);
rc = -ENOMEM;
goto out;
}
mutex_lock(&ecryptfs_msg_ctx_lists_mux);
ecryptfs_msg_counter = 0;
for (i = 0; i < ecryptfs_message_buf_len; i++) {
INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].node);
INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].daemon_out_list);
mutex_init(&ecryptfs_msg_ctx_arr[i].mux);
mutex_lock(&ecryptfs_msg_ctx_arr[i].mux);
ecryptfs_msg_ctx_arr[i].index = i;
ecryptfs_msg_ctx_arr[i].state = ECRYPTFS_MSG_CTX_STATE_FREE;
ecryptfs_msg_ctx_arr[i].counter = 0;
ecryptfs_msg_ctx_arr[i].task = NULL;
ecryptfs_msg_ctx_arr[i].msg = NULL;
list_add_tail(&ecryptfs_msg_ctx_arr[i].node,
&ecryptfs_msg_ctx_free_list);
mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux);
}
mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
rc = ecryptfs_init_ecryptfs_miscdev();
if (rc)
ecryptfs_release_messaging();
out:
return rc;
}
void ecryptfs_release_messaging(void)
{
if (ecryptfs_msg_ctx_arr) {
int i;
mutex_lock(&ecryptfs_msg_ctx_lists_mux);
for (i = 0; i < ecryptfs_message_buf_len; i++) {
mutex_lock(&ecryptfs_msg_ctx_arr[i].mux);
kfree(ecryptfs_msg_ctx_arr[i].msg);
mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux);
}
kfree(ecryptfs_msg_ctx_arr);
mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
}
if (ecryptfs_daemon_hash) {
struct ecryptfs_daemon *daemon;
struct hlist_node *n;
int i;
mutex_lock(&ecryptfs_daemon_hash_mux);
for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
int rc;
hlist_for_each_entry_safe(daemon, n,
&ecryptfs_daemon_hash[i],
euid_chain) {
rc = ecryptfs_exorcise_daemon(daemon);
if (rc)
printk(KERN_ERR "%s: Error whilst "
"attempting to destroy daemon; "
"rc = [%d]. Dazed and confused, "
"but trying to continue.\n",
__func__, rc);
}
}
kfree(ecryptfs_daemon_hash);
mutex_unlock(&ecryptfs_daemon_hash_mux);
}
ecryptfs_destroy_ecryptfs_miscdev();
return;
}
| linux-master | fs/ecryptfs/messaging.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
* In-kernel key management code. Includes functions to parse and
* write authentication token-related packets with the underlying
* file.
*
* Copyright (C) 2004-2006 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
* Michael C. Thompson <[email protected]>
* Trevor S. Highland <[email protected]>
*/
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "ecryptfs_kernel.h"
/*
* request_key returned an error instead of a valid key address;
* determine the type of error, make appropriate log entries, and
* return an error code.
*/
static int process_request_key_err(long err_code)
{
int rc = 0;
switch (err_code) {
case -ENOKEY:
ecryptfs_printk(KERN_WARNING, "No key\n");
rc = -ENOENT;
break;
case -EKEYEXPIRED:
ecryptfs_printk(KERN_WARNING, "Key expired\n");
rc = -ETIME;
break;
case -EKEYREVOKED:
ecryptfs_printk(KERN_WARNING, "Key revoked\n");
rc = -EINVAL;
break;
default:
ecryptfs_printk(KERN_WARNING, "Unknown error code: "
"[0x%.16lx]\n", err_code);
rc = -EINVAL;
}
return rc;
}
static int process_find_global_auth_tok_for_sig_err(int err_code)
{
int rc = err_code;
switch (err_code) {
case -ENOENT:
ecryptfs_printk(KERN_WARNING, "Missing auth tok\n");
break;
case -EINVAL:
ecryptfs_printk(KERN_WARNING, "Invalid auth tok\n");
break;
default:
rc = process_request_key_err(err_code);
break;
}
return rc;
}
/**
* ecryptfs_parse_packet_length
* @data: Pointer to memory containing length at offset
* @size: This function writes the decoded size to this memory
* address; zero on error
* @length_size: The number of bytes occupied by the encoded length
*
* Returns zero on success; non-zero on error
*/
int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
size_t *length_size)
{
int rc = 0;
(*length_size) = 0;
(*size) = 0;
if (data[0] < 192) {
/* One-byte length */
(*size) = data[0];
(*length_size) = 1;
} else if (data[0] < 224) {
/* Two-byte length */
(*size) = (data[0] - 192) * 256;
(*size) += data[1] + 192;
(*length_size) = 2;
} else if (data[0] == 255) {
/* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */
ecryptfs_printk(KERN_ERR, "Five-byte packet length not "
"supported\n");
rc = -EINVAL;
goto out;
} else {
ecryptfs_printk(KERN_ERR, "Error parsing packet length\n");
rc = -EINVAL;
goto out;
}
out:
return rc;
}
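/*
 * Worked example of the OpenPGP (RFC 2440) style encoding parsed above:
 * a leading byte 0x64 decodes to size 100 with length_size 1, while the
 * byte pair 0xC1 0x10 decodes to (0xC1 - 192) * 256 + 0x10 + 192 = 464
 * with length_size 2.
 */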
/**
* ecryptfs_write_packet_length
* @dest: The byte array target into which to write the length. Must
* have at least ECRYPTFS_MAX_PKT_LEN_SIZE bytes allocated.
* @size: The length to write.
* @packet_size_length: The number of bytes used to encode the packet
* length is written to this address.
*
* Returns zero on success; non-zero on error.
*/
int ecryptfs_write_packet_length(char *dest, size_t size,
size_t *packet_size_length)
{
int rc = 0;
if (size < 192) {
dest[0] = size;
(*packet_size_length) = 1;
} else if (size < 65536) {
dest[0] = (((size - 192) / 256) + 192);
dest[1] = ((size - 192) % 256);
(*packet_size_length) = 2;
} else {
/* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */
rc = -EINVAL;
ecryptfs_printk(KERN_WARNING,
"Unsupported packet size: [%zd]\n", size);
}
return rc;
}
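/*
 * The function above is the exact inverse of ecryptfs_parse_packet_length():
 * sizes below 192 are emitted verbatim as one byte, while e.g. a size of
 * 464 becomes the two bytes 0xC1 0x10.
 */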
static int
write_tag_64_packet(char *signature, struct ecryptfs_session_key *session_key,
char **packet, size_t *packet_len)
{
size_t i = 0;
size_t data_len;
size_t packet_size_len;
char *message;
int rc;
/*
* ***** TAG 64 Packet Format *****
* | Content Type | 1 byte |
* | Key Identifier Size | 1 or 2 bytes |
* | Key Identifier | arbitrary |
* | Encrypted File Encryption Key Size | 1 or 2 bytes |
* | Encrypted File Encryption Key | arbitrary |
*/
data_len = (5 + ECRYPTFS_SIG_SIZE_HEX
+ session_key->encrypted_key_size);
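	/* 5 = 1 tag byte + up to 2 bytes for each of the two length fields */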
*packet = kmalloc(data_len, GFP_KERNEL);
message = *packet;
if (!message) {
ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
rc = -ENOMEM;
goto out;
}
message[i++] = ECRYPTFS_TAG_64_PACKET_TYPE;
rc = ecryptfs_write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX,
&packet_size_len);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error generating tag 64 packet "
"header; cannot generate packet length\n");
goto out;
}
i += packet_size_len;
memcpy(&message[i], signature, ECRYPTFS_SIG_SIZE_HEX);
i += ECRYPTFS_SIG_SIZE_HEX;
rc = ecryptfs_write_packet_length(&message[i],
session_key->encrypted_key_size,
&packet_size_len);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error generating tag 64 packet "
"header; cannot generate packet length\n");
goto out;
}
i += packet_size_len;
memcpy(&message[i], session_key->encrypted_key,
session_key->encrypted_key_size);
i += session_key->encrypted_key_size;
*packet_len = i;
out:
return rc;
}
static int
parse_tag_65_packet(struct ecryptfs_session_key *session_key, u8 *cipher_code,
struct ecryptfs_message *msg)
{
size_t i = 0;
char *data;
size_t data_len;
size_t m_size;
size_t message_len;
u16 checksum = 0;
u16 expected_checksum = 0;
int rc;
/*
* ***** TAG 65 Packet Format *****
* | Content Type | 1 byte |
* | Status Indicator | 1 byte |
* | File Encryption Key Size | 1 or 2 bytes |
* | File Encryption Key | arbitrary |
*/
message_len = msg->data_len;
data = msg->data;
if (message_len < 4) {
rc = -EIO;
goto out;
}
if (data[i++] != ECRYPTFS_TAG_65_PACKET_TYPE) {
ecryptfs_printk(KERN_ERR, "Type should be ECRYPTFS_TAG_65\n");
rc = -EIO;
goto out;
}
if (data[i++]) {
ecryptfs_printk(KERN_ERR, "Status indicator has non-zero value "
"[%d]\n", data[i-1]);
rc = -EIO;
goto out;
}
rc = ecryptfs_parse_packet_length(&data[i], &m_size, &data_len);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error parsing packet length; "
"rc = [%d]\n", rc);
goto out;
}
i += data_len;
if (message_len < (i + m_size)) {
ecryptfs_printk(KERN_ERR, "The message received from ecryptfsd "
"is shorter than expected\n");
rc = -EIO;
goto out;
}
if (m_size < 3) {
ecryptfs_printk(KERN_ERR,
"The decrypted key is not long enough to "
"include a cipher code and checksum\n");
rc = -EIO;
goto out;
}
*cipher_code = data[i++];
/* The decrypted key includes 1 byte cipher code and 2 byte checksum */
session_key->decrypted_key_size = m_size - 3;
if (session_key->decrypted_key_size > ECRYPTFS_MAX_KEY_BYTES) {
ecryptfs_printk(KERN_ERR, "key_size [%d] larger than "
"the maximum key size [%d]\n",
session_key->decrypted_key_size,
				ECRYPTFS_MAX_KEY_BYTES);
rc = -EIO;
goto out;
}
memcpy(session_key->decrypted_key, &data[i],
session_key->decrypted_key_size);
i += session_key->decrypted_key_size;
expected_checksum += (unsigned char)(data[i++]) << 8;
expected_checksum += (unsigned char)(data[i++]);
for (i = 0; i < session_key->decrypted_key_size; i++)
checksum += session_key->decrypted_key[i];
if (expected_checksum != checksum) {
ecryptfs_printk(KERN_ERR, "Invalid checksum for file "
"encryption key; expected [%x]; calculated "
"[%x]\n", expected_checksum, checksum);
rc = -EIO;
}
out:
return rc;
}
static int
write_tag_66_packet(char *signature, u8 cipher_code,
struct ecryptfs_crypt_stat *crypt_stat, char **packet,
size_t *packet_len)
{
size_t i = 0;
size_t j;
size_t data_len;
size_t checksum = 0;
size_t packet_size_len;
char *message;
int rc;
/*
* ***** TAG 66 Packet Format *****
* | Content Type | 1 byte |
* | Key Identifier Size | 1 or 2 bytes |
* | Key Identifier | arbitrary |
* | File Encryption Key Size | 1 or 2 bytes |
* | File Encryption Key | arbitrary |
*/
data_len = (5 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
*packet = kmalloc(data_len, GFP_KERNEL);
message = *packet;
if (!message) {
ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
rc = -ENOMEM;
goto out;
}
message[i++] = ECRYPTFS_TAG_66_PACKET_TYPE;
rc = ecryptfs_write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX,
&packet_size_len);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet "
"header; cannot generate packet length\n");
goto out;
}
i += packet_size_len;
memcpy(&message[i], signature, ECRYPTFS_SIG_SIZE_HEX);
i += ECRYPTFS_SIG_SIZE_HEX;
/* The encrypted key includes 1 byte cipher code and 2 byte checksum */
rc = ecryptfs_write_packet_length(&message[i], crypt_stat->key_size + 3,
&packet_size_len);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet "
"header; cannot generate packet length\n");
goto out;
}
i += packet_size_len;
message[i++] = cipher_code;
memcpy(&message[i], crypt_stat->key, crypt_stat->key_size);
i += crypt_stat->key_size;
for (j = 0; j < crypt_stat->key_size; j++)
checksum += crypt_stat->key[j];
message[i++] = (checksum / 256) % 256;
message[i++] = (checksum % 256);
*packet_len = i;
out:
return rc;
}
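/*
 * Illustrative sketch of the checksum scheme shared by the tag 65 and
 * tag 66 packets above: a 16-bit, big-endian, byte-wise sum of the key
 * material. The helper below is hypothetical (added for illustration
 * only; it is not used elsewhere in the tree):
 */
static inline u16 ecryptfs_example_key_checksum(const u8 *key, size_t len)
{
	u16 sum = 0;
	size_t i;
	for (i = 0; i < len; i++)
		sum += key[i];
	return sum; /* emitted as (sum / 256) % 256, then sum % 256 */
}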
static int
parse_tag_67_packet(struct ecryptfs_key_record *key_rec,
struct ecryptfs_message *msg)
{
size_t i = 0;
char *data;
size_t data_len;
size_t message_len;
int rc;
/*
	 * ***** TAG 67 Packet Format *****
* | Content Type | 1 byte |
* | Status Indicator | 1 byte |
* | Encrypted File Encryption Key Size | 1 or 2 bytes |
* | Encrypted File Encryption Key | arbitrary |
*/
message_len = msg->data_len;
data = msg->data;
/* verify that everything through the encrypted FEK size is present */
if (message_len < 4) {
rc = -EIO;
printk(KERN_ERR "%s: message_len is [%zd]; minimum acceptable "
"message length is [%d]\n", __func__, message_len, 4);
goto out;
}
if (data[i++] != ECRYPTFS_TAG_67_PACKET_TYPE) {
rc = -EIO;
printk(KERN_ERR "%s: Type should be ECRYPTFS_TAG_67\n",
__func__);
goto out;
}
if (data[i++]) {
rc = -EIO;
printk(KERN_ERR "%s: Status indicator has non zero "
"value [%d]\n", __func__, data[i-1]);
goto out;
}
rc = ecryptfs_parse_packet_length(&data[i], &key_rec->enc_key_size,
&data_len);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error parsing packet length; "
"rc = [%d]\n", rc);
goto out;
}
i += data_len;
if (message_len < (i + key_rec->enc_key_size)) {
rc = -EIO;
printk(KERN_ERR "%s: message_len [%zd]; max len is [%zd]\n",
__func__, message_len, (i + key_rec->enc_key_size));
goto out;
}
if (key_rec->enc_key_size > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) {
rc = -EIO;
printk(KERN_ERR "%s: Encrypted key_size [%zd] larger than "
"the maximum key size [%d]\n", __func__,
key_rec->enc_key_size,
ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES);
goto out;
}
memcpy(key_rec->enc_key, &data[i], key_rec->enc_key_size);
out:
return rc;
}
/**
* ecryptfs_verify_version
* @version: The version number to confirm
*
* Returns zero on good version; non-zero otherwise
*/
static int ecryptfs_verify_version(u16 version)
{
int rc = 0;
unsigned char major;
unsigned char minor;
major = ((version >> 8) & 0xFF);
minor = (version & 0xFF);
if (major != ECRYPTFS_VERSION_MAJOR) {
ecryptfs_printk(KERN_ERR, "Major version number mismatch. "
"Expected [%d]; got [%d]\n",
ECRYPTFS_VERSION_MAJOR, major);
rc = -EINVAL;
goto out;
}
if (minor != ECRYPTFS_VERSION_MINOR) {
ecryptfs_printk(KERN_ERR, "Minor version number mismatch. "
"Expected [%d]; got [%d]\n",
ECRYPTFS_VERSION_MINOR, minor);
rc = -EINVAL;
goto out;
}
out:
return rc;
}
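/*
 * The version word packs the major number in the high byte and the
 * minor number in the low byte, so e.g. 0x0304 decodes as major 3,
 * minor 4.
 */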
/**
* ecryptfs_verify_auth_tok_from_key
* @auth_tok_key: key containing the authentication token
* @auth_tok: authentication token
*
* Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
* -EKEYREVOKED if the key was revoked before we acquired its semaphore.
*/
static int
ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
struct ecryptfs_auth_tok **auth_tok)
{
int rc = 0;
(*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
if (IS_ERR(*auth_tok)) {
rc = PTR_ERR(*auth_tok);
*auth_tok = NULL;
goto out;
}
if (ecryptfs_verify_version((*auth_tok)->version)) {
printk(KERN_ERR "Data structure version mismatch. Userspace "
"tools must match eCryptfs kernel module with major "
"version [%d] and minor version [%d]\n",
ECRYPTFS_VERSION_MAJOR, ECRYPTFS_VERSION_MINOR);
rc = -EINVAL;
goto out;
}
if ((*auth_tok)->token_type != ECRYPTFS_PASSWORD
&& (*auth_tok)->token_type != ECRYPTFS_PRIVATE_KEY) {
printk(KERN_ERR "Invalid auth_tok structure "
"returned from key query\n");
rc = -EINVAL;
goto out;
}
out:
return rc;
}
static int
ecryptfs_find_global_auth_tok_for_sig(
struct key **auth_tok_key,
struct ecryptfs_auth_tok **auth_tok,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat, char *sig)
{
struct ecryptfs_global_auth_tok *walker;
int rc = 0;
(*auth_tok_key) = NULL;
(*auth_tok) = NULL;
mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
list_for_each_entry(walker,
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX))
continue;
if (walker->flags & ECRYPTFS_AUTH_TOK_INVALID) {
rc = -EINVAL;
goto out;
}
rc = key_validate(walker->global_auth_tok_key);
if (rc) {
if (rc == -EKEYEXPIRED)
goto out;
goto out_invalid_auth_tok;
}
down_write(&(walker->global_auth_tok_key->sem));
rc = ecryptfs_verify_auth_tok_from_key(
walker->global_auth_tok_key, auth_tok);
if (rc)
goto out_invalid_auth_tok_unlock;
(*auth_tok_key) = walker->global_auth_tok_key;
key_get(*auth_tok_key);
goto out;
}
rc = -ENOENT;
goto out;
out_invalid_auth_tok_unlock:
up_write(&(walker->global_auth_tok_key->sem));
out_invalid_auth_tok:
printk(KERN_WARNING "Invalidating auth tok with sig = [%s]\n", sig);
walker->flags |= ECRYPTFS_AUTH_TOK_INVALID;
key_put(walker->global_auth_tok_key);
walker->global_auth_tok_key = NULL;
out:
mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
return rc;
}
/**
* ecryptfs_find_auth_tok_for_sig
* @auth_tok_key: key containing the authentication token
* @auth_tok: Set to the matching auth_tok; NULL if not found
* @mount_crypt_stat: inode crypt_stat crypto context
* @sig: Sig of auth_tok to find
*
 * For now, this function simply looks at the registered auth_toks
 * linked off the mount_crypt_stat, so all the auth_toks that can be
 * used must be registered at mount time. This function could
 * potentially try a lot harder to find auth_toks (e.g., by calling
 * out to ecryptfsd to dynamically retrieve an auth_tok object) so
 * that static registration of auth_toks will no longer be necessary.
*
* Returns zero on no error; non-zero on error
*/
static int
ecryptfs_find_auth_tok_for_sig(
struct key **auth_tok_key,
struct ecryptfs_auth_tok **auth_tok,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
char *sig)
{
int rc = 0;
rc = ecryptfs_find_global_auth_tok_for_sig(auth_tok_key, auth_tok,
mount_crypt_stat, sig);
if (rc == -ENOENT) {
		/* If the flag ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY is set in the
		 * mount_crypt_stat structure, refuse to use auth toks that
		 * were not inserted through the ecryptfs_add_global_auth_tok
		 * function.
*/
if (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY)
return -EINVAL;
rc = ecryptfs_keyring_auth_tok_for_sig(auth_tok_key, auth_tok,
sig);
}
return rc;
}
/*
* write_tag_70_packet can gobble a lot of stack space. We stuff most
* of the function's parameters in a kmalloc'd struct to help reduce
* eCryptfs' overall stack usage.
*/
struct ecryptfs_write_tag_70_packet_silly_stack {
u8 cipher_code;
size_t max_packet_size;
size_t packet_size_len;
size_t block_aligned_filename_size;
size_t block_size;
size_t i;
size_t j;
size_t num_rand_bytes;
struct mutex *tfm_mutex;
char *block_aligned_filename;
struct ecryptfs_auth_tok *auth_tok;
struct scatterlist src_sg[2];
struct scatterlist dst_sg[2];
struct crypto_skcipher *skcipher_tfm;
struct skcipher_request *skcipher_req;
char iv[ECRYPTFS_MAX_IV_BYTES];
char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
char tmp_hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
struct crypto_shash *hash_tfm;
struct shash_desc *hash_desc;
};
/*
* write_tag_70_packet - Write encrypted filename (EFN) packet against FNEK
* @filename: NULL-terminated filename string
*
* This is the simplest mechanism for achieving filename encryption in
* eCryptfs. It encrypts the given filename with the mount-wide
* filename encryption key (FNEK) and stores it in a packet to @dest,
 * which the caller will encode and write directly into the dentry
* name.
*/
int
ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
size_t *packet_size,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
char *filename, size_t filename_size)
{
struct ecryptfs_write_tag_70_packet_silly_stack *s;
struct key *auth_tok_key = NULL;
int rc = 0;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
(*packet_size) = 0;
rc = ecryptfs_find_auth_tok_for_sig(
&auth_tok_key,
&s->auth_tok, mount_crypt_stat,
mount_crypt_stat->global_default_fnek_sig);
if (rc) {
printk(KERN_ERR "%s: Error attempting to find auth tok for "
"fnek sig [%s]; rc = [%d]\n", __func__,
mount_crypt_stat->global_default_fnek_sig, rc);
goto out;
}
rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(
&s->skcipher_tfm,
&s->tfm_mutex, mount_crypt_stat->global_default_fn_cipher_name);
if (unlikely(rc)) {
printk(KERN_ERR "Internal error whilst attempting to get "
"tfm and mutex for cipher name [%s]; rc = [%d]\n",
mount_crypt_stat->global_default_fn_cipher_name, rc);
goto out;
}
mutex_lock(s->tfm_mutex);
s->block_size = crypto_skcipher_blocksize(s->skcipher_tfm);
/* Plus one for the \0 separator between the random prefix
* and the plaintext filename */
s->num_rand_bytes = (ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES + 1);
s->block_aligned_filename_size = (s->num_rand_bytes + filename_size);
if ((s->block_aligned_filename_size % s->block_size) != 0) {
s->num_rand_bytes += (s->block_size
- (s->block_aligned_filename_size
% s->block_size));
s->block_aligned_filename_size = (s->num_rand_bytes
+ filename_size);
}
/* Octet 0: Tag 70 identifier
* Octets 1-N1: Tag 70 packet size (includes cipher identifier
* and block-aligned encrypted filename size)
* Octets N1-N2: FNEK sig (ECRYPTFS_SIG_SIZE)
* Octet N2-N3: Cipher identifier (1 octet)
* Octets N3-N4: Block-aligned encrypted filename
* - Consists of a minimum number of random characters, a \0
* separator, and then the filename */
s->max_packet_size = (ECRYPTFS_TAG_70_MAX_METADATA_SIZE
+ s->block_aligned_filename_size);
if (!dest) {
(*packet_size) = s->max_packet_size;
goto out_unlock;
}
if (s->max_packet_size > (*remaining_bytes)) {
printk(KERN_WARNING "%s: Require [%zd] bytes to write; only "
"[%zd] available\n", __func__, s->max_packet_size,
(*remaining_bytes));
rc = -EINVAL;
goto out_unlock;
}
s->skcipher_req = skcipher_request_alloc(s->skcipher_tfm, GFP_KERNEL);
if (!s->skcipher_req) {
printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
"skcipher_request_alloc for %s\n", __func__,
crypto_skcipher_driver_name(s->skcipher_tfm));
rc = -ENOMEM;
goto out_unlock;
}
skcipher_request_set_callback(s->skcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
s->block_aligned_filename = kzalloc(s->block_aligned_filename_size,
GFP_KERNEL);
if (!s->block_aligned_filename) {
rc = -ENOMEM;
goto out_unlock;
}
dest[s->i++] = ECRYPTFS_TAG_70_PACKET_TYPE;
rc = ecryptfs_write_packet_length(&dest[s->i],
(ECRYPTFS_SIG_SIZE
+ 1 /* Cipher code */
+ s->block_aligned_filename_size),
&s->packet_size_len);
if (rc) {
printk(KERN_ERR "%s: Error generating tag 70 packet "
"header; cannot generate packet length; rc = [%d]\n",
__func__, rc);
goto out_free_unlock;
}
s->i += s->packet_size_len;
ecryptfs_from_hex(&dest[s->i],
mount_crypt_stat->global_default_fnek_sig,
ECRYPTFS_SIG_SIZE);
s->i += ECRYPTFS_SIG_SIZE;
s->cipher_code = ecryptfs_code_for_cipher_string(
mount_crypt_stat->global_default_fn_cipher_name,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
if (s->cipher_code == 0) {
printk(KERN_WARNING "%s: Unable to generate code for "
"cipher [%s] with key bytes [%zd]\n", __func__,
mount_crypt_stat->global_default_fn_cipher_name,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
rc = -EINVAL;
goto out_free_unlock;
}
dest[s->i++] = s->cipher_code;
/* TODO: Support other key modules than passphrase for
* filename encryption */
if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
rc = -EOPNOTSUPP;
printk(KERN_INFO "%s: Filename encryption only supports "
"password tokens\n", __func__);
goto out_free_unlock;
}
s->hash_tfm = crypto_alloc_shash(ECRYPTFS_TAG_70_DIGEST, 0, 0);
if (IS_ERR(s->hash_tfm)) {
rc = PTR_ERR(s->hash_tfm);
printk(KERN_ERR "%s: Error attempting to "
"allocate hash crypto context; rc = [%d]\n",
__func__, rc);
goto out_free_unlock;
}
s->hash_desc = kmalloc(sizeof(*s->hash_desc) +
crypto_shash_descsize(s->hash_tfm), GFP_KERNEL);
if (!s->hash_desc) {
rc = -ENOMEM;
goto out_release_free_unlock;
}
s->hash_desc->tfm = s->hash_tfm;
rc = crypto_shash_digest(s->hash_desc,
(u8 *)s->auth_tok->token.password.session_key_encryption_key,
s->auth_tok->token.password.session_key_encryption_key_bytes,
s->hash);
if (rc) {
printk(KERN_ERR
"%s: Error computing crypto hash; rc = [%d]\n",
__func__, rc);
goto out_release_free_unlock;
}
for (s->j = 0; s->j < (s->num_rand_bytes - 1); s->j++) {
s->block_aligned_filename[s->j] =
s->hash[(s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)];
if ((s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)
== (ECRYPTFS_TAG_70_DIGEST_SIZE - 1)) {
rc = crypto_shash_digest(s->hash_desc, (u8 *)s->hash,
ECRYPTFS_TAG_70_DIGEST_SIZE,
s->tmp_hash);
if (rc) {
printk(KERN_ERR
"%s: Error computing crypto hash; "
"rc = [%d]\n", __func__, rc);
goto out_release_free_unlock;
}
memcpy(s->hash, s->tmp_hash,
ECRYPTFS_TAG_70_DIGEST_SIZE);
}
if (s->block_aligned_filename[s->j] == '\0')
s->block_aligned_filename[s->j] = ECRYPTFS_NON_NULL;
}
memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename,
filename_size);
rc = virt_to_scatterlist(s->block_aligned_filename,
s->block_aligned_filename_size, s->src_sg, 2);
if (rc < 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
"convert filename memory to scatterlist; rc = [%d]. "
"block_aligned_filename_size = [%zd]\n", __func__, rc,
s->block_aligned_filename_size);
goto out_release_free_unlock;
}
rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size,
s->dst_sg, 2);
if (rc < 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
"convert encrypted filename memory to scatterlist; "
"rc = [%d]. block_aligned_filename_size = [%zd]\n",
__func__, rc, s->block_aligned_filename_size);
goto out_release_free_unlock;
}
/* The characters in the first block effectively do the job
* of the IV here, so we just use 0's for the IV. Note the
* constraint that ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES
* >= ECRYPTFS_MAX_IV_BYTES. */
rc = crypto_skcipher_setkey(
s->skcipher_tfm,
s->auth_tok->token.password.session_key_encryption_key,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
if (rc < 0) {
printk(KERN_ERR "%s: Error setting key for crypto context; "
"rc = [%d]. s->auth_tok->token.password.session_key_"
"encryption_key = [0x%p]; mount_crypt_stat->"
"global_default_fn_cipher_key_bytes = [%zd]\n", __func__,
rc,
s->auth_tok->token.password.session_key_encryption_key,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
goto out_release_free_unlock;
}
skcipher_request_set_crypt(s->skcipher_req, s->src_sg, s->dst_sg,
s->block_aligned_filename_size, s->iv);
rc = crypto_skcipher_encrypt(s->skcipher_req);
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt filename; "
"rc = [%d]\n", __func__, rc);
goto out_release_free_unlock;
}
s->i += s->block_aligned_filename_size;
(*packet_size) = s->i;
(*remaining_bytes) -= (*packet_size);
out_release_free_unlock:
crypto_free_shash(s->hash_tfm);
out_free_unlock:
kfree_sensitive(s->block_aligned_filename);
out_unlock:
mutex_unlock(s->tfm_mutex);
out:
if (auth_tok_key) {
up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
}
skcipher_request_free(s->skcipher_req);
kfree_sensitive(s->hash_desc);
kfree(s);
return rc;
}
struct ecryptfs_parse_tag_70_packet_silly_stack {
u8 cipher_code;
size_t max_packet_size;
size_t packet_size_len;
size_t parsed_tag_70_packet_size;
size_t block_aligned_filename_size;
size_t block_size;
size_t i;
struct mutex *tfm_mutex;
char *decrypted_filename;
struct ecryptfs_auth_tok *auth_tok;
struct scatterlist src_sg[2];
struct scatterlist dst_sg[2];
struct crypto_skcipher *skcipher_tfm;
struct skcipher_request *skcipher_req;
char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
char iv[ECRYPTFS_MAX_IV_BYTES];
char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
};
/**
* ecryptfs_parse_tag_70_packet - Parse and process FNEK-encrypted passphrase packet
* @filename: This function kmalloc's the memory for the filename
* @filename_size: This function sets this to the amount of memory
* kmalloc'd for the filename
 * @packet_size: This function sets this to the number of octets
* in the packet parsed
* @mount_crypt_stat: The mount-wide cryptographic context
* @data: The memory location containing the start of the tag 70
* packet
* @max_packet_size: The maximum legal size of the packet to be parsed
* from @data
*
* Returns zero on success; non-zero otherwise
*/
int
ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
size_t *packet_size,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
char *data, size_t max_packet_size)
{
struct ecryptfs_parse_tag_70_packet_silly_stack *s;
struct key *auth_tok_key = NULL;
int rc = 0;
(*packet_size) = 0;
(*filename_size) = 0;
(*filename) = NULL;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
if (max_packet_size < ECRYPTFS_TAG_70_MIN_METADATA_SIZE) {
printk(KERN_WARNING "%s: max_packet_size is [%zd]; it must be "
"at least [%d]\n", __func__, max_packet_size,
ECRYPTFS_TAG_70_MIN_METADATA_SIZE);
rc = -EINVAL;
goto out;
}
/* Octet 0: Tag 70 identifier
* Octets 1-N1: Tag 70 packet size (includes cipher identifier
* and block-aligned encrypted filename size)
* Octets N1-N2: FNEK sig (ECRYPTFS_SIG_SIZE)
* Octet N2-N3: Cipher identifier (1 octet)
* Octets N3-N4: Block-aligned encrypted filename
* - Consists of a minimum number of random numbers, a \0
* separator, and then the filename */
if (data[(*packet_size)++] != ECRYPTFS_TAG_70_PACKET_TYPE) {
printk(KERN_WARNING "%s: Invalid packet tag [0x%.2x]; must be "
"tag [0x%.2x]\n", __func__,
data[((*packet_size) - 1)], ECRYPTFS_TAG_70_PACKET_TYPE);
rc = -EINVAL;
goto out;
}
rc = ecryptfs_parse_packet_length(&data[(*packet_size)],
&s->parsed_tag_70_packet_size,
&s->packet_size_len);
if (rc) {
printk(KERN_WARNING "%s: Error parsing packet length; "
"rc = [%d]\n", __func__, rc);
goto out;
}
s->block_aligned_filename_size = (s->parsed_tag_70_packet_size
- ECRYPTFS_SIG_SIZE - 1);
if ((1 + s->packet_size_len + s->parsed_tag_70_packet_size)
> max_packet_size) {
printk(KERN_WARNING "%s: max_packet_size is [%zd]; real packet "
"size is [%zd]\n", __func__, max_packet_size,
		       (1 + s->packet_size_len
			+ s->parsed_tag_70_packet_size));
rc = -EINVAL;
goto out;
}
(*packet_size) += s->packet_size_len;
ecryptfs_to_hex(s->fnek_sig_hex, &data[(*packet_size)],
ECRYPTFS_SIG_SIZE);
s->fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX] = '\0';
(*packet_size) += ECRYPTFS_SIG_SIZE;
s->cipher_code = data[(*packet_size)++];
rc = ecryptfs_cipher_code_to_string(s->cipher_string, s->cipher_code);
if (rc) {
printk(KERN_WARNING "%s: Cipher code [%d] is invalid\n",
__func__, s->cipher_code);
goto out;
}
rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key,
&s->auth_tok, mount_crypt_stat,
s->fnek_sig_hex);
if (rc) {
printk(KERN_ERR "%s: Error attempting to find auth tok for "
"fnek sig [%s]; rc = [%d]\n", __func__, s->fnek_sig_hex,
rc);
goto out;
}
rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->skcipher_tfm,
&s->tfm_mutex,
s->cipher_string);
if (unlikely(rc)) {
printk(KERN_ERR "Internal error whilst attempting to get "
"tfm and mutex for cipher name [%s]; rc = [%d]\n",
s->cipher_string, rc);
goto out;
}
mutex_lock(s->tfm_mutex);
rc = virt_to_scatterlist(&data[(*packet_size)],
s->block_aligned_filename_size, s->src_sg, 2);
if (rc < 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
"convert encrypted filename memory to scatterlist; "
"rc = [%d]. block_aligned_filename_size = [%zd]\n",
__func__, rc, s->block_aligned_filename_size);
goto out_unlock;
}
(*packet_size) += s->block_aligned_filename_size;
s->decrypted_filename = kmalloc(s->block_aligned_filename_size,
GFP_KERNEL);
if (!s->decrypted_filename) {
rc = -ENOMEM;
goto out_unlock;
}
rc = virt_to_scatterlist(s->decrypted_filename,
s->block_aligned_filename_size, s->dst_sg, 2);
if (rc < 1) {
printk(KERN_ERR "%s: Internal error whilst attempting to "
"convert decrypted filename memory to scatterlist; "
"rc = [%d]. block_aligned_filename_size = [%zd]\n",
__func__, rc, s->block_aligned_filename_size);
goto out_free_unlock;
}
s->skcipher_req = skcipher_request_alloc(s->skcipher_tfm, GFP_KERNEL);
if (!s->skcipher_req) {
printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
"skcipher_request_alloc for %s\n", __func__,
crypto_skcipher_driver_name(s->skcipher_tfm));
rc = -ENOMEM;
goto out_free_unlock;
}
skcipher_request_set_callback(s->skcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
/* The characters in the first block effectively do the job of
* the IV here, so we just use 0's for the IV. Note the
* constraint that ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES
* >= ECRYPTFS_MAX_IV_BYTES. */
/* TODO: Support other key modules than passphrase for
* filename encryption */
if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
rc = -EOPNOTSUPP;
printk(KERN_INFO "%s: Filename encryption only supports "
"password tokens\n", __func__);
goto out_free_unlock;
}
rc = crypto_skcipher_setkey(
s->skcipher_tfm,
s->auth_tok->token.password.session_key_encryption_key,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
if (rc < 0) {
printk(KERN_ERR "%s: Error setting key for crypto context; "
"rc = [%d]. s->auth_tok->token.password.session_key_"
"encryption_key = [0x%p]; mount_crypt_stat->"
"global_default_fn_cipher_key_bytes = [%zd]\n", __func__,
rc,
s->auth_tok->token.password.session_key_encryption_key,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
goto out_free_unlock;
}
skcipher_request_set_crypt(s->skcipher_req, s->src_sg, s->dst_sg,
s->block_aligned_filename_size, s->iv);
rc = crypto_skcipher_decrypt(s->skcipher_req);
if (rc) {
printk(KERN_ERR "%s: Error attempting to decrypt filename; "
"rc = [%d]\n", __func__, rc);
goto out_free_unlock;
}
while (s->i < s->block_aligned_filename_size &&
s->decrypted_filename[s->i] != '\0')
s->i++;
if (s->i == s->block_aligned_filename_size) {
printk(KERN_WARNING "%s: Invalid tag 70 packet; could not "
"find valid separator between random characters and "
"the filename\n", __func__);
rc = -EINVAL;
goto out_free_unlock;
}
s->i++;
(*filename_size) = (s->block_aligned_filename_size - s->i);
if (!((*filename_size) > 0 && (*filename_size < PATH_MAX))) {
printk(KERN_WARNING "%s: Filename size is [%zd], which is "
"invalid\n", __func__, (*filename_size));
rc = -EINVAL;
goto out_free_unlock;
}
(*filename) = kmalloc(((*filename_size) + 1), GFP_KERNEL);
if (!(*filename)) {
rc = -ENOMEM;
goto out_free_unlock;
}
memcpy((*filename), &s->decrypted_filename[s->i], (*filename_size));
(*filename)[(*filename_size)] = '\0';
out_free_unlock:
kfree(s->decrypted_filename);
out_unlock:
mutex_unlock(s->tfm_mutex);
out:
if (rc) {
(*packet_size) = 0;
(*filename_size) = 0;
(*filename) = NULL;
}
if (auth_tok_key) {
up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
}
skcipher_request_free(s->skcipher_req);
kfree(s);
return rc;
}
static int
ecryptfs_get_auth_tok_sig(char **sig, struct ecryptfs_auth_tok *auth_tok)
{
int rc = 0;
(*sig) = NULL;
switch (auth_tok->token_type) {
case ECRYPTFS_PASSWORD:
(*sig) = auth_tok->token.password.signature;
break;
case ECRYPTFS_PRIVATE_KEY:
(*sig) = auth_tok->token.private_key.signature;
break;
default:
printk(KERN_ERR "Cannot get sig for auth_tok of type [%d]\n",
auth_tok->token_type);
rc = -EINVAL;
}
return rc;
}
/**
* decrypt_pki_encrypted_session_key - Decrypt the session key with the given auth_tok.
* @auth_tok: The key authentication token used to decrypt the session key
* @crypt_stat: The cryptographic context
*
* Returns zero on success; non-zero error otherwise.
*/
static int
decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
struct ecryptfs_crypt_stat *crypt_stat)
{
u8 cipher_code = 0;
struct ecryptfs_msg_ctx *msg_ctx;
struct ecryptfs_message *msg = NULL;
char *auth_tok_sig;
char *payload = NULL;
size_t payload_len = 0;
int rc;
rc = ecryptfs_get_auth_tok_sig(&auth_tok_sig, auth_tok);
if (rc) {
printk(KERN_ERR "Unrecognized auth tok type: [%d]\n",
auth_tok->token_type);
goto out;
}
rc = write_tag_64_packet(auth_tok_sig, &(auth_tok->session_key),
&payload, &payload_len);
if (rc) {
ecryptfs_printk(KERN_ERR, "Failed to write tag 64 packet\n");
goto out;
}
rc = ecryptfs_send_message(payload, payload_len, &msg_ctx);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error sending message to "
"ecryptfsd: %d\n", rc);
goto out;
}
rc = ecryptfs_wait_for_response(msg_ctx, &msg);
if (rc) {
ecryptfs_printk(KERN_ERR, "Failed to receive tag 65 packet "
"from the user space daemon\n");
rc = -EIO;
goto out;
}
rc = parse_tag_65_packet(&(auth_tok->session_key),
&cipher_code, msg);
if (rc) {
printk(KERN_ERR "Failed to parse tag 65 packet; rc = [%d]\n",
rc);
goto out;
}
auth_tok->session_key.flags |= ECRYPTFS_CONTAINS_DECRYPTED_KEY;
memcpy(crypt_stat->key, auth_tok->session_key.decrypted_key,
auth_tok->session_key.decrypted_key_size);
crypt_stat->key_size = auth_tok->session_key.decrypted_key_size;
rc = ecryptfs_cipher_code_to_string(crypt_stat->cipher, cipher_code);
if (rc) {
ecryptfs_printk(KERN_ERR, "Cipher code [%d] is invalid\n",
cipher_code);
goto out;
}
crypt_stat->flags |= ECRYPTFS_KEY_VALID;
if (ecryptfs_verbosity > 0) {
ecryptfs_printk(KERN_DEBUG, "Decrypted session key:\n");
ecryptfs_dump_hex(crypt_stat->key,
crypt_stat->key_size);
}
out:
kfree(msg);
kfree(payload);
return rc;
}
static void wipe_auth_tok_list(struct list_head *auth_tok_list_head)
{
struct ecryptfs_auth_tok_list_item *auth_tok_list_item;
struct ecryptfs_auth_tok_list_item *auth_tok_list_item_tmp;
list_for_each_entry_safe(auth_tok_list_item, auth_tok_list_item_tmp,
auth_tok_list_head, list) {
list_del(&auth_tok_list_item->list);
kmem_cache_free(ecryptfs_auth_tok_list_item_cache,
auth_tok_list_item);
}
}
struct kmem_cache *ecryptfs_auth_tok_list_item_cache;
/**
* parse_tag_1_packet
* @crypt_stat: The cryptographic context to modify based on packet contents
* @data: The raw bytes of the packet.
* @auth_tok_list: eCryptfs parses packets into authentication tokens;
* a new authentication token will be placed at the
* end of this list for this packet.
* @new_auth_tok: Pointer to a pointer to memory that this function
* allocates; sets the memory address of the pointer to
* NULL on error. This object is added to the
* auth_tok_list.
* @packet_size: This function writes the size of the parsed packet
* into this memory location; zero on error.
* @max_packet_size: The maximum allowable packet size
*
* Returns zero on success; non-zero on error.
*/
static int
parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
unsigned char *data, struct list_head *auth_tok_list,
struct ecryptfs_auth_tok **new_auth_tok,
size_t *packet_size, size_t max_packet_size)
{
size_t body_size;
struct ecryptfs_auth_tok_list_item *auth_tok_list_item;
size_t length_size;
int rc = 0;
(*packet_size) = 0;
(*new_auth_tok) = NULL;
/**
* This format is inspired by OpenPGP; see RFC 2440
* packet tag 1
*
* Tag 1 identifier (1 byte)
* Max Tag 1 packet size (max 3 bytes)
* Version (1 byte)
* Key identifier (8 bytes; ECRYPTFS_SIG_SIZE)
* Cipher identifier (1 byte)
* Encrypted key size (arbitrary)
*
* 12 bytes minimum packet size
*/
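/* Illustrative sizing example (added note, not from the original
 * source): with a zero-length encrypted key the packet is exactly
 * the 12-byte minimum checked below: 1 (tag) + 1 (a single length
 * octet, which RFC 2440 allows for body sizes under 192) + 1
 * (version) + 8 (ECRYPTFS_SIG_SIZE key identifier) + 1 (cipher
 * identifier). A 16-byte encrypted key would give a body of
 * 1 + 8 + 1 + 16 = 26 (0x1a) octets and a 28-byte packet. */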
if (unlikely(max_packet_size < 12)) {
printk(KERN_ERR "Invalid max packet size; must be >=12\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != ECRYPTFS_TAG_1_PACKET_TYPE) {
printk(KERN_ERR "Enter w/ first byte != 0x%.2x\n",
ECRYPTFS_TAG_1_PACKET_TYPE);
rc = -EINVAL;
goto out;
}
/* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
* at end of function upon failure */
auth_tok_list_item =
kmem_cache_zalloc(ecryptfs_auth_tok_list_item_cache,
GFP_KERNEL);
if (!auth_tok_list_item) {
printk(KERN_ERR "Unable to allocate memory\n");
rc = -ENOMEM;
goto out;
}
(*new_auth_tok) = &auth_tok_list_item->auth_tok;
rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
&length_size);
if (rc) {
printk(KERN_WARNING "Error parsing packet length; "
"rc = [%d]\n", rc);
goto out_free;
}
if (unlikely(body_size < (ECRYPTFS_SIG_SIZE + 2))) {
printk(KERN_WARNING "Invalid body size ([%td])\n", body_size);
rc = -EINVAL;
goto out_free;
}
(*packet_size) += length_size;
if (unlikely((*packet_size) + body_size > max_packet_size)) {
printk(KERN_WARNING "Packet size exceeds max\n");
rc = -EINVAL;
goto out_free;
}
if (unlikely(data[(*packet_size)++] != 0x03)) {
printk(KERN_WARNING "Unknown version number [%d]\n",
data[(*packet_size) - 1]);
rc = -EINVAL;
goto out_free;
}
ecryptfs_to_hex((*new_auth_tok)->token.private_key.signature,
&data[(*packet_size)], ECRYPTFS_SIG_SIZE);
*packet_size += ECRYPTFS_SIG_SIZE;
/* This byte is skipped because the kernel does not need to
* know which public key encryption algorithm was used */
(*packet_size)++;
(*new_auth_tok)->session_key.encrypted_key_size =
body_size - (ECRYPTFS_SIG_SIZE + 2);
if ((*new_auth_tok)->session_key.encrypted_key_size
> ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) {
printk(KERN_WARNING "Tag 1 packet contains key larger "
"than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
rc = -EINVAL;
goto out_free;
}
memcpy((*new_auth_tok)->session_key.encrypted_key,
&data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
(*packet_size) += (*new_auth_tok)->session_key.encrypted_key_size;
(*new_auth_tok)->session_key.flags &=
~ECRYPTFS_CONTAINS_DECRYPTED_KEY;
(*new_auth_tok)->session_key.flags |=
ECRYPTFS_CONTAINS_ENCRYPTED_KEY;
(*new_auth_tok)->token_type = ECRYPTFS_PRIVATE_KEY;
(*new_auth_tok)->flags = 0;
(*new_auth_tok)->session_key.flags &=
~(ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT);
(*new_auth_tok)->session_key.flags &=
~(ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT);
list_add(&auth_tok_list_item->list, auth_tok_list);
goto out;
out_free:
(*new_auth_tok) = NULL;
memset(auth_tok_list_item, 0,
sizeof(struct ecryptfs_auth_tok_list_item));
kmem_cache_free(ecryptfs_auth_tok_list_item_cache,
auth_tok_list_item);
out:
if (rc)
(*packet_size) = 0;
return rc;
}
/**
* parse_tag_3_packet
* @crypt_stat: The cryptographic context to modify based on packet
* contents.
* @data: The raw bytes of the packet.
* @auth_tok_list: eCryptfs parses packets into authentication tokens;
* a new authentication token will be placed at the end
* of this list for this packet.
* @new_auth_tok: Pointer to a pointer to memory that this function
* allocates; sets the memory address of the pointer to
* NULL on error. This object is added to the
* auth_tok_list.
* @packet_size: This function writes the size of the parsed packet
* into this memory location; zero on error.
* @max_packet_size: maximum number of bytes to parse
*
* Returns zero on success; non-zero on error.
*/
static int
parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
unsigned char *data, struct list_head *auth_tok_list,
struct ecryptfs_auth_tok **new_auth_tok,
size_t *packet_size, size_t max_packet_size)
{
size_t body_size;
struct ecryptfs_auth_tok_list_item *auth_tok_list_item;
size_t length_size;
int rc = 0;
(*packet_size) = 0;
(*new_auth_tok) = NULL;
/**
* This format is inspired by OpenPGP; see RFC 2440
* packet tag 3
*
* Tag 3 identifier (1 byte)
* Max Tag 3 packet size (max 3 bytes)
* Version (1 byte)
* Cipher code (1 byte)
* S2K specifier (1 byte)
* Hash identifier (1 byte)
* Salt (ECRYPTFS_SALT_SIZE)
* Hash iterations (1 byte)
* Encrypted key (arbitrary)
*
* (ECRYPTFS_SALT_SIZE + 7) minimum packet size
*/
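/* Illustrative arithmetic (added note): with an 8-byte
 * ECRYPTFS_SALT_SIZE, the minimum body is 8 + 5 = 13 octets
 * (version, cipher code, S2K specifier, hash identifier, salt,
 * iteration byte) plus a zero-length key; adding the tag octet and
 * a single length octet yields the (ECRYPTFS_SALT_SIZE + 7) = 15
 * byte minimum checked below. */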
if (max_packet_size < (ECRYPTFS_SALT_SIZE + 7)) {
printk(KERN_ERR "Max packet size too large\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != ECRYPTFS_TAG_3_PACKET_TYPE) {
printk(KERN_ERR "First byte != 0x%.2x; invalid packet\n",
ECRYPTFS_TAG_3_PACKET_TYPE);
rc = -EINVAL;
goto out;
}
/* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
* at end of function upon failure */
auth_tok_list_item =
kmem_cache_zalloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
if (!auth_tok_list_item) {
printk(KERN_ERR "Unable to allocate memory\n");
rc = -ENOMEM;
goto out;
}
(*new_auth_tok) = &auth_tok_list_item->auth_tok;
rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
&length_size);
if (rc) {
printk(KERN_WARNING "Error parsing packet length; rc = [%d]\n",
rc);
goto out_free;
}
if (unlikely(body_size < (ECRYPTFS_SALT_SIZE + 5))) {
printk(KERN_WARNING "Invalid body size ([%td])\n", body_size);
rc = -EINVAL;
goto out_free;
}
(*packet_size) += length_size;
if (unlikely((*packet_size) + body_size > max_packet_size)) {
printk(KERN_ERR "Packet size exceeds max\n");
rc = -EINVAL;
goto out_free;
}
(*new_auth_tok)->session_key.encrypted_key_size =
(body_size - (ECRYPTFS_SALT_SIZE + 5));
if ((*new_auth_tok)->session_key.encrypted_key_size
> ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) {
printk(KERN_WARNING "Tag 3 packet contains key larger "
"than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
rc = -EINVAL;
goto out_free;
}
if (unlikely(data[(*packet_size)++] != 0x04)) {
printk(KERN_WARNING "Unknown version number [%d]\n",
data[(*packet_size) - 1]);
rc = -EINVAL;
goto out_free;
}
rc = ecryptfs_cipher_code_to_string(crypt_stat->cipher,
(u16)data[(*packet_size)]);
if (rc)
goto out_free;
/* A little extra work to differentiate among the AES key
* sizes; see RFC2440 */
switch (data[(*packet_size)++]) {
case RFC2440_CIPHER_AES_192:
crypt_stat->key_size = 24;
break;
default:
crypt_stat->key_size =
(*new_auth_tok)->session_key.encrypted_key_size;
}
rc = ecryptfs_init_crypt_ctx(crypt_stat);
if (rc)
goto out_free;
if (unlikely(data[(*packet_size)++] != 0x03)) {
printk(KERN_WARNING "Only S2K ID 3 is currently supported\n");
rc = -ENOSYS;
goto out_free;
}
/* TODO: finish the hash mapping */
switch (data[(*packet_size)++]) {
case 0x01: /* See RFC2440 for these numbers and their mappings */
/* Choose MD5 */
memcpy((*new_auth_tok)->token.password.salt,
&data[(*packet_size)], ECRYPTFS_SALT_SIZE);
(*packet_size) += ECRYPTFS_SALT_SIZE;
/* This conversion was taken straight from RFC2440 */
(*new_auth_tok)->token.password.hash_iterations =
((u32) 16 + (data[(*packet_size)] & 15))
<< ((data[(*packet_size)] >> 4) + 6);
(*packet_size)++;
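/* Worked example (added note): for the coded count byte 0x60,
 * the low nibble is 0 and the high nibble is 6, giving
 * (16 + 0) << (6 + 6) = 65536 iterations, which matches the
 * 0x60 iteration byte that write_tag_3_packet() emits. */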
/* Friendly reminder:
* (*new_auth_tok)->session_key.encrypted_key_size =
* (body_size - (ECRYPTFS_SALT_SIZE + 5)); */
memcpy((*new_auth_tok)->session_key.encrypted_key,
&data[(*packet_size)],
(*new_auth_tok)->session_key.encrypted_key_size);
(*packet_size) +=
(*new_auth_tok)->session_key.encrypted_key_size;
(*new_auth_tok)->session_key.flags &=
~ECRYPTFS_CONTAINS_DECRYPTED_KEY;
(*new_auth_tok)->session_key.flags |=
ECRYPTFS_CONTAINS_ENCRYPTED_KEY;
(*new_auth_tok)->token.password.hash_algo = 0x01; /* MD5 */
break;
default:
ecryptfs_printk(KERN_ERR, "Unsupported hash algorithm: "
"[%d]\n", data[(*packet_size) - 1]);
rc = -ENOSYS;
goto out_free;
}
(*new_auth_tok)->token_type = ECRYPTFS_PASSWORD;
/* TODO: Parameterize; we might actually want userspace to
 * decrypt the session key. */
(*new_auth_tok)->session_key.flags &=
~(ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT);
(*new_auth_tok)->session_key.flags &=
~(ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT);
list_add(&auth_tok_list_item->list, auth_tok_list);
goto out;
out_free:
(*new_auth_tok) = NULL;
memset(auth_tok_list_item, 0,
sizeof(struct ecryptfs_auth_tok_list_item));
kmem_cache_free(ecryptfs_auth_tok_list_item_cache,
auth_tok_list_item);
out:
if (rc)
(*packet_size) = 0;
return rc;
}
/**
* parse_tag_11_packet
* @data: The raw bytes of the packet
* @contents: This function writes the data contents of the literal
* packet into this memory location
* @max_contents_bytes: The maximum number of bytes that this function
* is allowed to write into contents
* @tag_11_contents_size: This function writes the size of the parsed
* contents into this memory location; zero on
* error
* @packet_size: This function writes the size of the parsed packet
* into this memory location; zero on error
* @max_packet_size: maximum number of bytes to parse
*
* Returns zero on success; non-zero on error.
*/
static int
parse_tag_11_packet(unsigned char *data, unsigned char *contents,
size_t max_contents_bytes, size_t *tag_11_contents_size,
size_t *packet_size, size_t max_packet_size)
{
size_t body_size;
size_t length_size;
int rc = 0;
(*packet_size) = 0;
(*tag_11_contents_size) = 0;
/* This format is inspired by OpenPGP; see RFC 2440
* packet tag 11
*
* Tag 11 identifier (1 byte)
* Max Tag 11 packet size (max 3 bytes)
* Binary format specifier (1 byte)
* Filename length (1 byte)
* Filename ("_CONSOLE") (8 bytes)
* Modification date (4 bytes)
* Literal data (arbitrary)
*
* We need at least 16 bytes of data for the packet to even be
* valid.
*/
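/* Illustrative example (added note): the tag 11 packet eCryptfs
 * itself generates carries an ECRYPTFS_SIG_SIZE (8-byte) binary
 * signature, so body_size is 14 + 8 = 22 (0x16) octets and the
 * whole packet is 1 (tag) + 1 (length octet) + 22 = 24 bytes:
 *
 * [tag][0x16][0x62][0x08]"_CONSOLE"[4 zero date bytes][8 content bytes]
 */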
if (max_packet_size < 16) {
printk(KERN_ERR "Maximum packet size too small\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != ECRYPTFS_TAG_11_PACKET_TYPE) {
printk(KERN_WARNING "Invalid tag 11 packet format\n");
rc = -EINVAL;
goto out;
}
rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
&length_size);
if (rc) {
printk(KERN_WARNING "Invalid tag 11 packet format\n");
goto out;
}
if (body_size < 14) {
printk(KERN_WARNING "Invalid body size ([%td])\n", body_size);
rc = -EINVAL;
goto out;
}
(*packet_size) += length_size;
(*tag_11_contents_size) = (body_size - 14);
if (unlikely((*packet_size) + body_size + 1 > max_packet_size)) {
printk(KERN_ERR "Packet size exceeds max\n");
rc = -EINVAL;
goto out;
}
if (unlikely((*tag_11_contents_size) > max_contents_bytes)) {
printk(KERN_ERR "Literal data section in tag 11 packet exceeds "
"expected size\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != 0x62) {
printk(KERN_WARNING "Unrecognizable packet\n");
rc = -EINVAL;
goto out;
}
if (data[(*packet_size)++] != 0x08) {
printk(KERN_WARNING "Unrecognizable packet\n");
rc = -EINVAL;
goto out;
}
(*packet_size) += 12; /* Ignore filename and modification date */
memcpy(contents, &data[(*packet_size)], (*tag_11_contents_size));
(*packet_size) += (*tag_11_contents_size);
out:
if (rc) {
(*packet_size) = 0;
(*tag_11_contents_size) = 0;
}
return rc;
}
int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
struct ecryptfs_auth_tok **auth_tok,
char *sig)
{
int rc = 0;
(*auth_tok_key) = request_key(&key_type_user, sig, NULL);
if (IS_ERR(*auth_tok_key)) {
(*auth_tok_key) = ecryptfs_get_encrypted_key(sig);
if (IS_ERR(*auth_tok_key)) {
printk(KERN_ERR "Could not find key with description: [%s]\n",
sig);
rc = process_request_key_err(PTR_ERR(*auth_tok_key));
(*auth_tok_key) = NULL;
goto out;
}
}
down_write(&(*auth_tok_key)->sem);
rc = ecryptfs_verify_auth_tok_from_key(*auth_tok_key, auth_tok);
if (rc) {
up_write(&(*auth_tok_key)->sem);
key_put(*auth_tok_key);
(*auth_tok_key) = NULL;
goto out;
}
out:
return rc;
}
/**
* decrypt_passphrase_encrypted_session_key - Decrypt the session key with the given auth_tok.
* @auth_tok: The passphrase authentication token used to decrypt the FEK
* @crypt_stat: The cryptographic context
*
* Returns zero on success; non-zero on error.
*/
static int
decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
struct ecryptfs_crypt_stat *crypt_stat)
{
struct scatterlist dst_sg[2];
struct scatterlist src_sg[2];
struct mutex *tfm_mutex;
struct crypto_skcipher *tfm;
struct skcipher_request *req = NULL;
int rc = 0;
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(
KERN_DEBUG, "Session key encryption key (size [%d]):\n",
auth_tok->token.password.session_key_encryption_key_bytes);
ecryptfs_dump_hex(
auth_tok->token.password.session_key_encryption_key,
auth_tok->token.password.session_key_encryption_key_bytes);
}
rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&tfm, &tfm_mutex,
crypt_stat->cipher);
if (unlikely(rc)) {
printk(KERN_ERR "Internal error whilst attempting to get "
"tfm and mutex for cipher name [%s]; rc = [%d]\n",
crypt_stat->cipher, rc);
goto out;
}
rc = virt_to_scatterlist(auth_tok->session_key.encrypted_key,
auth_tok->session_key.encrypted_key_size,
src_sg, 2);
if (rc < 1 || rc > 2) {
printk(KERN_ERR "Internal error whilst attempting to convert "
"auth_tok->session_key.encrypted_key to scatterlist; "
"expected rc = 1; got rc = [%d]. "
"auth_tok->session_key.encrypted_key_size = [%d]\n", rc,
auth_tok->session_key.encrypted_key_size);
goto out;
}
auth_tok->session_key.decrypted_key_size =
auth_tok->session_key.encrypted_key_size;
rc = virt_to_scatterlist(auth_tok->session_key.decrypted_key,
auth_tok->session_key.decrypted_key_size,
dst_sg, 2);
if (rc < 1 || rc > 2) {
printk(KERN_ERR "Internal error whilst attempting to convert "
"auth_tok->session_key.decrypted_key to scatterlist; "
"expected rc = 1; got rc = [%d]\n", rc);
goto out;
}
mutex_lock(tfm_mutex);
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
mutex_unlock(tfm_mutex);
printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
"skcipher_request_alloc for %s\n", __func__,
crypto_skcipher_driver_name(tfm));
rc = -ENOMEM;
goto out;
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
rc = crypto_skcipher_setkey(
tfm, auth_tok->token.password.session_key_encryption_key,
crypt_stat->key_size);
if (unlikely(rc < 0)) {
mutex_unlock(tfm_mutex);
printk(KERN_ERR "Error setting key for crypto context\n");
rc = -EINVAL;
goto out;
}
skcipher_request_set_crypt(req, src_sg, dst_sg,
auth_tok->session_key.encrypted_key_size,
NULL);
rc = crypto_skcipher_decrypt(req);
mutex_unlock(tfm_mutex);
if (unlikely(rc)) {
printk(KERN_ERR "Error decrypting; rc = [%d]\n", rc);
goto out;
}
auth_tok->session_key.flags |= ECRYPTFS_CONTAINS_DECRYPTED_KEY;
memcpy(crypt_stat->key, auth_tok->session_key.decrypted_key,
auth_tok->session_key.decrypted_key_size);
crypt_stat->flags |= ECRYPTFS_KEY_VALID;
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "FEK of size [%zd]:\n",
crypt_stat->key_size);
ecryptfs_dump_hex(crypt_stat->key,
crypt_stat->key_size);
}
out:
skcipher_request_free(req);
return rc;
}
/**
* ecryptfs_parse_packet_set
* @crypt_stat: The cryptographic context
* @src: Virtual address of region of memory containing the packets
* @ecryptfs_dentry: The eCryptfs dentry associated with the packet set
*
* Set up crypt_stat with the file's session key if the requisite key
* is available to decrypt it.
*
* Returns zero if a valid authentication token was retrieved and
* processed; a negative value if the file is not encrypted or on
* error.
*/
int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
unsigned char *src,
struct dentry *ecryptfs_dentry)
{
size_t i = 0;
size_t found_auth_tok;
size_t next_packet_is_auth_tok_packet;
struct list_head auth_tok_list;
struct ecryptfs_auth_tok *matching_auth_tok;
struct ecryptfs_auth_tok *candidate_auth_tok;
char *candidate_auth_tok_sig;
size_t packet_size;
struct ecryptfs_auth_tok *new_auth_tok;
unsigned char sig_tmp_space[ECRYPTFS_SIG_SIZE];
struct ecryptfs_auth_tok_list_item *auth_tok_list_item;
size_t tag_11_contents_size;
size_t tag_11_packet_size;
struct key *auth_tok_key = NULL;
int rc = 0;
INIT_LIST_HEAD(&auth_tok_list);
/* Parse the header to find as many packets as we can; these will be
* added to our &auth_tok_list */
next_packet_is_auth_tok_packet = 1;
while (next_packet_is_auth_tok_packet) {
size_t max_packet_size = ((PAGE_SIZE - 8) - i);
switch (src[i]) {
case ECRYPTFS_TAG_3_PACKET_TYPE:
rc = parse_tag_3_packet(crypt_stat,
(unsigned char *)&src[i],
&auth_tok_list, &new_auth_tok,
&packet_size, max_packet_size);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error parsing "
"tag 3 packet\n");
rc = -EIO;
goto out_wipe_list;
}
i += packet_size;
rc = parse_tag_11_packet((unsigned char *)&src[i],
sig_tmp_space,
ECRYPTFS_SIG_SIZE,
&tag_11_contents_size,
&tag_11_packet_size,
max_packet_size);
if (rc) {
ecryptfs_printk(KERN_ERR, "No valid "
"(ecryptfs-specific) literal "
"packet containing "
"authentication token "
"signature found after "
"tag 3 packet\n");
rc = -EIO;
goto out_wipe_list;
}
i += tag_11_packet_size;
if (ECRYPTFS_SIG_SIZE != tag_11_contents_size) {
ecryptfs_printk(KERN_ERR, "Expected "
"signature of size [%d]; "
"read size [%zd]\n",
ECRYPTFS_SIG_SIZE,
tag_11_contents_size);
rc = -EIO;
goto out_wipe_list;
}
ecryptfs_to_hex(new_auth_tok->token.password.signature,
sig_tmp_space, tag_11_contents_size);
new_auth_tok->token.password.signature[
ECRYPTFS_PASSWORD_SIG_SIZE] = '\0';
crypt_stat->flags |= ECRYPTFS_ENCRYPTED;
break;
case ECRYPTFS_TAG_1_PACKET_TYPE:
rc = parse_tag_1_packet(crypt_stat,
(unsigned char *)&src[i],
&auth_tok_list, &new_auth_tok,
&packet_size, max_packet_size);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error parsing "
"tag 1 packet\n");
rc = -EIO;
goto out_wipe_list;
}
i += packet_size;
crypt_stat->flags |= ECRYPTFS_ENCRYPTED;
break;
case ECRYPTFS_TAG_11_PACKET_TYPE:
ecryptfs_printk(KERN_WARNING, "Invalid packet set "
"(Tag 11 not allowed by itself)\n");
rc = -EIO;
goto out_wipe_list;
default:
ecryptfs_printk(KERN_DEBUG, "No packet at offset [%zd] "
"of the file header; hex value of "
"character is [0x%.2x]\n", i, src[i]);
next_packet_is_auth_tok_packet = 0;
}
}
if (list_empty(&auth_tok_list)) {
printk(KERN_ERR "The lower file appears to be a non-encrypted "
"eCryptfs file; this is not supported in this version "
"of the eCryptfs kernel module\n");
rc = -EINVAL;
goto out;
}
/* auth_tok_list contains the set of authentication tokens
* parsed from the metadata. We need to find a matching
* authentication token that has the secret component(s)
* necessary to decrypt the EFEK in the auth_tok parsed from
* the metadata. There may be several potential matches, but
* just one will be sufficient to decrypt to get the FEK. */
find_next_matching_auth_tok:
found_auth_tok = 0;
list_for_each_entry(auth_tok_list_item, &auth_tok_list, list) {
candidate_auth_tok = &auth_tok_list_item->auth_tok;
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG,
"Considering candidate auth tok:\n");
ecryptfs_dump_auth_tok(candidate_auth_tok);
}
rc = ecryptfs_get_auth_tok_sig(&candidate_auth_tok_sig,
candidate_auth_tok);
if (rc) {
printk(KERN_ERR
"Unrecognized candidate auth tok type: [%d]\n",
candidate_auth_tok->token_type);
rc = -EINVAL;
goto out_wipe_list;
}
rc = ecryptfs_find_auth_tok_for_sig(&auth_tok_key,
&matching_auth_tok,
crypt_stat->mount_crypt_stat,
candidate_auth_tok_sig);
if (!rc) {
found_auth_tok = 1;
goto found_matching_auth_tok;
}
}
if (!found_auth_tok) {
ecryptfs_printk(KERN_ERR, "Could not find a usable "
"authentication token\n");
rc = -EIO;
goto out_wipe_list;
}
found_matching_auth_tok:
if (candidate_auth_tok->token_type == ECRYPTFS_PRIVATE_KEY) {
memcpy(&(candidate_auth_tok->token.private_key),
&(matching_auth_tok->token.private_key),
sizeof(struct ecryptfs_private_key));
up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
rc = decrypt_pki_encrypted_session_key(candidate_auth_tok,
crypt_stat);
} else if (candidate_auth_tok->token_type == ECRYPTFS_PASSWORD) {
memcpy(&(candidate_auth_tok->token.password),
&(matching_auth_tok->token.password),
sizeof(struct ecryptfs_password));
up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
rc = decrypt_passphrase_encrypted_session_key(
candidate_auth_tok, crypt_stat);
} else {
up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
rc = -EINVAL;
}
if (rc) {
struct ecryptfs_auth_tok_list_item *auth_tok_list_item_tmp;
ecryptfs_printk(KERN_WARNING, "Error decrypting the "
"session key for authentication token with sig "
"[%.*s]; rc = [%d]. Removing auth tok "
"candidate from the list and searching for "
"the next match.\n", ECRYPTFS_SIG_SIZE_HEX,
candidate_auth_tok_sig, rc);
list_for_each_entry_safe(auth_tok_list_item,
auth_tok_list_item_tmp,
&auth_tok_list, list) {
if (candidate_auth_tok
== &auth_tok_list_item->auth_tok) {
list_del(&auth_tok_list_item->list);
kmem_cache_free(
ecryptfs_auth_tok_list_item_cache,
auth_tok_list_item);
goto find_next_matching_auth_tok;
}
}
BUG();
}
rc = ecryptfs_compute_root_iv(crypt_stat);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error computing "
"the root IV\n");
goto out_wipe_list;
}
rc = ecryptfs_init_crypt_ctx(crypt_stat);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error initializing crypto "
"context for cipher [%s]; rc = [%d]\n",
crypt_stat->cipher, rc);
}
out_wipe_list:
wipe_auth_tok_list(&auth_tok_list);
out:
return rc;
}
static int
pki_encrypt_session_key(struct key *auth_tok_key,
struct ecryptfs_auth_tok *auth_tok,
struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_key_record *key_rec)
{
struct ecryptfs_msg_ctx *msg_ctx = NULL;
char *payload = NULL;
size_t payload_len = 0;
struct ecryptfs_message *msg;
int rc;
rc = write_tag_66_packet(auth_tok->token.private_key.signature,
ecryptfs_code_for_cipher_string(
crypt_stat->cipher,
crypt_stat->key_size),
crypt_stat, &payload, &payload_len);
up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet\n");
goto out;
}
rc = ecryptfs_send_message(payload, payload_len, &msg_ctx);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error sending message to "
"ecryptfsd: %d\n", rc);
goto out;
}
rc = ecryptfs_wait_for_response(msg_ctx, &msg);
if (rc) {
ecryptfs_printk(KERN_ERR, "Failed to receive tag 67 packet "
"from the user space daemon\n");
rc = -EIO;
goto out;
}
rc = parse_tag_67_packet(key_rec, msg);
if (rc)
ecryptfs_printk(KERN_ERR, "Error parsing tag 67 packet\n");
kfree(msg);
out:
kfree(payload);
return rc;
}
/**
* write_tag_1_packet - Write an RFC2440-compatible tag 1 (public key) packet
* @dest: Buffer into which to write the packet
* @remaining_bytes: Maximum number of bytes that can be written
* @auth_tok_key: The authentication token key to unlock and put when done with
* @auth_tok
* @auth_tok: The authentication token used for generating the tag 1 packet
* @crypt_stat: The cryptographic context
* @key_rec: The key record struct for the tag 1 packet
* @packet_size: This function will write the number of bytes that end
* up constituting the packet; set to zero on error
*
* Returns zero on success; non-zero on error.
*/
static int
write_tag_1_packet(char *dest, size_t *remaining_bytes,
struct key *auth_tok_key, struct ecryptfs_auth_tok *auth_tok,
struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_key_record *key_rec, size_t *packet_size)
{
size_t i;
size_t encrypted_session_key_valid = 0;
size_t packet_size_length;
size_t max_packet_size;
int rc = 0;
(*packet_size) = 0;
ecryptfs_from_hex(key_rec->sig, auth_tok->token.private_key.signature,
ECRYPTFS_SIG_SIZE);
encrypted_session_key_valid = 0;
for (i = 0; i < crypt_stat->key_size; i++)
encrypted_session_key_valid |=
auth_tok->session_key.encrypted_key[i];
if (encrypted_session_key_valid) {
memcpy(key_rec->enc_key,
auth_tok->session_key.encrypted_key,
auth_tok->session_key.encrypted_key_size);
up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
goto encrypted_session_key_set;
}
if (auth_tok->session_key.encrypted_key_size == 0)
auth_tok->session_key.encrypted_key_size =
auth_tok->token.private_key.key_size;
rc = pki_encrypt_session_key(auth_tok_key, auth_tok, crypt_stat,
key_rec);
if (rc) {
printk(KERN_ERR "Failed to encrypt session key via a key "
"module; rc = [%d]\n", rc);
goto out;
}
if (ecryptfs_verbosity > 0) {
ecryptfs_printk(KERN_DEBUG, "Encrypted key:\n");
ecryptfs_dump_hex(key_rec->enc_key, key_rec->enc_key_size);
}
encrypted_session_key_set:
/* This format is inspired by OpenPGP; see RFC 2440
* packet tag 1 */
max_packet_size = (1 /* Tag 1 identifier */
+ 3 /* Max Tag 1 packet size */
+ 1 /* Version */
+ ECRYPTFS_SIG_SIZE /* Key identifier */
+ 1 /* Cipher identifier */
+ key_rec->enc_key_size); /* Encrypted key size */
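/* Illustrative arithmetic (added note): with, say, a 256-byte
 * RSA-encrypted key, max_packet_size works out to
 * 1 + 3 + 1 + 8 + 1 + 256 = 270 bytes. The three length octets
 * are a worst case; bodies under 192 octets need only one. */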
if (max_packet_size > (*remaining_bytes)) {
printk(KERN_ERR "Packet length larger than maximum allowable; "
"need up to [%td] bytes, but there are only [%td] "
"available\n", max_packet_size, (*remaining_bytes));
rc = -EINVAL;
goto out;
}
dest[(*packet_size)++] = ECRYPTFS_TAG_1_PACKET_TYPE;
rc = ecryptfs_write_packet_length(&dest[(*packet_size)],
(max_packet_size - 4),
&packet_size_length);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error generating tag 1 packet "
"header; cannot generate packet length\n");
goto out;
}
(*packet_size) += packet_size_length;
dest[(*packet_size)++] = 0x03; /* version 3 */
memcpy(&dest[(*packet_size)], key_rec->sig, ECRYPTFS_SIG_SIZE);
(*packet_size) += ECRYPTFS_SIG_SIZE;
dest[(*packet_size)++] = RFC2440_CIPHER_RSA;
memcpy(&dest[(*packet_size)], key_rec->enc_key,
key_rec->enc_key_size);
(*packet_size) += key_rec->enc_key_size;
out:
if (rc)
(*packet_size) = 0;
else
(*remaining_bytes) -= (*packet_size);
return rc;
}
/**
* write_tag_11_packet
* @dest: Target into which Tag 11 packet is to be written
* @remaining_bytes: Maximum packet length
* @contents: Byte array of contents to copy in
* @contents_length: Number of bytes in contents
* @packet_length: Length of the Tag 11 packet written; zero on error
*
* Returns zero on success; non-zero on error.
*/
static int
write_tag_11_packet(char *dest, size_t *remaining_bytes, char *contents,
size_t contents_length, size_t *packet_length)
{
size_t packet_size_length;
size_t max_packet_size;
int rc = 0;
(*packet_length) = 0;
/* This format is inspired by OpenPGP; see RFC 2440
* packet tag 11 */
max_packet_size = (1 /* Tag 11 identifier */
+ 3 /* Max Tag 11 packet size */
+ 1 /* Binary format specifier */
+ 1 /* Filename length */
+ 8 /* Filename ("_CONSOLE") */
+ 4 /* Modification date */
+ contents_length); /* Literal data */
if (max_packet_size > (*remaining_bytes)) {
printk(KERN_ERR "Packet length larger than maximum allowable; "
"need up to [%td] bytes, but there are only [%td] "
"available\n", max_packet_size, (*remaining_bytes));
rc = -EINVAL;
goto out;
}
dest[(*packet_length)++] = ECRYPTFS_TAG_11_PACKET_TYPE;
rc = ecryptfs_write_packet_length(&dest[(*packet_length)],
(max_packet_size - 4),
&packet_size_length);
if (rc) {
printk(KERN_ERR "Error generating tag 11 packet header; cannot "
"generate packet length. rc = [%d]\n", rc);
goto out;
}
(*packet_length) += packet_size_length;
dest[(*packet_length)++] = 0x62; /* binary data format specifier */
dest[(*packet_length)++] = 8;
memcpy(&dest[(*packet_length)], "_CONSOLE", 8);
(*packet_length) += 8;
memset(&dest[(*packet_length)], 0x00, 4);
(*packet_length) += 4;
memcpy(&dest[(*packet_length)], contents, contents_length);
(*packet_length) += contents_length;
out:
if (rc)
(*packet_length) = 0;
else
(*remaining_bytes) -= (*packet_length);
return rc;
}
/**
* write_tag_3_packet
* @dest: Buffer into which to write the packet
* @remaining_bytes: Maximum number of bytes that can be written
* @auth_tok: Authentication token
* @crypt_stat: The cryptographic context
* @key_rec: encrypted key
* @packet_size: This function will write the number of bytes that end
* up constituting the packet; set to zero on error
*
* Returns zero on success; non-zero on error.
*/
static int
write_tag_3_packet(char *dest, size_t *remaining_bytes,
struct ecryptfs_auth_tok *auth_tok,
struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_key_record *key_rec, size_t *packet_size)
{
size_t i;
size_t encrypted_session_key_valid = 0;
char session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES];
struct scatterlist dst_sg[2];
struct scatterlist src_sg[2];
struct mutex *tfm_mutex = NULL;
u8 cipher_code;
size_t packet_size_length;
size_t max_packet_size;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
crypt_stat->mount_crypt_stat;
struct crypto_skcipher *tfm;
struct skcipher_request *req;
int rc = 0;
(*packet_size) = 0;
ecryptfs_from_hex(key_rec->sig, auth_tok->token.password.signature,
ECRYPTFS_SIG_SIZE);
rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&tfm, &tfm_mutex,
crypt_stat->cipher);
if (unlikely(rc)) {
printk(KERN_ERR "Internal error whilst attempting to get "
"tfm and mutex for cipher name [%s]; rc = [%d]\n",
crypt_stat->cipher, rc);
goto out;
}
if (mount_crypt_stat->global_default_cipher_key_size == 0) {
printk(KERN_WARNING "No key size specified at mount; "
"defaulting to [%d]\n",
crypto_skcipher_max_keysize(tfm));
mount_crypt_stat->global_default_cipher_key_size =
crypto_skcipher_max_keysize(tfm);
}
if (crypt_stat->key_size == 0)
crypt_stat->key_size =
mount_crypt_stat->global_default_cipher_key_size;
if (auth_tok->session_key.encrypted_key_size == 0)
auth_tok->session_key.encrypted_key_size =
crypt_stat->key_size;
if (crypt_stat->key_size == 24
&& strcmp("aes", crypt_stat->cipher) == 0) {
memset((crypt_stat->key + 24), 0, 8);
auth_tok->session_key.encrypted_key_size = 32;
} else
auth_tok->session_key.encrypted_key_size = crypt_stat->key_size;
key_rec->enc_key_size =
auth_tok->session_key.encrypted_key_size;
encrypted_session_key_valid = 0;
for (i = 0; i < auth_tok->session_key.encrypted_key_size; i++)
encrypted_session_key_valid |=
auth_tok->session_key.encrypted_key[i];
if (encrypted_session_key_valid) {
ecryptfs_printk(KERN_DEBUG, "encrypted_session_key_valid != 0; "
"using auth_tok->session_key.encrypted_key, "
"where key_rec->enc_key_size = [%zd]\n",
key_rec->enc_key_size);
memcpy(key_rec->enc_key,
auth_tok->session_key.encrypted_key,
key_rec->enc_key_size);
goto encrypted_session_key_set;
}
if (auth_tok->token.password.flags &
ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET) {
ecryptfs_printk(KERN_DEBUG, "Using previously generated "
"session key encryption key of size [%d]\n",
auth_tok->token.password.
session_key_encryption_key_bytes);
memcpy(session_key_encryption_key,
auth_tok->token.password.session_key_encryption_key,
crypt_stat->key_size);
ecryptfs_printk(KERN_DEBUG,
"Cached session key encryption key:\n");
if (ecryptfs_verbosity > 0)
ecryptfs_dump_hex(session_key_encryption_key, 16);
}
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "Session key encryption key:\n");
ecryptfs_dump_hex(session_key_encryption_key, 16);
}
rc = virt_to_scatterlist(crypt_stat->key, key_rec->enc_key_size,
src_sg, 2);
if (rc < 1 || rc > 2) {
ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
"for crypt_stat session key; expected rc = 1; "
"got rc = [%d]. key_rec->enc_key_size = [%zd]\n",
rc, key_rec->enc_key_size);
rc = -ENOMEM;
goto out;
}
rc = virt_to_scatterlist(key_rec->enc_key, key_rec->enc_key_size,
dst_sg, 2);
if (rc < 1 || rc > 2) {
ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
"for crypt_stat encrypted session key; "
"expected rc = 1; got rc = [%d]. "
"key_rec->enc_key_size = [%zd]\n", rc,
key_rec->enc_key_size);
rc = -ENOMEM;
goto out;
}
mutex_lock(tfm_mutex);
rc = crypto_skcipher_setkey(tfm, session_key_encryption_key,
crypt_stat->key_size);
if (rc < 0) {
mutex_unlock(tfm_mutex);
ecryptfs_printk(KERN_ERR, "Error setting key for crypto "
"context; rc = [%d]\n", rc);
goto out;
}
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
mutex_unlock(tfm_mutex);
ecryptfs_printk(KERN_ERR, "Out of kernel memory whilst "
"attempting to skcipher_request_alloc for "
"%s\n", crypto_skcipher_driver_name(tfm));
rc = -ENOMEM;
goto out;
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
rc = 0;
ecryptfs_printk(KERN_DEBUG, "Encrypting [%zd] bytes of the key\n",
crypt_stat->key_size);
skcipher_request_set_crypt(req, src_sg, dst_sg,
key_rec->enc_key_size, NULL);
rc = crypto_skcipher_encrypt(req);
mutex_unlock(tfm_mutex);
skcipher_request_free(req);
if (rc) {
printk(KERN_ERR "Error encrypting; rc = [%d]\n", rc);
goto out;
}
ecryptfs_printk(KERN_DEBUG, "This should be the encrypted key:\n");
if (ecryptfs_verbosity > 0) {
ecryptfs_printk(KERN_DEBUG, "EFEK of size [%zd]:\n",
key_rec->enc_key_size);
ecryptfs_dump_hex(key_rec->enc_key,
key_rec->enc_key_size);
}
encrypted_session_key_set:
/* This format is inspired by OpenPGP; see RFC 2440
* packet tag 3 */
max_packet_size = (1 /* Tag 3 identifier */
+ 3 /* Max Tag 3 packet size */
+ 1 /* Version */
+ 1 /* Cipher code */
+ 1 /* S2K specifier */
+ 1 /* Hash identifier */
+ ECRYPTFS_SALT_SIZE /* Salt */
+ 1 /* Hash iterations */
+ key_rec->enc_key_size); /* Encrypted key size */
if (max_packet_size > (*remaining_bytes)) {
printk(KERN_ERR "Packet too large; need up to [%td] bytes, but "
"there are only [%td] available\n", max_packet_size,
(*remaining_bytes));
rc = -EINVAL;
goto out;
}
dest[(*packet_size)++] = ECRYPTFS_TAG_3_PACKET_TYPE;
/* Chop off the Tag 3 identifier(1) and Tag 3 packet size(3)
* to get the number of octets in the actual Tag 3 packet */
rc = ecryptfs_write_packet_length(&dest[(*packet_size)],
(max_packet_size - 4),
&packet_size_length);
if (rc) {
printk(KERN_ERR "Error generating tag 3 packet header; cannot "
"generate packet length. rc = [%d]\n", rc);
goto out;
}
(*packet_size) += packet_size_length;
dest[(*packet_size)++] = 0x04; /* version 4 */
/* TODO: Break from RFC2440 so that arbitrary ciphers can be
* specified with strings */
cipher_code = ecryptfs_code_for_cipher_string(crypt_stat->cipher,
crypt_stat->key_size);
if (cipher_code == 0) {
ecryptfs_printk(KERN_WARNING, "Unable to generate code for "
"cipher [%s]\n", crypt_stat->cipher);
rc = -EINVAL;
goto out;
}
dest[(*packet_size)++] = cipher_code;
dest[(*packet_size)++] = 0x03; /* S2K */
dest[(*packet_size)++] = 0x01; /* MD5 (TODO: parameterize) */
memcpy(&dest[(*packet_size)], auth_tok->token.password.salt,
ECRYPTFS_SALT_SIZE);
(*packet_size) += ECRYPTFS_SALT_SIZE; /* salt */
dest[(*packet_size)++] = 0x60; /* hash iterations (65536) */
memcpy(&dest[(*packet_size)], key_rec->enc_key,
key_rec->enc_key_size);
(*packet_size) += key_rec->enc_key_size;
out:
if (rc)
(*packet_size) = 0;
else
(*remaining_bytes) -= (*packet_size);
return rc;
}
struct kmem_cache *ecryptfs_key_record_cache;
/**
* ecryptfs_generate_key_packet_set
* @dest_base: Virtual address from which to write the key record set
* @crypt_stat: The cryptographic context from which the
* authentication tokens will be retrieved
* @ecryptfs_dentry: The dentry, used to retrieve the mount crypt stat
* for the global parameters
* @len: The amount written
* @max: The maximum amount of data allowed to be written
*
* Generates a key packet set and writes it to the virtual address
* passed in.
*
* Returns zero on success; non-zero on error.
*/
int
ecryptfs_generate_key_packet_set(char *dest_base,
struct ecryptfs_crypt_stat *crypt_stat,
struct dentry *ecryptfs_dentry, size_t *len,
size_t max)
{
struct ecryptfs_auth_tok *auth_tok;
struct key *auth_tok_key = NULL;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
size_t written;
struct ecryptfs_key_record *key_rec;
struct ecryptfs_key_sig *key_sig;
int rc = 0;
(*len) = 0;
mutex_lock(&crypt_stat->keysig_list_mutex);
key_rec = kmem_cache_alloc(ecryptfs_key_record_cache, GFP_KERNEL);
if (!key_rec) {
rc = -ENOMEM;
goto out;
}
list_for_each_entry(key_sig, &crypt_stat->keysig_list,
crypt_stat_list) {
memset(key_rec, 0, sizeof(*key_rec));
rc = ecryptfs_find_global_auth_tok_for_sig(&auth_tok_key,
&auth_tok,
mount_crypt_stat,
key_sig->keysig);
if (rc) {
printk(KERN_WARNING "Unable to retrieve auth tok with "
"sig = [%s]\n", key_sig->keysig);
rc = process_find_global_auth_tok_for_sig_err(rc);
goto out_free;
}
if (auth_tok->token_type == ECRYPTFS_PASSWORD) {
rc = write_tag_3_packet((dest_base + (*len)),
&max, auth_tok,
crypt_stat, key_rec,
&written);
up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error "
"writing tag 3 packet\n");
goto out_free;
}
(*len) += written;
/* Write auth tok signature packet */
rc = write_tag_11_packet((dest_base + (*len)), &max,
key_rec->sig,
ECRYPTFS_SIG_SIZE, &written);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error writing "
"auth tok signature packet\n");
goto out_free;
}
(*len) += written;
} else if (auth_tok->token_type == ECRYPTFS_PRIVATE_KEY) {
rc = write_tag_1_packet(dest_base + (*len), &max,
auth_tok_key, auth_tok,
crypt_stat, key_rec, &written);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error "
"writing tag 1 packet\n");
goto out_free;
}
(*len) += written;
} else {
up_write(&(auth_tok_key->sem));
key_put(auth_tok_key);
ecryptfs_printk(KERN_WARNING, "Unsupported "
"authentication token type\n");
rc = -EINVAL;
goto out_free;
}
}
if (likely(max > 0)) {
dest_base[(*len)] = 0x00;
} else {
ecryptfs_printk(KERN_ERR, "Error writing boundary byte\n");
rc = -EIO;
}
out_free:
kmem_cache_free(ecryptfs_key_record_cache, key_rec);
out:
if (rc)
(*len) = 0;
mutex_unlock(&crypt_stat->keysig_list_mutex);
return rc;
}
struct kmem_cache *ecryptfs_key_sig_cache;
int ecryptfs_add_keysig(struct ecryptfs_crypt_stat *crypt_stat, char *sig)
{
struct ecryptfs_key_sig *new_key_sig;
new_key_sig = kmem_cache_alloc(ecryptfs_key_sig_cache, GFP_KERNEL);
if (!new_key_sig)
return -ENOMEM;
memcpy(new_key_sig->keysig, sig, ECRYPTFS_SIG_SIZE_HEX);
new_key_sig->keysig[ECRYPTFS_SIG_SIZE_HEX] = '\0';
/* Caller must hold keysig_list_mutex */
list_add(&new_key_sig->crypt_stat_list, &crypt_stat->keysig_list);
return 0;
}
struct kmem_cache *ecryptfs_global_auth_tok_cache;
int
ecryptfs_add_global_auth_tok(struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
char *sig, u32 global_auth_tok_flags)
{
struct ecryptfs_global_auth_tok *new_auth_tok;
new_auth_tok = kmem_cache_zalloc(ecryptfs_global_auth_tok_cache,
GFP_KERNEL);
if (!new_auth_tok)
return -ENOMEM;
memcpy(new_auth_tok->sig, sig, ECRYPTFS_SIG_SIZE_HEX);
new_auth_tok->flags = global_auth_tok_flags;
new_auth_tok->sig[ECRYPTFS_SIG_SIZE_HEX] = '\0';
mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
list_add(&new_auth_tok->mount_crypt_stat_list,
&mount_crypt_stat->global_auth_tok_list);
mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
return 0;
}
| linux-master | fs/ecryptfs/keystore.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
* This is where eCryptfs coordinates the symmetric encryption and
* decryption of the file data as it passes between the lower
* encrypted file and the upper decrypted file.
*
* Copyright (C) 1997-2003 Erez Zadok
* Copyright (C) 2001-2003 Stony Brook University
* Copyright (C) 2004-2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
*/
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <asm/unaligned.h>
#include "ecryptfs_kernel.h"
/*
* ecryptfs_get_locked_page
*
* Get one page from cache or lower f/s, return error otherwise.
*
* Returns locked and up-to-date page (if ok), with increased
* refcnt.
*/
struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
{
struct page *page = read_mapping_page(inode->i_mapping, index, NULL);
if (!IS_ERR(page))
lock_page(page);
return page;
}
/**
* ecryptfs_writepage
* @page: Page that is locked before this call is made
* @wbc: Write-back control structure
*
* Returns zero on success; non-zero otherwise
*
* This is where we encrypt the data and pass the encrypted data to
* the lower filesystem. In OpenPGP-compatible mode, we operate on
* entire underlying packets.
*/
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
int rc;
rc = ecryptfs_encrypt_page(page);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error encrypting "
"page (upper index [0x%.16lx])\n", page->index);
ClearPageUptodate(page);
goto out;
}
SetPageUptodate(page);
out:
unlock_page(page);
return rc;
}
static void strip_xattr_flag(char *page_virt,
struct ecryptfs_crypt_stat *crypt_stat)
{
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
size_t written;
crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR;
ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat,
&written);
crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
}
}
/*
* Header Extent:
* Octets 0-7: Unencrypted file size (big-endian)
* Octets 8-15: eCryptfs special marker
* Octets 16-19: Flags
* Octet 16: File format version number (between 0 and 255)
* Octets 17-18: Reserved
* Octet 19: Bit 1 (lsb): Reserved
* Bit 2: Encrypted?
* Bits 3-8: Reserved
* Octets 20-23: Header extent size (big-endian)
* Octets 24-25: Number of header extents at front of file
* (big-endian)
* Octet 26: Begin RFC 2440 authentication token packet set
*/
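/* Illustrative example (added note, assuming 4096-byte pages and
 * extents): if crypt_stat->metadata_size is 8192, view extents 0
 * and 1 are header extents synthesized from the xattr metadata,
 * while view extent 2 maps to lower offset 2 * 4096 - 8192 = 0,
 * the first encrypted data extent in the lower file. */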
/**
* ecryptfs_copy_up_encrypted_with_header
* @page: Sort of a ``virtual'' representation of the encrypted lower
* file. The actual lower file does not have the metadata in
* the header. This is locked.
* @crypt_stat: The eCryptfs inode's cryptographic context
*
* The ``view'' is the version of the file that userspace winds up
* seeing, with the header information inserted.
*/
static int
ecryptfs_copy_up_encrypted_with_header(struct page *page,
struct ecryptfs_crypt_stat *crypt_stat)
{
loff_t extent_num_in_page = 0;
loff_t num_extents_per_page = (PAGE_SIZE
/ crypt_stat->extent_size);
int rc = 0;
while (extent_num_in_page < num_extents_per_page) {
loff_t view_extent_num = ((((loff_t)page->index)
* num_extents_per_page)
+ extent_num_in_page);
size_t num_header_extents_at_front =
(crypt_stat->metadata_size / crypt_stat->extent_size);
if (view_extent_num < num_header_extents_at_front) {
/* This is a header extent */
char *page_virt;
page_virt = kmap_local_page(page);
memset(page_virt, 0, PAGE_SIZE);
/* TODO: Support more than one header extent */
if (view_extent_num == 0) {
size_t written;
rc = ecryptfs_read_xattr_region(
page_virt, page->mapping->host);
strip_xattr_flag(page_virt + 16, crypt_stat);
ecryptfs_write_header_metadata(page_virt + 20,
crypt_stat,
&written);
}
kunmap_local(page_virt);
flush_dcache_page(page);
if (rc) {
printk(KERN_ERR "%s: Error reading xattr "
"region; rc = [%d]\n", __func__, rc);
goto out;
}
} else {
/* This is an encrypted data extent */
loff_t lower_offset =
((view_extent_num * crypt_stat->extent_size)
- crypt_stat->metadata_size);
rc = ecryptfs_read_lower_page_segment(
page, (lower_offset >> PAGE_SHIFT),
(lower_offset & ~PAGE_MASK),
crypt_stat->extent_size, page->mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error attempting to read "
"extent at offset [%lld] in the lower "
"file; rc = [%d]\n", __func__,
lower_offset, rc);
goto out;
}
}
extent_num_in_page++;
}
out:
return rc;
}
/**
* ecryptfs_read_folio
* @file: An eCryptfs file
* @folio: Folio from eCryptfs inode mapping into which to stick the read data
*
* Read in a folio, decrypting if necessary.
*
* Returns zero on success; non-zero on error.
*/
static int ecryptfs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
int rc = 0;
if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
PAGE_SIZE,
page->mapping->host);
} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
rc = ecryptfs_copy_up_encrypted_with_header(page,
crypt_stat);
if (rc) {
printk(KERN_ERR "%s: Error attempting to copy "
"the encrypted content from the lower "
"file whilst inserting the metadata "
"from the xattr into the header; rc = "
"[%d]\n", __func__, rc);
goto out;
}
} else {
rc = ecryptfs_read_lower_page_segment(
page, page->index, 0, PAGE_SIZE,
page->mapping->host);
if (rc) {
printk(KERN_ERR "Error reading page; rc = "
"[%d]\n", rc);
goto out;
}
}
} else {
rc = ecryptfs_decrypt_page(page);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error decrypting page; "
"rc = [%d]\n", rc);
goto out;
}
}
out:
if (rc)
ClearPageUptodate(page);
else
SetPageUptodate(page);
ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16lx]\n",
page->index);
unlock_page(page);
return rc;
}
/*
* Called with lower inode mutex held.
*/
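/* Worked example (added note): with i_size 5000 and a 4096-byte
 * page size, only page index 1 (5000 / 4096) holds EOF; there,
 * end_byte_in_page is 5000 % 4096 = 904, so bytes 904..4095 are
 * zeroed unless the current write ('to') already covers them. */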
static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
{
struct inode *inode = page->mapping->host;
int end_byte_in_page;
if ((i_size_read(inode) / PAGE_SIZE) != page->index)
goto out;
end_byte_in_page = i_size_read(inode) % PAGE_SIZE;
if (to > end_byte_in_page)
end_byte_in_page = to;
zero_user_segment(page, end_byte_in_page, PAGE_SIZE);
out:
return 0;
}
/**
* ecryptfs_write_begin
* @file: The eCryptfs file
* @mapping: The eCryptfs object
* @pos: The file offset at which to start writing
* @len: Length of the write
* @pagep: Pointer to return the page
* @fsdata: Pointer to return fs data (unused)
*
* This function must zero any hole we create
*
* Returns zero on success; non-zero otherwise
*/
static int ecryptfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
loff_t prev_page_end_size;
int rc = 0;
page = grab_cache_page_write_begin(mapping, index);
if (!page)
return -ENOMEM;
*pagep = page;
prev_page_end_size = ((loff_t)index << PAGE_SHIFT);
if (!PageUptodate(page)) {
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_read_lower_page_segment(
page, index, 0, PAGE_SIZE, mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error attempting to read "
"lower page segment; rc = [%d]\n",
__func__, rc);
ClearPageUptodate(page);
goto out;
} else
SetPageUptodate(page);
} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
rc = ecryptfs_copy_up_encrypted_with_header(
page, crypt_stat);
if (rc) {
printk(KERN_ERR "%s: Error attempting "
"to copy the encrypted content "
"from the lower file whilst "
"inserting the metadata from "
"the xattr into the header; rc "
"= [%d]\n", __func__, rc);
ClearPageUptodate(page);
goto out;
}
SetPageUptodate(page);
} else {
rc = ecryptfs_read_lower_page_segment(
page, index, 0, PAGE_SIZE,
mapping->host);
if (rc) {
printk(KERN_ERR "%s: Error reading "
"page; rc = [%d]\n",
__func__, rc);
ClearPageUptodate(page);
goto out;
}
SetPageUptodate(page);
}
} else {
if (prev_page_end_size
>= i_size_read(page->mapping->host)) {
zero_user(page, 0, PAGE_SIZE);
SetPageUptodate(page);
} else if (len < PAGE_SIZE) {
rc = ecryptfs_decrypt_page(page);
if (rc) {
printk(KERN_ERR "%s: Error decrypting "
"page at index [%ld]; "
"rc = [%d]\n",
__func__, page->index, rc);
ClearPageUptodate(page);
goto out;
}
SetPageUptodate(page);
}
}
}
/* If creating a page or more of holes, zero them out via truncate.
* Note, this will increase i_size. */
if (index != 0) {
if (prev_page_end_size > i_size_read(page->mapping->host)) {
rc = ecryptfs_truncate(file->f_path.dentry,
prev_page_end_size);
if (rc) {
printk(KERN_ERR "%s: Error on attempt to "
"truncate to (higher) offset [%lld];"
" rc = [%d]\n", __func__,
prev_page_end_size, rc);
goto out;
}
}
}
/* Writing to a new page, and creating a small hole from start
* of page? Zero it out. */
if ((i_size_read(mapping->host) == prev_page_end_size)
&& (pos != 0))
zero_user(page, 0, PAGE_SIZE);
out:
if (unlikely(rc)) {
unlock_page(page);
put_page(page);
*pagep = NULL;
}
return rc;
}
/*
* ecryptfs_write_inode_size_to_header
*
* Writes the lower file size to the first 8 bytes of the header.
*
* Returns zero on success; non-zero on error.
*/
static int ecryptfs_write_inode_size_to_header(struct inode *ecryptfs_inode)
{
char *file_size_virt;
int rc;
file_size_virt = kmalloc(sizeof(u64), GFP_KERNEL);
if (!file_size_virt) {
rc = -ENOMEM;
goto out;
}
put_unaligned_be64(i_size_read(ecryptfs_inode), file_size_virt);
rc = ecryptfs_write_lower(ecryptfs_inode, file_size_virt, 0,
sizeof(u64));
kfree(file_size_virt);
if (rc < 0)
printk(KERN_ERR "%s: Error writing file size to header; "
"rc = [%d]\n", __func__, rc);
else
rc = 0;
out:
return rc;
}
struct kmem_cache *ecryptfs_xattr_cache;
static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
{
ssize_t size;
void *xattr_virt;
struct dentry *lower_dentry =
ecryptfs_inode_to_private(ecryptfs_inode)->lower_file->f_path.dentry;
struct inode *lower_inode = d_inode(lower_dentry);
int rc;
if (!(lower_inode->i_opflags & IOP_XATTR)) {
printk(KERN_WARNING
"No support for setting xattr in lower filesystem\n");
rc = -ENOSYS;
goto out;
}
xattr_virt = kmem_cache_alloc(ecryptfs_xattr_cache, GFP_KERNEL);
if (!xattr_virt) {
rc = -ENOMEM;
goto out;
}
inode_lock(lower_inode);
size = __vfs_getxattr(lower_dentry, lower_inode, ECRYPTFS_XATTR_NAME,
xattr_virt, PAGE_SIZE);
if (size < 0)
size = 8;
put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
rc = __vfs_setxattr(&nop_mnt_idmap, lower_dentry, lower_inode,
ECRYPTFS_XATTR_NAME, xattr_virt, size, 0);
inode_unlock(lower_inode);
if (rc)
printk(KERN_ERR "Error whilst attempting to write inode size "
"to lower file xattr; rc = [%d]\n", rc);
kmem_cache_free(ecryptfs_xattr_cache, xattr_virt);
out:
return rc;
}
int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat;
crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
return ecryptfs_write_inode_size_to_xattr(ecryptfs_inode);
else
return ecryptfs_write_inode_size_to_header(ecryptfs_inode);
}
/**
* ecryptfs_write_end
* @file: The eCryptfs file object
* @mapping: The eCryptfs object
* @pos: The file position
* @len: The length of the data (unused)
* @copied: The amount of data copied
* @page: The eCryptfs page
* @fsdata: The fsdata (unused)
*/
static int ecryptfs_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
pgoff_t index = pos >> PAGE_SHIFT;
unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + copied;
struct inode *ecryptfs_inode = mapping->host;
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
int rc;
ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
"(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page, 0,
to);
if (!rc) {
rc = copied;
fsstack_copy_inode_size(ecryptfs_inode,
ecryptfs_inode_to_lower(ecryptfs_inode));
}
goto out;
}
if (!PageUptodate(page)) {
if (copied < PAGE_SIZE) {
rc = 0;
goto out;
}
SetPageUptodate(page);
}
/* Fills in zeros if 'to' goes beyond inode size */
rc = fill_zeros_to_end_of_page(page, to);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error attempting to fill "
"zeros in page with index = [0x%.16lx]\n", index);
goto out;
}
rc = ecryptfs_encrypt_page(page);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
"index [0x%.16lx])\n", index);
goto out;
}
if (pos + copied > i_size_read(ecryptfs_inode)) {
i_size_write(ecryptfs_inode, pos + copied);
ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
"[0x%.16llx]\n",
(unsigned long long)i_size_read(ecryptfs_inode));
}
rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
if (rc)
printk(KERN_ERR "Error writing inode size to metadata; "
"rc = [%d]\n", rc);
else
rc = copied;
out:
unlock_page(page);
put_page(page);
return rc;
}
static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block)
{
struct inode *lower_inode = ecryptfs_inode_to_lower(mapping->host);
int ret = bmap(lower_inode, &block);
if (ret)
return 0;
return block;
}
#include <linux/buffer_head.h>
const struct address_space_operations ecryptfs_aops = {
/*
* XXX: This is pretty broken for multiple reasons: ecryptfs does not
* actually use buffer_heads, and ecryptfs will crash without
* CONFIG_BLOCK. But it matches the behavior before the default for
* address_space_operations without the ->dirty_folio method was
* cleaned up, so this is the best we can do without maintainer
* feedback.
*/
#ifdef CONFIG_BLOCK
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
#endif
.writepage = ecryptfs_writepage,
.read_folio = ecryptfs_read_folio,
.write_begin = ecryptfs_write_begin,
.write_end = ecryptfs_write_end,
.bmap = ecryptfs_bmap,
};
| linux-master | fs/ecryptfs/mmap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 2008 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
*/
#include <linux/fs.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/module.h>
#include "ecryptfs_kernel.h"
static atomic_t ecryptfs_num_miscdev_opens;
/**
* ecryptfs_miscdev_poll
* @file: dev file
* @pt: dev poll table (ignored)
*
* Returns the poll mask
*/
static __poll_t
ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
{
struct ecryptfs_daemon *daemon = file->private_data;
__poll_t mask = 0;
mutex_lock(&daemon->mux);
if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
printk(KERN_WARNING "%s: Attempt to poll on zombified "
"daemon\n", __func__);
goto out_unlock_daemon;
}
if (daemon->flags & ECRYPTFS_DAEMON_IN_READ)
goto out_unlock_daemon;
if (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)
goto out_unlock_daemon;
daemon->flags |= ECRYPTFS_DAEMON_IN_POLL;
mutex_unlock(&daemon->mux);
poll_wait(file, &daemon->wait, pt);
mutex_lock(&daemon->mux);
if (!list_empty(&daemon->msg_ctx_out_queue))
mask |= EPOLLIN | EPOLLRDNORM;
out_unlock_daemon:
daemon->flags &= ~ECRYPTFS_DAEMON_IN_POLL;
mutex_unlock(&daemon->mux);
return mask;
}
/**
* ecryptfs_miscdev_open
* @inode: inode of miscdev handle (ignored)
* @file: file for miscdev handle
*
* Returns zero on success; non-zero otherwise
*/
static int
ecryptfs_miscdev_open(struct inode *inode, struct file *file)
{
struct ecryptfs_daemon *daemon = NULL;
int rc;
mutex_lock(&ecryptfs_daemon_hash_mux);
rc = ecryptfs_find_daemon_by_euid(&daemon);
if (!rc) {
rc = -EINVAL;
goto out_unlock_daemon_list;
}
rc = ecryptfs_spawn_daemon(&daemon, file);
if (rc) {
printk(KERN_ERR "%s: Error attempting to spawn daemon; "
"rc = [%d]\n", __func__, rc);
goto out_unlock_daemon_list;
}
mutex_lock(&daemon->mux);
if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) {
rc = -EBUSY;
goto out_unlock_daemon;
}
daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
file->private_data = daemon;
atomic_inc(&ecryptfs_num_miscdev_opens);
out_unlock_daemon:
mutex_unlock(&daemon->mux);
out_unlock_daemon_list:
mutex_unlock(&ecryptfs_daemon_hash_mux);
return rc;
}
/**
* ecryptfs_miscdev_release
* @inode: inode of fs/ecryptfs/euid handle (ignored)
* @file: file for fs/ecryptfs/euid handle
*
 * Unregisters the daemon via ecryptfs_exorcise_daemon() once the
 * miscdev handle is closed.
*
* Returns zero on success; non-zero otherwise
*/
static int
ecryptfs_miscdev_release(struct inode *inode, struct file *file)
{
struct ecryptfs_daemon *daemon = file->private_data;
int rc;
mutex_lock(&daemon->mux);
BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
atomic_dec(&ecryptfs_num_miscdev_opens);
mutex_unlock(&daemon->mux);
mutex_lock(&ecryptfs_daemon_hash_mux);
rc = ecryptfs_exorcise_daemon(daemon);
mutex_unlock(&ecryptfs_daemon_hash_mux);
if (rc) {
printk(KERN_CRIT "%s: Fatal error whilst attempting to "
"shut down daemon; rc = [%d]. Please report this "
"bug.\n", __func__, rc);
BUG();
}
return rc;
}
/**
* ecryptfs_send_miscdev
* @data: Data to send to daemon; may be NULL
* @data_size: Amount of data to send to daemon
* @msg_ctx: Message context, which is used to handle the reply. If
* this is NULL, then we do not expect a reply.
* @msg_type: Type of message
* @msg_flags: Flags for message
* @daemon: eCryptfs daemon object
*
 * Add msg_ctx to the queue and then notify any reader blocked on the
 * daemon's miscdev handle that data is available. Must be called with
 * ecryptfs_daemon_hash_mux held.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_send_miscdev(char *data, size_t data_size,
struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
u16 msg_flags, struct ecryptfs_daemon *daemon)
{
struct ecryptfs_message *msg;
msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
if (!msg)
return -ENOMEM;
mutex_lock(&msg_ctx->mux);
msg_ctx->msg = msg;
msg_ctx->msg->index = msg_ctx->index;
msg_ctx->msg->data_len = data_size;
msg_ctx->type = msg_type;
memcpy(msg_ctx->msg->data, data, data_size);
msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
mutex_unlock(&msg_ctx->mux);
mutex_lock(&daemon->mux);
daemon->num_queued_msg_ctx++;
wake_up_interruptible(&daemon->wait);
mutex_unlock(&daemon->mux);
return 0;
}
/*
* miscdevfs packet format:
* Octet 0: Type
* Octets 1-4: network byte order msg_ctx->counter
* Octets 5-N0: Size of struct ecryptfs_message to follow
* Octets N0-N1: struct ecryptfs_message (including data)
*
* Octets 5-N1 not written if the packet type does not include a message
*/
#define PKT_TYPE_SIZE 1
#define PKT_CTR_SIZE 4
#define MIN_NON_MSG_PKT_SIZE (PKT_TYPE_SIZE + PKT_CTR_SIZE)
#define MIN_MSG_PKT_SIZE (PKT_TYPE_SIZE + PKT_CTR_SIZE \
+ ECRYPTFS_MIN_PKT_LEN_SIZE)
/* 4 + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES comes from tag 65 packet format */
#define MAX_MSG_PKT_SIZE (PKT_TYPE_SIZE + PKT_CTR_SIZE \
+ ECRYPTFS_MAX_PKT_LEN_SIZE \
+ sizeof(struct ecryptfs_message) \
+ 4 + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)
#define PKT_TYPE_OFFSET 0
#define PKT_CTR_OFFSET PKT_TYPE_SIZE
#define PKT_LEN_OFFSET (PKT_TYPE_SIZE + PKT_CTR_SIZE)
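/*
 * Illustrative userspace sketch (not part of this file; the fd and the
 * buffer size are assumptions): a daemon that has opened the miscdev
 * handle could decode one packet header along these lines, following
 * the layout documented above.
 *
 *	unsigned char pkt[4096];
 *	ssize_t n = read(fd, pkt, sizeof(pkt));
 *	if (n >= PKT_TYPE_SIZE + PKT_CTR_SIZE) {
 *		unsigned char type = pkt[PKT_TYPE_OFFSET];
 *		uint32_t ctr_nbo;
 *		memcpy(&ctr_nbo, &pkt[PKT_CTR_OFFSET], PKT_CTR_SIZE);
 *		uint32_t ctr = ntohl(ctr_nbo);	// counter is network byte order
 *		// octets at PKT_LEN_OFFSET onward, when present, hold the
 *		// packet length followed by the struct ecryptfs_message
 *	}
 */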
/**
* ecryptfs_miscdev_read - format and send message from queue
* @file: miscdevfs handle
* @buf: User buffer into which to copy the next message on the daemon queue
* @count: Amount of space available in @buf
* @ppos: Offset in file (ignored)
*
 * Pulls the oldest message off the daemon queue, formats it for
 * sending via a miscdevfs handle, and copies it into @buf
*
* Returns the number of bytes copied into the user buffer
*/
static ssize_t
ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
struct ecryptfs_daemon *daemon = file->private_data;
struct ecryptfs_msg_ctx *msg_ctx;
size_t packet_length_size;
char packet_length[ECRYPTFS_MAX_PKT_LEN_SIZE];
size_t i;
size_t total_length;
int rc;
mutex_lock(&daemon->mux);
if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
rc = 0;
printk(KERN_WARNING "%s: Attempt to read from zombified "
"daemon\n", __func__);
goto out_unlock_daemon;
}
if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) {
rc = 0;
goto out_unlock_daemon;
}
/* This daemon will not go away so long as this flag is set */
daemon->flags |= ECRYPTFS_DAEMON_IN_READ;
check_list:
if (list_empty(&daemon->msg_ctx_out_queue)) {
mutex_unlock(&daemon->mux);
rc = wait_event_interruptible(
daemon->wait, !list_empty(&daemon->msg_ctx_out_queue));
mutex_lock(&daemon->mux);
if (rc < 0) {
rc = 0;
goto out_unlock_daemon;
}
}
if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
rc = 0;
goto out_unlock_daemon;
}
if (list_empty(&daemon->msg_ctx_out_queue)) {
		/* Something else jumped in since the
		 * wait_event_interruptible() and removed the
		 * message from the queue; try again */
goto check_list;
}
msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
struct ecryptfs_msg_ctx, daemon_out_list);
BUG_ON(!msg_ctx);
mutex_lock(&msg_ctx->mux);
if (msg_ctx->msg) {
rc = ecryptfs_write_packet_length(packet_length,
msg_ctx->msg_size,
&packet_length_size);
		if (rc) {
			printk(KERN_WARNING "%s: Error writing packet length; "
			       "rc = [%d]\n", __func__, rc);
			/* Treat this as EOF for the reader rather than failing */
			rc = 0;
			goto out_unlock_msg_ctx;
		}
} else {
packet_length_size = 0;
msg_ctx->msg_size = 0;
}
total_length = (PKT_TYPE_SIZE + PKT_CTR_SIZE + packet_length_size
+ msg_ctx->msg_size);
if (count < total_length) {
rc = 0;
printk(KERN_WARNING "%s: Only given user buffer of "
"size [%zd], but we need [%zd] to read the "
"pending message\n", __func__, count, total_length);
goto out_unlock_msg_ctx;
}
rc = -EFAULT;
if (put_user(msg_ctx->type, buf))
goto out_unlock_msg_ctx;
if (put_user(cpu_to_be32(msg_ctx->counter),
(__be32 __user *)(&buf[PKT_CTR_OFFSET])))
goto out_unlock_msg_ctx;
i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
if (msg_ctx->msg) {
if (copy_to_user(&buf[i], packet_length, packet_length_size))
goto out_unlock_msg_ctx;
i += packet_length_size;
if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
goto out_unlock_msg_ctx;
i += msg_ctx->msg_size;
}
rc = i;
list_del(&msg_ctx->daemon_out_list);
kfree(msg_ctx->msg);
msg_ctx->msg = NULL;
/* We do not expect a reply from the userspace daemon for any
* message type other than ECRYPTFS_MSG_REQUEST */
if (msg_ctx->type != ECRYPTFS_MSG_REQUEST)
ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
out_unlock_msg_ctx:
mutex_unlock(&msg_ctx->mux);
out_unlock_daemon:
daemon->flags &= ~ECRYPTFS_DAEMON_IN_READ;
mutex_unlock(&daemon->mux);
return rc;
}
/**
 * ecryptfs_miscdev_response - handle daemon's response to a message previously sent to it
* @daemon: eCryptfs daemon object
* @data: Bytes comprising struct ecryptfs_message
* @data_size: sizeof(struct ecryptfs_message) + data len
* @seq: Sequence number for miscdev response packet
*
* Returns zero on success; non-zero otherwise
*/
static int ecryptfs_miscdev_response(struct ecryptfs_daemon *daemon, char *data,
size_t data_size, u32 seq)
{
struct ecryptfs_message *msg = (struct ecryptfs_message *)data;
int rc;
if ((sizeof(*msg) + msg->data_len) != data_size) {
printk(KERN_WARNING "%s: (sizeof(*msg) + msg->data_len) = "
"[%zd]; data_size = [%zd]. Invalid packet.\n", __func__,
(sizeof(*msg) + msg->data_len), data_size);
rc = -EINVAL;
goto out;
}
rc = ecryptfs_process_response(daemon, msg, seq);
if (rc)
printk(KERN_ERR
"Error processing response message; rc = [%d]\n", rc);
out:
return rc;
}
/**
* ecryptfs_miscdev_write - handle write to daemon miscdev handle
* @file: File for misc dev handle
* @buf: Buffer containing user data
* @count: Amount of data in @buf
* @ppos: Pointer to offset in file (ignored)
*
* Returns the number of bytes read from @buf
*/
static ssize_t
ecryptfs_miscdev_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
__be32 counter_nbo;
u32 seq;
size_t packet_size, packet_size_length;
char *data;
unsigned char packet_size_peek[ECRYPTFS_MAX_PKT_LEN_SIZE];
ssize_t rc;
if (count == 0) {
return 0;
} else if (count == MIN_NON_MSG_PKT_SIZE) {
/* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
goto memdup;
} else if (count < MIN_MSG_PKT_SIZE || count > MAX_MSG_PKT_SIZE) {
printk(KERN_WARNING "%s: Acceptable packet size range is "
"[%d-%zu], but amount of data written is [%zu].\n",
__func__, MIN_MSG_PKT_SIZE, MAX_MSG_PKT_SIZE, count);
return -EINVAL;
}
if (copy_from_user(packet_size_peek, &buf[PKT_LEN_OFFSET],
sizeof(packet_size_peek))) {
printk(KERN_WARNING "%s: Error while inspecting packet size\n",
__func__);
return -EFAULT;
}
rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size,
&packet_size_length);
if (rc) {
printk(KERN_WARNING "%s: Error parsing packet length; "
"rc = [%zd]\n", __func__, rc);
return rc;
}
if ((PKT_TYPE_SIZE + PKT_CTR_SIZE + packet_size_length + packet_size)
!= count) {
printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
packet_size);
return -EINVAL;
}
memdup:
data = memdup_user(buf, count);
if (IS_ERR(data)) {
printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
__func__, PTR_ERR(data));
return PTR_ERR(data);
}
switch (data[PKT_TYPE_OFFSET]) {
case ECRYPTFS_MSG_RESPONSE:
if (count < (MIN_MSG_PKT_SIZE
+ sizeof(struct ecryptfs_message))) {
printk(KERN_WARNING "%s: Minimum acceptable packet "
"size is [%zd], but amount of data written is "
"only [%zd]. Discarding response packet.\n",
__func__,
(MIN_MSG_PKT_SIZE
+ sizeof(struct ecryptfs_message)), count);
rc = -EINVAL;
goto out_free;
}
memcpy(&counter_nbo, &data[PKT_CTR_OFFSET], PKT_CTR_SIZE);
seq = be32_to_cpu(counter_nbo);
rc = ecryptfs_miscdev_response(file->private_data,
&data[PKT_LEN_OFFSET + packet_size_length],
packet_size, seq);
if (rc) {
printk(KERN_WARNING "%s: Failed to deliver miscdev "
"response to requesting operation; rc = [%zd]\n",
__func__, rc);
goto out_free;
}
break;
case ECRYPTFS_MSG_HELO:
case ECRYPTFS_MSG_QUIT:
break;
default:
ecryptfs_printk(KERN_WARNING, "Dropping miscdev "
"message of unrecognized type [%d]\n",
data[0]);
rc = -EINVAL;
goto out_free;
}
rc = count;
out_free:
kfree(data);
return rc;
}
static const struct file_operations ecryptfs_miscdev_fops = {
.owner = THIS_MODULE,
.open = ecryptfs_miscdev_open,
.poll = ecryptfs_miscdev_poll,
.read = ecryptfs_miscdev_read,
.write = ecryptfs_miscdev_write,
.release = ecryptfs_miscdev_release,
.llseek = noop_llseek,
};
static struct miscdevice ecryptfs_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "ecryptfs",
.fops = &ecryptfs_miscdev_fops
};
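/*
 * Usage sketch (userspace side; illustrative only): with .name set to
 * "ecryptfs", misc_register() creates /dev/ecryptfs, which the daemon
 * typically opens read/write and then services via poll()/read()/write()
 * against the fops above, e.g.:
 *
 *	int fd = open("/dev/ecryptfs", O_RDWR);
 */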
/**
* ecryptfs_init_ecryptfs_miscdev
*
* Messages sent to the userspace daemon from the kernel are placed on
* a queue associated with the daemon. The next read against the
* miscdev handle by that daemon will return the oldest message placed
* on the message queue for the daemon.
*
* Returns zero on success; non-zero otherwise
*/
int __init ecryptfs_init_ecryptfs_miscdev(void)
{
int rc;
atomic_set(&ecryptfs_num_miscdev_opens, 0);
rc = misc_register(&ecryptfs_miscdev);
if (rc)
printk(KERN_ERR "%s: Failed to register miscellaneous device "
"for communications with userspace daemons; rc = [%d]\n",
__func__, rc);
return rc;
}
/**
* ecryptfs_destroy_ecryptfs_miscdev
*
* All of the daemons must be exorcised prior to calling this
* function.
*/
void ecryptfs_destroy_ecryptfs_miscdev(void)
{
BUG_ON(atomic_read(&ecryptfs_num_miscdev_opens) != 0);
misc_deregister(&ecryptfs_miscdev);
}
| linux-master | fs/ecryptfs/miscdev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
* Functions only useful for debugging.
*
* Copyright (C) 2006 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
*/
#include "ecryptfs_kernel.h"
/*
* ecryptfs_dump_auth_tok - debug function to print auth toks
*
* This function will print the contents of an ecryptfs authentication
* token.
*/
void ecryptfs_dump_auth_tok(struct ecryptfs_auth_tok *auth_tok)
{
char salt[ECRYPTFS_SALT_SIZE * 2 + 1];
char sig[ECRYPTFS_SIG_SIZE_HEX + 1];
ecryptfs_printk(KERN_DEBUG, "Auth tok at mem loc [%p]:\n",
auth_tok);
if (auth_tok->flags & ECRYPTFS_PRIVATE_KEY) {
ecryptfs_printk(KERN_DEBUG, " * private key type\n");
} else {
ecryptfs_printk(KERN_DEBUG, " * passphrase type\n");
ecryptfs_to_hex(salt, auth_tok->token.password.salt,
ECRYPTFS_SALT_SIZE);
salt[ECRYPTFS_SALT_SIZE * 2] = '\0';
ecryptfs_printk(KERN_DEBUG, " * salt = [%s]\n", salt);
if (auth_tok->token.password.flags &
ECRYPTFS_PERSISTENT_PASSWORD) {
ecryptfs_printk(KERN_DEBUG, " * persistent\n");
}
memcpy(sig, auth_tok->token.password.signature,
ECRYPTFS_SIG_SIZE_HEX);
sig[ECRYPTFS_SIG_SIZE_HEX] = '\0';
ecryptfs_printk(KERN_DEBUG, " * signature = [%s]\n", sig);
}
ecryptfs_printk(KERN_DEBUG, " * session_key.flags = [0x%x]\n",
auth_tok->session_key.flags);
if (auth_tok->session_key.flags
& ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT)
ecryptfs_printk(KERN_DEBUG,
" * Userspace decrypt request set\n");
if (auth_tok->session_key.flags
& ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT)
ecryptfs_printk(KERN_DEBUG,
" * Userspace encrypt request set\n");
if (auth_tok->session_key.flags & ECRYPTFS_CONTAINS_DECRYPTED_KEY) {
ecryptfs_printk(KERN_DEBUG, " * Contains decrypted key\n");
ecryptfs_printk(KERN_DEBUG,
" * session_key.decrypted_key_size = [0x%x]\n",
auth_tok->session_key.decrypted_key_size);
ecryptfs_printk(KERN_DEBUG, " * Decrypted session key "
"dump:\n");
if (ecryptfs_verbosity > 0)
ecryptfs_dump_hex(auth_tok->session_key.decrypted_key,
ECRYPTFS_DEFAULT_KEY_BYTES);
}
if (auth_tok->session_key.flags & ECRYPTFS_CONTAINS_ENCRYPTED_KEY) {
ecryptfs_printk(KERN_DEBUG, " * Contains encrypted key\n");
ecryptfs_printk(KERN_DEBUG,
" * session_key.encrypted_key_size = [0x%x]\n",
auth_tok->session_key.encrypted_key_size);
ecryptfs_printk(KERN_DEBUG, " * Encrypted session key "
"dump:\n");
if (ecryptfs_verbosity > 0)
ecryptfs_dump_hex(auth_tok->session_key.encrypted_key,
auth_tok->session_key.
encrypted_key_size);
}
}
/**
* ecryptfs_dump_hex - debug hex printer
* @data: string of bytes to be printed
* @bytes: number of bytes to print
*
* Dump hexadecimal representation of char array
*/
void ecryptfs_dump_hex(char *data, int bytes)
{
if (ecryptfs_verbosity < 1)
return;
print_hex_dump(KERN_DEBUG, "ecryptfs: ", DUMP_PREFIX_OFFSET, 16, 1,
data, bytes, false);
}
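/*
 * Example output (illustrative): with DUMP_PREFIX_OFFSET, 16 bytes per
 * line, and a group size of 1, a dump looks roughly like:
 *
 *	ecryptfs: 00000000: de ad be ef 00 01 02 03 ...
 */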
| linux-master | fs/ecryptfs/debug.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 1997-2004 Erez Zadok
* Copyright (C) 2001-2004 Stony Brook University
* Copyright (C) 2004-2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
 * Michael C. Thompson <[email protected]>
*/
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/fs_stack.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/fileattr.h>
#include <asm/unaligned.h>
#include "ecryptfs_kernel.h"
static int lock_parent(struct dentry *dentry,
struct dentry **lower_dentry,
struct inode **lower_dir)
{
struct dentry *lower_dir_dentry;
lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
*lower_dir = d_inode(lower_dir_dentry);
*lower_dentry = ecryptfs_dentry_to_lower(dentry);
inode_lock_nested(*lower_dir, I_MUTEX_PARENT);
return (*lower_dentry)->d_parent == lower_dir_dentry ? 0 : -EINVAL;
}
static int ecryptfs_inode_test(struct inode *inode, void *lower_inode)
{
return ecryptfs_inode_to_lower(inode) == lower_inode;
}
static int ecryptfs_inode_set(struct inode *inode, void *opaque)
{
struct inode *lower_inode = opaque;
ecryptfs_set_inode_lower(inode, lower_inode);
fsstack_copy_attr_all(inode, lower_inode);
/* i_size will be overwritten for encrypted regular files */
fsstack_copy_inode_size(inode, lower_inode);
inode->i_ino = lower_inode->i_ino;
inode->i_mapping->a_ops = &ecryptfs_aops;
if (S_ISLNK(inode->i_mode))
inode->i_op = &ecryptfs_symlink_iops;
else if (S_ISDIR(inode->i_mode))
inode->i_op = &ecryptfs_dir_iops;
else
inode->i_op = &ecryptfs_main_iops;
if (S_ISDIR(inode->i_mode))
inode->i_fop = &ecryptfs_dir_fops;
else if (special_file(inode->i_mode))
init_special_inode(inode, inode->i_mode, inode->i_rdev);
else
inode->i_fop = &ecryptfs_main_fops;
return 0;
}
static struct inode *__ecryptfs_get_inode(struct inode *lower_inode,
struct super_block *sb)
{
struct inode *inode;
if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb))
return ERR_PTR(-EXDEV);
if (!igrab(lower_inode))
return ERR_PTR(-ESTALE);
inode = iget5_locked(sb, (unsigned long)lower_inode,
ecryptfs_inode_test, ecryptfs_inode_set,
lower_inode);
if (!inode) {
iput(lower_inode);
return ERR_PTR(-EACCES);
}
if (!(inode->i_state & I_NEW))
iput(lower_inode);
return inode;
}
struct inode *ecryptfs_get_inode(struct inode *lower_inode,
struct super_block *sb)
{
struct inode *inode = __ecryptfs_get_inode(lower_inode, sb);
if (!IS_ERR(inode) && (inode->i_state & I_NEW))
unlock_new_inode(inode);
return inode;
}
/**
* ecryptfs_interpose
* @lower_dentry: Existing dentry in the lower filesystem
* @dentry: ecryptfs' dentry
* @sb: ecryptfs's super_block
*
* Interposes upper and lower dentries.
*
* Returns zero on success; non-zero otherwise
*/
static int ecryptfs_interpose(struct dentry *lower_dentry,
struct dentry *dentry, struct super_block *sb)
{
struct inode *inode = ecryptfs_get_inode(d_inode(lower_dentry), sb);
if (IS_ERR(inode))
return PTR_ERR(inode);
d_instantiate(dentry, inode);
return 0;
}
static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
struct inode *inode)
{
struct dentry *lower_dentry;
struct inode *lower_dir;
int rc;
rc = lock_parent(dentry, &lower_dentry, &lower_dir);
dget(lower_dentry); // don't even try to make the lower negative
if (!rc) {
if (d_unhashed(lower_dentry))
rc = -EINVAL;
else
rc = vfs_unlink(&nop_mnt_idmap, lower_dir, lower_dentry,
NULL);
}
if (rc) {
printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
goto out_unlock;
}
fsstack_copy_attr_times(dir, lower_dir);
set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
out_unlock:
dput(lower_dentry);
inode_unlock(lower_dir);
if (!rc)
d_drop(dentry);
return rc;
}
/**
* ecryptfs_do_create
* @directory_inode: inode of the new file's dentry's parent in ecryptfs
* @ecryptfs_dentry: New file's dentry in ecryptfs
* @mode: The mode of the new file
*
* Creates the underlying file and the eCryptfs inode which will link to
* it. It will also update the eCryptfs directory inode to mimic the
* stat of the lower directory inode.
*
* Returns the new eCryptfs inode on success; an ERR_PTR on error condition
*/
static struct inode *
ecryptfs_do_create(struct inode *directory_inode,
struct dentry *ecryptfs_dentry, umode_t mode)
{
int rc;
struct dentry *lower_dentry;
struct inode *lower_dir;
struct inode *inode;
rc = lock_parent(ecryptfs_dentry, &lower_dentry, &lower_dir);
if (!rc)
rc = vfs_create(&nop_mnt_idmap, lower_dir,
lower_dentry, mode, true);
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
"rc = [%d]\n", __func__, rc);
inode = ERR_PTR(rc);
goto out_lock;
}
inode = __ecryptfs_get_inode(d_inode(lower_dentry),
directory_inode->i_sb);
if (IS_ERR(inode)) {
vfs_unlink(&nop_mnt_idmap, lower_dir, lower_dentry, NULL);
goto out_lock;
}
fsstack_copy_attr_times(directory_inode, lower_dir);
fsstack_copy_inode_size(directory_inode, lower_dir);
out_lock:
inode_unlock(lower_dir);
return inode;
}
/*
* ecryptfs_initialize_file
*
* Cause the file to be changed from a basic empty file to an ecryptfs
* file with a header and first data page.
*
* Returns zero on success
*/
int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
int rc = 0;
if (S_ISDIR(ecryptfs_inode->i_mode)) {
ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
goto out;
}
ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
rc = ecryptfs_new_file_context(ecryptfs_inode);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error creating new file "
"context; rc = [%d]\n", rc);
goto out;
}
rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
"[%pd]; rc = [%d]\n", __func__,
ecryptfs_dentry, rc);
goto out;
}
rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode);
if (rc)
printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
ecryptfs_put_lower_file(ecryptfs_inode);
out:
return rc;
}
/*
* ecryptfs_create
* @mode: The mode of the new file.
*
* Creates a new file.
*
* Returns zero on success; non-zero on error condition
*/
static int
ecryptfs_create(struct mnt_idmap *idmap,
struct inode *directory_inode, struct dentry *ecryptfs_dentry,
umode_t mode, bool excl)
{
struct inode *ecryptfs_inode;
int rc;
ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry,
mode);
if (IS_ERR(ecryptfs_inode)) {
		ecryptfs_printk(KERN_WARNING, "Failed to create file in "
				"lower filesystem\n");
rc = PTR_ERR(ecryptfs_inode);
goto out;
}
/* At this point, a file exists on "disk"; we need to make sure
* that this on disk file is prepared to be an ecryptfs file */
rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
if (rc) {
ecryptfs_do_unlink(directory_inode, ecryptfs_dentry,
ecryptfs_inode);
iget_failed(ecryptfs_inode);
goto out;
}
d_instantiate_new(ecryptfs_dentry, ecryptfs_inode);
out:
return rc;
}
static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
{
struct ecryptfs_crypt_stat *crypt_stat;
int rc;
rc = ecryptfs_get_lower_file(dentry, inode);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
"[%pd]; rc = [%d]\n", __func__,
dentry, rc);
return rc;
}
crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
/* TODO: lock for crypt_stat comparison */
if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED))
ecryptfs_set_default_sizes(crypt_stat);
rc = ecryptfs_read_and_validate_header_region(inode);
ecryptfs_put_lower_file(inode);
if (rc) {
rc = ecryptfs_read_and_validate_xattr_region(dentry, inode);
if (!rc)
crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
}
/* Must return 0 to allow non-eCryptfs files to be looked up, too */
return 0;
}
/*
* ecryptfs_lookup_interpose - Dentry interposition for a lookup
*/
static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
struct dentry *lower_dentry)
{
const struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
struct inode *inode, *lower_inode;
struct ecryptfs_dentry_info *dentry_info;
int rc = 0;
dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
if (!dentry_info) {
dput(lower_dentry);
return ERR_PTR(-ENOMEM);
}
fsstack_copy_attr_atime(d_inode(dentry->d_parent),
d_inode(path->dentry));
BUG_ON(!d_count(lower_dentry));
ecryptfs_set_dentry_private(dentry, dentry_info);
dentry_info->lower_path.mnt = mntget(path->mnt);
dentry_info->lower_path.dentry = lower_dentry;
/*
* negative dentry can go positive under us here - its parent is not
* locked. That's OK and that could happen just as we return from
* ecryptfs_lookup() anyway. Just need to be careful and fetch
* ->d_inode only once - it's not stable here.
*/
lower_inode = READ_ONCE(lower_dentry->d_inode);
if (!lower_inode) {
/* We want to add because we couldn't find in lower */
d_add(dentry, NULL);
return NULL;
}
inode = __ecryptfs_get_inode(lower_inode, dentry->d_sb);
if (IS_ERR(inode)) {
printk(KERN_ERR "%s: Error interposing; rc = [%ld]\n",
__func__, PTR_ERR(inode));
return ERR_CAST(inode);
}
if (S_ISREG(inode->i_mode)) {
rc = ecryptfs_i_size_read(dentry, inode);
if (rc) {
make_bad_inode(inode);
return ERR_PTR(rc);
}
}
if (inode->i_state & I_NEW)
unlock_new_inode(inode);
return d_splice_alias(inode, dentry);
}
/**
* ecryptfs_lookup
* @ecryptfs_dir_inode: The eCryptfs directory inode
* @ecryptfs_dentry: The eCryptfs dentry that we are looking up
* @flags: lookup flags
*
 * Find a file on disk. If the file does not exist in the lower
 * filesystem, a negative dentry is added to the dentry cache;
 * otherwise the lower inode is interposed and the dentry spliced in.
*/
static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
struct dentry *ecryptfs_dentry,
unsigned int flags)
{
char *encrypted_and_encoded_name = NULL;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct dentry *lower_dir_dentry, *lower_dentry;
const char *name = ecryptfs_dentry->d_name.name;
size_t len = ecryptfs_dentry->d_name.len;
struct dentry *res;
int rc = 0;
lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
mount_crypt_stat = &ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
rc = ecryptfs_encrypt_and_encode_filename(
&encrypted_and_encoded_name, &len,
mount_crypt_stat, name, len);
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt and encode "
"filename; rc = [%d]\n", __func__, rc);
return ERR_PTR(rc);
}
name = encrypted_and_encoded_name;
}
lower_dentry = lookup_one_len_unlocked(name, lower_dir_dentry, len);
if (IS_ERR(lower_dentry)) {
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
"[%ld] on lower_dentry = [%s]\n", __func__,
PTR_ERR(lower_dentry),
name);
res = ERR_CAST(lower_dentry);
} else {
res = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry);
}
kfree(encrypted_and_encoded_name);
return res;
}
static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
struct dentry *lower_old_dentry;
struct dentry *lower_new_dentry;
struct inode *lower_dir;
u64 file_size_save;
int rc;
file_size_save = i_size_read(d_inode(old_dentry));
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
rc = lock_parent(new_dentry, &lower_new_dentry, &lower_dir);
if (!rc)
rc = vfs_link(lower_old_dentry, &nop_mnt_idmap, lower_dir,
lower_new_dentry, NULL);
if (rc || d_really_is_negative(lower_new_dentry))
goto out_lock;
rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb);
if (rc)
goto out_lock;
fsstack_copy_attr_times(dir, lower_dir);
fsstack_copy_inode_size(dir, lower_dir);
set_nlink(d_inode(old_dentry),
ecryptfs_inode_to_lower(d_inode(old_dentry))->i_nlink);
i_size_write(d_inode(new_dentry), file_size_save);
out_lock:
inode_unlock(lower_dir);
return rc;
}
static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
{
return ecryptfs_do_unlink(dir, dentry, d_inode(dentry));
}
static int ecryptfs_symlink(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry,
const char *symname)
{
int rc;
struct dentry *lower_dentry;
struct inode *lower_dir;
char *encoded_symname;
size_t encoded_symlen;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
rc = lock_parent(dentry, &lower_dentry, &lower_dir);
if (rc)
goto out_lock;
mount_crypt_stat = &ecryptfs_superblock_to_private(
dir->i_sb)->mount_crypt_stat;
rc = ecryptfs_encrypt_and_encode_filename(&encoded_symname,
&encoded_symlen,
mount_crypt_stat, symname,
strlen(symname));
if (rc)
goto out_lock;
rc = vfs_symlink(&nop_mnt_idmap, lower_dir, lower_dentry,
encoded_symname);
kfree(encoded_symname);
if (rc || d_really_is_negative(lower_dentry))
goto out_lock;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out_lock;
fsstack_copy_attr_times(dir, lower_dir);
fsstack_copy_inode_size(dir, lower_dir);
out_lock:
inode_unlock(lower_dir);
if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;
}
static int ecryptfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
int rc;
struct dentry *lower_dentry;
struct inode *lower_dir;
rc = lock_parent(dentry, &lower_dentry, &lower_dir);
if (!rc)
rc = vfs_mkdir(&nop_mnt_idmap, lower_dir,
lower_dentry, mode);
if (rc || d_really_is_negative(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out;
fsstack_copy_attr_times(dir, lower_dir);
fsstack_copy_inode_size(dir, lower_dir);
set_nlink(dir, lower_dir->i_nlink);
out:
inode_unlock(lower_dir);
if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;
}
static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct dentry *lower_dentry;
struct inode *lower_dir;
int rc;
rc = lock_parent(dentry, &lower_dentry, &lower_dir);
dget(lower_dentry); // don't even try to make the lower negative
if (!rc) {
if (d_unhashed(lower_dentry))
rc = -EINVAL;
else
rc = vfs_rmdir(&nop_mnt_idmap, lower_dir, lower_dentry);
}
if (!rc) {
clear_nlink(d_inode(dentry));
fsstack_copy_attr_times(dir, lower_dir);
set_nlink(dir, lower_dir->i_nlink);
}
dput(lower_dentry);
inode_unlock(lower_dir);
if (!rc)
d_drop(dentry);
return rc;
}
static int
ecryptfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t dev)
{
int rc;
struct dentry *lower_dentry;
struct inode *lower_dir;
rc = lock_parent(dentry, &lower_dentry, &lower_dir);
if (!rc)
rc = vfs_mknod(&nop_mnt_idmap, lower_dir,
lower_dentry, mode, dev);
if (rc || d_really_is_negative(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out;
fsstack_copy_attr_times(dir, lower_dir);
fsstack_copy_inode_size(dir, lower_dir);
out:
inode_unlock(lower_dir);
if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;
}
static int
ecryptfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
int rc;
struct dentry *lower_old_dentry;
struct dentry *lower_new_dentry;
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
struct dentry *trap;
struct inode *target_inode;
struct renamedata rd = {};
if (flags)
return -EINVAL;
lower_old_dir_dentry = ecryptfs_dentry_to_lower(old_dentry->d_parent);
lower_new_dir_dentry = ecryptfs_dentry_to_lower(new_dentry->d_parent);
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
target_inode = d_inode(new_dentry);
trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
dget(lower_new_dentry);
rc = -EINVAL;
if (lower_old_dentry->d_parent != lower_old_dir_dentry)
goto out_lock;
if (lower_new_dentry->d_parent != lower_new_dir_dentry)
goto out_lock;
if (d_unhashed(lower_old_dentry) || d_unhashed(lower_new_dentry))
goto out_lock;
/* source should not be ancestor of target */
if (trap == lower_old_dentry)
goto out_lock;
/* target should not be ancestor of source */
if (trap == lower_new_dentry) {
rc = -ENOTEMPTY;
goto out_lock;
}
rd.old_mnt_idmap = &nop_mnt_idmap;
rd.old_dir = d_inode(lower_old_dir_dentry);
rd.old_dentry = lower_old_dentry;
rd.new_mnt_idmap = &nop_mnt_idmap;
rd.new_dir = d_inode(lower_new_dir_dentry);
rd.new_dentry = lower_new_dentry;
rc = vfs_rename(&rd);
if (rc)
goto out_lock;
if (target_inode)
fsstack_copy_attr_all(target_inode,
ecryptfs_inode_to_lower(target_inode));
fsstack_copy_attr_all(new_dir, d_inode(lower_new_dir_dentry));
if (new_dir != old_dir)
fsstack_copy_attr_all(old_dir, d_inode(lower_old_dir_dentry));
out_lock:
dput(lower_new_dentry);
unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
return rc;
}
static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
{
DEFINE_DELAYED_CALL(done);
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
const char *link;
char *buf;
int rc;
link = vfs_get_link(lower_dentry, &done);
if (IS_ERR(link))
return ERR_CAST(link);
rc = ecryptfs_decode_and_decrypt_filename(&buf, bufsiz, dentry->d_sb,
link, strlen(link));
do_delayed_call(&done);
if (rc)
return ERR_PTR(rc);
return buf;
}
static const char *ecryptfs_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
size_t len;
char *buf;
if (!dentry)
return ERR_PTR(-ECHILD);
buf = ecryptfs_readlink_lower(dentry, &len);
if (IS_ERR(buf))
return buf;
fsstack_copy_attr_atime(d_inode(dentry),
d_inode(ecryptfs_dentry_to_lower(dentry)));
buf[len] = '\0';
set_delayed_call(done, kfree_link, buf);
return buf;
}
/**
* upper_size_to_lower_size
* @crypt_stat: Crypt_stat associated with file
* @upper_size: Size of the upper file
*
* Calculate the required size of the lower file based on the
* specified size of the upper file. This calculation is based on the
* number of headers in the underlying file and the extent size.
*
* Returns Calculated size of the lower file.
*/
static loff_t
upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
loff_t upper_size)
{
loff_t lower_size;
lower_size = ecryptfs_lower_header_size(crypt_stat);
if (upper_size != 0) {
loff_t num_extents;
num_extents = upper_size >> crypt_stat->extent_shift;
if (upper_size & ~crypt_stat->extent_mask)
num_extents++;
lower_size += (num_extents * crypt_stat->extent_size);
}
return lower_size;
}
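/*
 * Worked example (numbers assumed for illustration): with a 4096-byte
 * extent_size and an 8192-byte lower header region, upper_size = 5000
 * covers one full extent plus a 904-byte remainder, so num_extents = 2
 * and lower_size = 8192 + 2 * 4096 = 16384.
 */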
/**
* truncate_upper
* @dentry: The ecryptfs layer dentry
* @ia: Address of the ecryptfs inode's attributes
* @lower_ia: Address of the lower inode's attributes
*
* Function to handle truncations modifying the size of the file. Note
* that the file sizes are interpolated. When expanding, we are simply
* writing strings of 0's out. When truncating, we truncate the upper
* inode and update the lower_ia according to the page index
* interpolations. If ATTR_SIZE is set in lower_ia->ia_valid upon return,
* the caller must use lower_ia in a call to notify_change() to perform
* the truncation of the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
static int truncate_upper(struct dentry *dentry, struct iattr *ia,
struct iattr *lower_ia)
{
int rc = 0;
struct inode *inode = d_inode(dentry);
struct ecryptfs_crypt_stat *crypt_stat;
loff_t i_size = i_size_read(inode);
loff_t lower_size_before_truncate;
loff_t lower_size_after_truncate;
if (unlikely((ia->ia_size == i_size))) {
lower_ia->ia_valid &= ~ATTR_SIZE;
return 0;
}
rc = ecryptfs_get_lower_file(dentry, inode);
if (rc)
return rc;
crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat;
/* Switch on growing or shrinking file */
if (ia->ia_size > i_size) {
char zero[] = { 0x00 };
lower_ia->ia_valid &= ~ATTR_SIZE;
		/* Write a single 0 at the last position of the file;
		 * this triggers code that will fill in 0's throughout
		 * the intermediate portion between the previous end of
		 * the file and the new end of the file */
rc = ecryptfs_write(inode, zero,
(ia->ia_size - 1), 1);
} else { /* ia->ia_size < i_size_read(inode) */
/* We're chopping off all the pages down to the page
* in which ia->ia_size is located. Fill in the end of
* that page from (ia->ia_size & ~PAGE_MASK) to
* PAGE_SIZE with zeros. */
size_t num_zeros = (PAGE_SIZE
- (ia->ia_size & ~PAGE_MASK));
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
truncate_setsize(inode, ia->ia_size);
lower_ia->ia_size = ia->ia_size;
lower_ia->ia_valid |= ATTR_SIZE;
goto out;
}
if (num_zeros) {
char *zeros_virt;
zeros_virt = kzalloc(num_zeros, GFP_KERNEL);
if (!zeros_virt) {
rc = -ENOMEM;
goto out;
}
rc = ecryptfs_write(inode, zeros_virt,
ia->ia_size, num_zeros);
kfree(zeros_virt);
if (rc) {
printk(KERN_ERR "Error attempting to zero out "
"the remainder of the end page on "
"reducing truncate; rc = [%d]\n", rc);
goto out;
}
}
truncate_setsize(inode, ia->ia_size);
rc = ecryptfs_write_inode_size_to_metadata(inode);
if (rc) {
printk(KERN_ERR "Problem with "
"ecryptfs_write_inode_size_to_metadata; "
"rc = [%d]\n", rc);
goto out;
}
/* We are reducing the size of the ecryptfs file, and need to
* know if we need to reduce the size of the lower file. */
lower_size_before_truncate =
upper_size_to_lower_size(crypt_stat, i_size);
lower_size_after_truncate =
upper_size_to_lower_size(crypt_stat, ia->ia_size);
if (lower_size_after_truncate < lower_size_before_truncate) {
lower_ia->ia_size = lower_size_after_truncate;
lower_ia->ia_valid |= ATTR_SIZE;
} else
lower_ia->ia_valid &= ~ATTR_SIZE;
}
out:
ecryptfs_put_lower_file(inode);
return rc;
}
static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset)
{
struct ecryptfs_crypt_stat *crypt_stat;
loff_t lower_oldsize, lower_newsize;
crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
lower_oldsize = upper_size_to_lower_size(crypt_stat,
i_size_read(inode));
lower_newsize = upper_size_to_lower_size(crypt_stat, offset);
if (lower_newsize > lower_oldsize) {
/*
* The eCryptfs inode and the new *lower* size are mixed here
* because we may not have the lower i_mutex held and/or it may
* not be appropriate to call inode_newsize_ok() with inodes
* from other filesystems.
*/
return inode_newsize_ok(inode, lower_newsize);
}
return 0;
}
/**
* ecryptfs_truncate
* @dentry: The ecryptfs layer dentry
* @new_length: The length to expand the file to
*
* Simple function that handles the truncation of an eCryptfs inode and
* its corresponding lower inode.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
{
struct iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = new_length };
struct iattr lower_ia = { .ia_valid = 0 };
int rc;
rc = ecryptfs_inode_newsize_ok(d_inode(dentry), new_length);
if (rc)
return rc;
rc = truncate_upper(dentry, &ia, &lower_ia);
if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
inode_lock(d_inode(lower_dentry));
rc = notify_change(&nop_mnt_idmap, lower_dentry,
&lower_ia, NULL);
inode_unlock(d_inode(lower_dentry));
}
return rc;
}
static int
ecryptfs_permission(struct mnt_idmap *idmap, struct inode *inode,
int mask)
{
return inode_permission(&nop_mnt_idmap,
ecryptfs_inode_to_lower(inode), mask);
}
/**
* ecryptfs_setattr
* @idmap: idmap of the target mount
* @dentry: dentry handle to the inode to modify
* @ia: Structure with flags of what to change and values
*
* Updates the metadata of an inode. If the update is to the size
* i.e. truncation, then ecryptfs_truncate will handle the size modification
* of both the ecryptfs inode and the lower inode.
*
* All other metadata changes will be passed right to the lower filesystem,
* and we will just update our inode to look like the lower.
*/
static int ecryptfs_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *ia)
{
int rc = 0;
struct dentry *lower_dentry;
struct iattr lower_ia;
struct inode *inode;
struct inode *lower_inode;
struct ecryptfs_crypt_stat *crypt_stat;
crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat;
if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) {
rc = ecryptfs_init_crypt_stat(crypt_stat);
if (rc)
return rc;
}
inode = d_inode(dentry);
lower_inode = ecryptfs_inode_to_lower(inode);
lower_dentry = ecryptfs_dentry_to_lower(dentry);
mutex_lock(&crypt_stat->cs_mutex);
if (d_is_dir(dentry))
crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
else if (d_is_reg(dentry)
&& (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
|| !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) {
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
rc = ecryptfs_get_lower_file(dentry, inode);
if (rc) {
mutex_unlock(&crypt_stat->cs_mutex);
goto out;
}
rc = ecryptfs_read_metadata(dentry);
ecryptfs_put_lower_file(inode);
if (rc) {
if (!(mount_crypt_stat->flags
& ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
rc = -EIO;
printk(KERN_WARNING "Either the lower file "
"is not in a valid eCryptfs format, "
"or the key could not be retrieved. "
"Plaintext passthrough mode is not "
"enabled; returning -EIO\n");
mutex_unlock(&crypt_stat->cs_mutex);
goto out;
}
rc = 0;
crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
| ECRYPTFS_ENCRYPTED);
}
}
mutex_unlock(&crypt_stat->cs_mutex);
rc = setattr_prepare(&nop_mnt_idmap, dentry, ia);
if (rc)
goto out;
if (ia->ia_valid & ATTR_SIZE) {
rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size);
if (rc)
goto out;
}
memcpy(&lower_ia, ia, sizeof(lower_ia));
if (ia->ia_valid & ATTR_FILE)
lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
if (ia->ia_valid & ATTR_SIZE) {
rc = truncate_upper(dentry, ia, &lower_ia);
if (rc < 0)
goto out;
}
/*
* mode change is for clearing setuid/setgid bits. Allow lower fs
* to interpret this in its own way.
*/
if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
lower_ia.ia_valid &= ~ATTR_MODE;
inode_lock(d_inode(lower_dentry));
rc = notify_change(&nop_mnt_idmap, lower_dentry, &lower_ia, NULL);
inode_unlock(d_inode(lower_dentry));
out:
fsstack_copy_attr_all(inode, lower_inode);
return rc;
}
static int ecryptfs_getattr_link(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
int rc = 0;
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), stat);
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
char *target;
size_t targetsiz;
target = ecryptfs_readlink_lower(dentry, &targetsiz);
if (!IS_ERR(target)) {
kfree(target);
stat->size = targetsiz;
} else {
rc = PTR_ERR(target);
}
}
return rc;
}
static int ecryptfs_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
struct kstat lower_stat;
int rc;
rc = vfs_getattr(ecryptfs_dentry_to_lower_path(dentry), &lower_stat,
request_mask, flags);
if (!rc) {
fsstack_copy_attr_all(d_inode(dentry),
ecryptfs_inode_to_lower(d_inode(dentry)));
generic_fillattr(&nop_mnt_idmap, request_mask,
d_inode(dentry), stat);
stat->blocks = lower_stat.blocks;
}
return rc;
}
int
ecryptfs_setxattr(struct dentry *dentry, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
{
int rc;
struct dentry *lower_dentry;
struct inode *lower_inode;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_inode = d_inode(lower_dentry);
if (!(lower_inode->i_opflags & IOP_XATTR)) {
rc = -EOPNOTSUPP;
goto out;
}
inode_lock(lower_inode);
rc = __vfs_setxattr_locked(&nop_mnt_idmap, lower_dentry, name, value, size, flags, NULL);
inode_unlock(lower_inode);
if (!rc && inode)
fsstack_copy_attr_all(inode, lower_inode);
out:
return rc;
}
ssize_t
ecryptfs_getxattr_lower(struct dentry *lower_dentry, struct inode *lower_inode,
const char *name, void *value, size_t size)
{
int rc;
if (!(lower_inode->i_opflags & IOP_XATTR)) {
rc = -EOPNOTSUPP;
goto out;
}
inode_lock(lower_inode);
rc = __vfs_getxattr(lower_dentry, lower_inode, name, value, size);
inode_unlock(lower_inode);
out:
return rc;
}
static ssize_t
ecryptfs_getxattr(struct dentry *dentry, struct inode *inode,
const char *name, void *value, size_t size)
{
return ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry),
ecryptfs_inode_to_lower(inode),
name, value, size);
}
static ssize_t
ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
{
int rc = 0;
struct dentry *lower_dentry;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!d_inode(lower_dentry)->i_op->listxattr) {
rc = -EOPNOTSUPP;
goto out;
}
inode_lock(d_inode(lower_dentry));
rc = d_inode(lower_dentry)->i_op->listxattr(lower_dentry, list, size);
inode_unlock(d_inode(lower_dentry));
out:
return rc;
}
static int ecryptfs_removexattr(struct dentry *dentry, struct inode *inode,
const char *name)
{
int rc;
struct dentry *lower_dentry;
struct inode *lower_inode;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_inode = ecryptfs_inode_to_lower(inode);
if (!(lower_inode->i_opflags & IOP_XATTR)) {
rc = -EOPNOTSUPP;
goto out;
}
inode_lock(lower_inode);
rc = __vfs_removexattr(&nop_mnt_idmap, lower_dentry, name);
inode_unlock(lower_inode);
out:
return rc;
}
static int ecryptfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
return vfs_fileattr_get(ecryptfs_dentry_to_lower(dentry), fa);
}
static int ecryptfs_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
int rc;
rc = vfs_fileattr_set(&nop_mnt_idmap, lower_dentry, fa);
fsstack_copy_attr_all(d_inode(dentry), d_inode(lower_dentry));
return rc;
}
static struct posix_acl *ecryptfs_get_acl(struct mnt_idmap *idmap,
struct dentry *dentry, int type)
{
return vfs_get_acl(idmap, ecryptfs_dentry_to_lower(dentry),
posix_acl_xattr_name(type));
}
static int ecryptfs_set_acl(struct mnt_idmap *idmap,
struct dentry *dentry, struct posix_acl *acl,
int type)
{
int rc;
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
struct inode *lower_inode = d_inode(lower_dentry);
rc = vfs_set_acl(&nop_mnt_idmap, lower_dentry,
posix_acl_xattr_name(type), acl);
if (!rc)
fsstack_copy_attr_all(d_inode(dentry), lower_inode);
return rc;
}
const struct inode_operations ecryptfs_symlink_iops = {
.get_link = ecryptfs_get_link,
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
.getattr = ecryptfs_getattr_link,
.listxattr = ecryptfs_listxattr,
};
const struct inode_operations ecryptfs_dir_iops = {
.create = ecryptfs_create,
.lookup = ecryptfs_lookup,
.link = ecryptfs_link,
.unlink = ecryptfs_unlink,
.symlink = ecryptfs_symlink,
.mkdir = ecryptfs_mkdir,
.rmdir = ecryptfs_rmdir,
.mknod = ecryptfs_mknod,
.rename = ecryptfs_rename,
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
.listxattr = ecryptfs_listxattr,
.fileattr_get = ecryptfs_fileattr_get,
.fileattr_set = ecryptfs_fileattr_set,
.get_acl = ecryptfs_get_acl,
.set_acl = ecryptfs_set_acl,
};
const struct inode_operations ecryptfs_main_iops = {
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
.getattr = ecryptfs_getattr,
.listxattr = ecryptfs_listxattr,
.fileattr_get = ecryptfs_fileattr_get,
.fileattr_set = ecryptfs_fileattr_set,
.get_acl = ecryptfs_get_acl,
.set_acl = ecryptfs_set_acl,
};
static int ecryptfs_xattr_get(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, void *buffer, size_t size)
{
return ecryptfs_getxattr(dentry, inode, name, buffer, size);
}
static int ecryptfs_xattr_set(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value, size_t size,
int flags)
{
if (value)
return ecryptfs_setxattr(dentry, inode, name, value, size, flags);
else {
BUG_ON(flags != XATTR_REPLACE);
return ecryptfs_removexattr(dentry, inode, name);
}
}
static const struct xattr_handler ecryptfs_xattr_handler = {
.prefix = "", /* match anything */
.get = ecryptfs_xattr_get,
.set = ecryptfs_xattr_set,
};
const struct xattr_handler *ecryptfs_xattr_handlers[] = {
&ecryptfs_xattr_handler,
NULL
};
| linux-master | fs/ecryptfs/inode.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 1997-2003 Erez Zadok
* Copyright (C) 2001-2003 Stony Brook University
* Copyright (C) 2004-2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
* Michael C. Thompson <[email protected]>
* Tyler Hicks <[email protected]>
*/
#include <linux/dcache.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/skbuff.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/key.h>
#include <linux/parser.h>
#include <linux/fs_stack.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include "ecryptfs_kernel.h"
/*
* Module parameter that defines the ecryptfs_verbosity level.
*/
int ecryptfs_verbosity = 0;
module_param(ecryptfs_verbosity, int, 0);
MODULE_PARM_DESC(ecryptfs_verbosity,
"Initial verbosity level (0 or 1; defaults to "
"0, which is Quiet)");
/*
* Module parameter that defines the number of message buffer elements
*/
unsigned int ecryptfs_message_buf_len = ECRYPTFS_DEFAULT_MSG_CTX_ELEMS;
module_param(ecryptfs_message_buf_len, uint, 0);
MODULE_PARM_DESC(ecryptfs_message_buf_len,
"Number of message buffer elements");
/*
* Module parameter that defines the maximum guaranteed amount of time to wait
 * for a response from ecryptfsd. The actual sleep time will usually be
 * slightly longer than this value; it is shorter only when the response
 * arrives before the timeout.
*/
signed long ecryptfs_message_wait_timeout = ECRYPTFS_MAX_MSG_CTX_TTL / HZ;
module_param(ecryptfs_message_wait_timeout, long, 0);
MODULE_PARM_DESC(ecryptfs_message_wait_timeout,
"Maximum number of seconds that an operation will "
"sleep while waiting for a message response from "
"userspace");
/*
* Module parameter that is an estimate of the maximum number of users
* that will be concurrently using eCryptfs. Set this to the right
* value to balance performance and memory use.
*/
unsigned int ecryptfs_number_of_users = ECRYPTFS_DEFAULT_NUM_USERS;
module_param(ecryptfs_number_of_users, uint, 0);
MODULE_PARM_DESC(ecryptfs_number_of_users, "An estimate of the number of "
"concurrent users of eCryptfs");
void __ecryptfs_printk(const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
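	/* KERN_DEBUG is the two-char prefix "\001" "7", so fmt[1] identifies it */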
if (fmt[1] == '7') { /* KERN_DEBUG */
if (ecryptfs_verbosity >= 1)
vprintk(fmt, args);
} else
vprintk(fmt, args);
va_end(args);
}
/*
* ecryptfs_init_lower_file
* @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with
* the lower dentry and the lower mount set
*
* eCryptfs only ever keeps a single open file for every lower
* inode. All I/O operations to the lower inode occur through that
* file. When the first eCryptfs dentry that interposes with the first
* lower dentry for that inode is created, this function creates the
* lower file struct and associates it with the eCryptfs
* inode. When all eCryptfs files associated with the inode are released, the
* file is closed.
*
* The lower file will be opened with read/write permissions, if
* possible. Otherwise, it is opened read-only.
*
* This function does nothing if a lower file is already
* associated with the eCryptfs inode.
*
* Returns zero on success; non-zero otherwise
*/
static int ecryptfs_init_lower_file(struct dentry *dentry,
struct file **lower_file)
{
const struct cred *cred = current_cred();
const struct path *path = ecryptfs_dentry_to_lower_path(dentry);
int rc;
rc = ecryptfs_privileged_open(lower_file, path->dentry, path->mnt,
cred);
if (rc) {
printk(KERN_ERR "Error opening lower file "
"for lower_dentry [0x%p] and lower_mnt [0x%p]; "
"rc = [%d]\n", path->dentry, path->mnt, rc);
(*lower_file) = NULL;
}
return rc;
}
int ecryptfs_get_lower_file(struct dentry *dentry, struct inode *inode)
{
struct ecryptfs_inode_info *inode_info;
int count, rc = 0;
inode_info = ecryptfs_inode_to_private(inode);
mutex_lock(&inode_info->lower_file_mutex);
count = atomic_inc_return(&inode_info->lower_file_count);
if (WARN_ON_ONCE(count < 1))
rc = -EINVAL;
else if (count == 1) {
rc = ecryptfs_init_lower_file(dentry,
&inode_info->lower_file);
if (rc)
atomic_set(&inode_info->lower_file_count, 0);
}
mutex_unlock(&inode_info->lower_file_mutex);
return rc;
}
void ecryptfs_put_lower_file(struct inode *inode)
{
struct ecryptfs_inode_info *inode_info;
inode_info = ecryptfs_inode_to_private(inode);
if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
&inode_info->lower_file_mutex)) {
filemap_write_and_wait(inode->i_mapping);
fput(inode_info->lower_file);
inode_info->lower_file = NULL;
mutex_unlock(&inode_info->lower_file_mutex);
}
}
enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig,
ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher,
ecryptfs_opt_ecryptfs_key_bytes,
ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata,
ecryptfs_opt_encrypted_view, ecryptfs_opt_fnek_sig,
ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes,
ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only,
ecryptfs_opt_check_dev_ruid,
ecryptfs_opt_err };
static const match_table_t tokens = {
{ecryptfs_opt_sig, "sig=%s"},
{ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"},
{ecryptfs_opt_cipher, "cipher=%s"},
{ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"},
{ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"},
{ecryptfs_opt_passthrough, "ecryptfs_passthrough"},
{ecryptfs_opt_xattr_metadata, "ecryptfs_xattr_metadata"},
{ecryptfs_opt_encrypted_view, "ecryptfs_encrypted_view"},
{ecryptfs_opt_fnek_sig, "ecryptfs_fnek_sig=%s"},
{ecryptfs_opt_fn_cipher, "ecryptfs_fn_cipher=%s"},
{ecryptfs_opt_fn_cipher_key_bytes, "ecryptfs_fn_key_bytes=%u"},
{ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"},
{ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"},
{ecryptfs_opt_check_dev_ruid, "ecryptfs_check_dev_ruid"},
{ecryptfs_opt_err, NULL}
};
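/*
 * Example invocation (illustrative; the sig value is a placeholder for
 * a real auth tok signature already loaded in the user's keyring):
 *
 *	mount -t ecryptfs /lower /upper \
 *		-o ecryptfs_sig=0123456789abcdef,ecryptfs_cipher=aes,ecryptfs_key_bytes=16
 */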
static int ecryptfs_init_global_auth_toks(
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
struct ecryptfs_global_auth_tok *global_auth_tok;
struct ecryptfs_auth_tok *auth_tok;
int rc = 0;
list_for_each_entry(global_auth_tok,
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
rc = ecryptfs_keyring_auth_tok_for_sig(
&global_auth_tok->global_auth_tok_key, &auth_tok,
global_auth_tok->sig);
if (rc) {
printk(KERN_ERR "Could not find valid key in user "
"session keyring for sig specified in mount "
"option: [%s]\n", global_auth_tok->sig);
global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID;
goto out;
} else {
global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID;
up_write(&(global_auth_tok->global_auth_tok_key)->sem);
}
}
out:
return rc;
}
static void ecryptfs_init_mount_crypt_stat(
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
memset((void *)mount_crypt_stat, 0,
sizeof(struct ecryptfs_mount_crypt_stat));
INIT_LIST_HEAD(&mount_crypt_stat->global_auth_tok_list);
mutex_init(&mount_crypt_stat->global_auth_tok_list_mutex);
mount_crypt_stat->flags |= ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED;
}
/**
* ecryptfs_parse_options
* @sbi: The ecryptfs super block
* @options: The options passed to the kernel
* @check_ruid: set to 1 if device uid should be checked against the ruid
*
* Parse mount options:
 * sig=XXX - description (signature) of the key to use
 * (see the tokens table above for the full set of options)
*
* The signature of the key to use must be the description of a key
* already in the keyring. Mounting will fail if the key can not be
* found.
*
* Returns zero on success; non-zero on error
*/
static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
uid_t *check_ruid)
{
char *p;
int rc = 0;
int sig_set = 0;
int cipher_name_set = 0;
int fn_cipher_name_set = 0;
int cipher_key_bytes;
int cipher_key_bytes_set = 0;
int fn_cipher_key_bytes;
int fn_cipher_key_bytes_set = 0;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&sbi->mount_crypt_stat;
substring_t args[MAX_OPT_ARGS];
int token;
char *sig_src;
char *cipher_name_dst;
char *cipher_name_src;
char *fn_cipher_name_dst;
char *fn_cipher_name_src;
char *fnek_dst;
char *fnek_src;
char *cipher_key_bytes_src;
char *fn_cipher_key_bytes_src;
u8 cipher_code;
*check_ruid = 0;
if (!options) {
rc = -EINVAL;
goto out;
}
ecryptfs_init_mount_crypt_stat(mount_crypt_stat);
while ((p = strsep(&options, ",")) != NULL) {
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case ecryptfs_opt_sig:
case ecryptfs_opt_ecryptfs_sig:
sig_src = args[0].from;
rc = ecryptfs_add_global_auth_tok(mount_crypt_stat,
sig_src, 0);
if (rc) {
printk(KERN_ERR "Error attempting to register "
"global sig; rc = [%d]\n", rc);
goto out;
}
sig_set = 1;
break;
case ecryptfs_opt_cipher:
case ecryptfs_opt_ecryptfs_cipher:
cipher_name_src = args[0].from;
cipher_name_dst =
mount_crypt_stat->
global_default_cipher_name;
strncpy(cipher_name_dst, cipher_name_src,
ECRYPTFS_MAX_CIPHER_NAME_SIZE);
cipher_name_dst[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
cipher_name_set = 1;
break;
case ecryptfs_opt_ecryptfs_key_bytes:
cipher_key_bytes_src = args[0].from;
cipher_key_bytes =
(int)simple_strtol(cipher_key_bytes_src,
&cipher_key_bytes_src, 0);
mount_crypt_stat->global_default_cipher_key_size =
cipher_key_bytes;
cipher_key_bytes_set = 1;
break;
case ecryptfs_opt_passthrough:
mount_crypt_stat->flags |=
ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED;
break;
case ecryptfs_opt_xattr_metadata:
mount_crypt_stat->flags |=
ECRYPTFS_XATTR_METADATA_ENABLED;
break;
case ecryptfs_opt_encrypted_view:
mount_crypt_stat->flags |=
ECRYPTFS_XATTR_METADATA_ENABLED;
mount_crypt_stat->flags |=
ECRYPTFS_ENCRYPTED_VIEW_ENABLED;
break;
case ecryptfs_opt_fnek_sig:
fnek_src = args[0].from;
fnek_dst =
mount_crypt_stat->global_default_fnek_sig;
strncpy(fnek_dst, fnek_src, ECRYPTFS_SIG_SIZE_HEX);
mount_crypt_stat->global_default_fnek_sig[
ECRYPTFS_SIG_SIZE_HEX] = '\0';
rc = ecryptfs_add_global_auth_tok(
mount_crypt_stat,
mount_crypt_stat->global_default_fnek_sig,
ECRYPTFS_AUTH_TOK_FNEK);
if (rc) {
printk(KERN_ERR "Error attempting to register "
"global fnek sig [%s]; rc = [%d]\n",
mount_crypt_stat->global_default_fnek_sig,
rc);
goto out;
}
mount_crypt_stat->flags |=
(ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES
| ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK);
break;
case ecryptfs_opt_fn_cipher:
fn_cipher_name_src = args[0].from;
fn_cipher_name_dst =
mount_crypt_stat->global_default_fn_cipher_name;
strncpy(fn_cipher_name_dst, fn_cipher_name_src,
ECRYPTFS_MAX_CIPHER_NAME_SIZE);
mount_crypt_stat->global_default_fn_cipher_name[
ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
fn_cipher_name_set = 1;
break;
case ecryptfs_opt_fn_cipher_key_bytes:
fn_cipher_key_bytes_src = args[0].from;
fn_cipher_key_bytes =
(int)simple_strtol(fn_cipher_key_bytes_src,
&fn_cipher_key_bytes_src, 0);
mount_crypt_stat->global_default_fn_cipher_key_bytes =
fn_cipher_key_bytes;
fn_cipher_key_bytes_set = 1;
break;
case ecryptfs_opt_unlink_sigs:
mount_crypt_stat->flags |= ECRYPTFS_UNLINK_SIGS;
break;
case ecryptfs_opt_mount_auth_tok_only:
mount_crypt_stat->flags |=
ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY;
break;
case ecryptfs_opt_check_dev_ruid:
*check_ruid = 1;
break;
case ecryptfs_opt_err:
default:
printk(KERN_WARNING
"%s: eCryptfs: unrecognized option [%s]\n",
__func__, p);
}
}
if (!sig_set) {
rc = -EINVAL;
ecryptfs_printk(KERN_ERR, "You must supply at least one valid "
"auth tok signature as a mount "
"parameter; see the eCryptfs README\n");
goto out;
}
if (!cipher_name_set) {
int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER);
BUG_ON(cipher_name_len > ECRYPTFS_MAX_CIPHER_NAME_SIZE);
strcpy(mount_crypt_stat->global_default_cipher_name,
ECRYPTFS_DEFAULT_CIPHER);
}
if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
&& !fn_cipher_name_set)
strcpy(mount_crypt_stat->global_default_fn_cipher_name,
mount_crypt_stat->global_default_cipher_name);
if (!cipher_key_bytes_set)
mount_crypt_stat->global_default_cipher_key_size = 0;
if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
&& !fn_cipher_key_bytes_set)
mount_crypt_stat->global_default_fn_cipher_key_bytes =
mount_crypt_stat->global_default_cipher_key_size;
cipher_code = ecryptfs_code_for_cipher_string(
mount_crypt_stat->global_default_cipher_name,
mount_crypt_stat->global_default_cipher_key_size);
if (!cipher_code) {
ecryptfs_printk(KERN_ERR,
"eCryptfs doesn't support cipher: %s\n",
mount_crypt_stat->global_default_cipher_name);
rc = -EINVAL;
goto out;
}
mutex_lock(&key_tfm_list_mutex);
if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name,
NULL)) {
rc = ecryptfs_add_new_key_tfm(
NULL, mount_crypt_stat->global_default_cipher_name,
mount_crypt_stat->global_default_cipher_key_size);
if (rc) {
printk(KERN_ERR "Error attempting to initialize "
"cipher with name = [%s] and key size = [%td]; "
"rc = [%d]\n",
mount_crypt_stat->global_default_cipher_name,
mount_crypt_stat->global_default_cipher_key_size,
rc);
rc = -EINVAL;
mutex_unlock(&key_tfm_list_mutex);
goto out;
}
}
if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
&& !ecryptfs_tfm_exists(
mount_crypt_stat->global_default_fn_cipher_name, NULL)) {
rc = ecryptfs_add_new_key_tfm(
NULL, mount_crypt_stat->global_default_fn_cipher_name,
mount_crypt_stat->global_default_fn_cipher_key_bytes);
if (rc) {
printk(KERN_ERR "Error attempting to initialize "
"cipher with name = [%s] and key size = [%td]; "
"rc = [%d]\n",
mount_crypt_stat->global_default_fn_cipher_name,
mount_crypt_stat->global_default_fn_cipher_key_bytes,
rc);
rc = -EINVAL;
mutex_unlock(&key_tfm_list_mutex);
goto out;
}
}
mutex_unlock(&key_tfm_list_mutex);
rc = ecryptfs_init_global_auth_toks(mount_crypt_stat);
if (rc)
printk(KERN_WARNING "One or more global auth toks could not "
"properly register; rc = [%d]\n", rc);
out:
return rc;
}
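/*
 * Illustrative sketch only (the tokens[] table itself lives elsewhere in
 * this file): a typical option string reaching ecryptfs_parse_options()
 * from mount(2) might be
 *
 *	sig=0123456789abcdef,ecryptfs_cipher=aes,ecryptfs_key_bytes=16
 *
 * strsep() peels off one comma-separated token per loop iteration, and
 * match_token() maps the token to an ecryptfs_opt_* case, leaving the
 * value portion in args[0].from. The signature value above is made up
 * for illustration.
 */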
struct kmem_cache *ecryptfs_sb_info_cache;
static struct file_system_type ecryptfs_fs_type;
/**
 * ecryptfs_mount
 * @fs_type: The filesystem type that the superblock should belong to
 * @flags: The flags associated with the mount
 * @dev_name: The path to mount over
 * @raw_data: The options passed into the kernel
 *
 * Returns the root dentry of the new mount on success; ERR_PTR otherwise
 */
static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
struct super_block *s;
struct ecryptfs_sb_info *sbi;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct ecryptfs_dentry_info *root_info;
const char *err = "Getting sb failed";
struct inode *inode;
struct path path;
uid_t check_ruid;
int rc;
sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL);
if (!sbi) {
rc = -ENOMEM;
goto out;
}
if (!dev_name) {
rc = -EINVAL;
err = "Device name cannot be null";
goto out;
}
rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
if (rc) {
err = "Error parsing options";
goto out;
}
mount_crypt_stat = &sbi->mount_crypt_stat;
s = sget(fs_type, NULL, set_anon_super, flags, NULL);
if (IS_ERR(s)) {
rc = PTR_ERR(s);
goto out;
}
rc = super_setup_bdi(s);
if (rc)
goto out1;
ecryptfs_set_superblock_private(s, sbi);
/* ->kill_sb() will take care of sbi after that point */
sbi = NULL;
s->s_op = &ecryptfs_sops;
s->s_xattr = ecryptfs_xattr_handlers;
s->s_d_op = &ecryptfs_dops;
err = "Reading sb failed";
rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
if (rc) {
ecryptfs_printk(KERN_WARNING, "kern_path() failed\n");
goto out1;
}
if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
rc = -EINVAL;
printk(KERN_ERR "Mount on filesystem of type "
"eCryptfs explicitly disallowed due to "
"known incompatibilities\n");
goto out_free;
}
if (is_idmapped_mnt(path.mnt)) {
rc = -EINVAL;
printk(KERN_ERR "Mounting on idmapped mounts currently disallowed\n");
goto out_free;
}
if (check_ruid && !uid_eq(d_inode(path.dentry)->i_uid, current_uid())) {
rc = -EPERM;
printk(KERN_ERR "Mount of device (uid: %d) not owned by "
"requested user (uid: %d)\n",
i_uid_read(d_inode(path.dentry)),
from_kuid(&init_user_ns, current_uid()));
goto out_free;
}
ecryptfs_set_superblock_lower(s, path.dentry->d_sb);
	/*
	 * Set the POSIX ACL flag based on whether they're enabled in the lower
	 * mount.
	 */
s->s_flags = flags & ~SB_POSIXACL;
s->s_flags |= path.dentry->d_sb->s_flags & SB_POSIXACL;
	/*
	 * Force a read-only eCryptfs mount when:
	 *   1) The lower mount is ro
	 *   2) The ecryptfs_encrypted_view mount option is specified
	 */
if (sb_rdonly(path.dentry->d_sb) || mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
s->s_flags |= SB_RDONLY;
s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
s->s_blocksize = path.dentry->d_sb->s_blocksize;
s->s_magic = ECRYPTFS_SUPER_MAGIC;
s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
rc = -EINVAL;
if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
pr_err("eCryptfs: maximum fs stacking depth exceeded\n");
goto out_free;
}
inode = ecryptfs_get_inode(d_inode(path.dentry), s);
rc = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_free;
s->s_root = d_make_root(inode);
if (!s->s_root) {
rc = -ENOMEM;
goto out_free;
}
rc = -ENOMEM;
root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
if (!root_info)
goto out_free;
/* ->kill_sb() will take care of root_info */
ecryptfs_set_dentry_private(s->s_root, root_info);
root_info->lower_path = path;
s->s_flags |= SB_ACTIVE;
return dget(s->s_root);
out_free:
path_put(&path);
out1:
deactivate_locked_super(s);
out:
if (sbi) {
ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat);
kmem_cache_free(ecryptfs_sb_info_cache, sbi);
}
printk(KERN_ERR "%s; rc = [%d]\n", err, rc);
return ERR_PTR(rc);
}
/**
* ecryptfs_kill_block_super
* @sb: The ecryptfs super block
*
* Used to bring the superblock down and free the private data.
*/
static void ecryptfs_kill_block_super(struct super_block *sb)
{
struct ecryptfs_sb_info *sb_info = ecryptfs_superblock_to_private(sb);
kill_anon_super(sb);
if (!sb_info)
return;
ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat);
kmem_cache_free(ecryptfs_sb_info_cache, sb_info);
}
static struct file_system_type ecryptfs_fs_type = {
.owner = THIS_MODULE,
.name = "ecryptfs",
.mount = ecryptfs_mount,
.kill_sb = ecryptfs_kill_block_super,
.fs_flags = 0
};
MODULE_ALIAS_FS("ecryptfs");
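/*
 * Usage sketch, assuming a key whose description matches the signature is
 * already loaded into the kernel keyring (the signature below is made up
 * for illustration):
 *
 *	# mount -t ecryptfs /secret /secret \
 *		-o ecryptfs_sig=0123456789abcdef,ecryptfs_cipher=aes,ecryptfs_key_bytes=16
 *
 * The "ecryptfs" name registered above is what mount's -t argument and
 * MODULE_ALIAS_FS() autoloading match against.
 */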
/*
 * inode_info_init_once
 *
 * Slab constructor for ecryptfs_inode_info objects; initializes the
 * embedded VFS inode once, when the slab object is first created
 */
static void
inode_info_init_once(void *vptr)
{
struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr;
inode_init_once(&ei->vfs_inode);
}
static struct ecryptfs_cache_info {
struct kmem_cache **cache;
const char *name;
size_t size;
slab_flags_t flags;
void (*ctor)(void *obj);
} ecryptfs_cache_infos[] = {
{
.cache = &ecryptfs_auth_tok_list_item_cache,
.name = "ecryptfs_auth_tok_list_item",
.size = sizeof(struct ecryptfs_auth_tok_list_item),
},
{
.cache = &ecryptfs_file_info_cache,
.name = "ecryptfs_file_cache",
.size = sizeof(struct ecryptfs_file_info),
},
{
.cache = &ecryptfs_dentry_info_cache,
.name = "ecryptfs_dentry_info_cache",
.size = sizeof(struct ecryptfs_dentry_info),
},
{
.cache = &ecryptfs_inode_info_cache,
.name = "ecryptfs_inode_cache",
.size = sizeof(struct ecryptfs_inode_info),
.flags = SLAB_ACCOUNT,
.ctor = inode_info_init_once,
},
{
.cache = &ecryptfs_sb_info_cache,
.name = "ecryptfs_sb_cache",
.size = sizeof(struct ecryptfs_sb_info),
},
{
.cache = &ecryptfs_header_cache,
.name = "ecryptfs_headers",
.size = PAGE_SIZE,
},
{
.cache = &ecryptfs_xattr_cache,
.name = "ecryptfs_xattr_cache",
.size = PAGE_SIZE,
},
{
.cache = &ecryptfs_key_record_cache,
.name = "ecryptfs_key_record_cache",
.size = sizeof(struct ecryptfs_key_record),
},
{
.cache = &ecryptfs_key_sig_cache,
.name = "ecryptfs_key_sig_cache",
.size = sizeof(struct ecryptfs_key_sig),
},
{
.cache = &ecryptfs_global_auth_tok_cache,
.name = "ecryptfs_global_auth_tok_cache",
.size = sizeof(struct ecryptfs_global_auth_tok),
},
{
.cache = &ecryptfs_key_tfm_cache,
.name = "ecryptfs_key_tfm_cache",
.size = sizeof(struct ecryptfs_key_tfm),
},
};
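/*
 * Extending the table above is a one-entry change; a hypothetical cache
 * for a struct ecryptfs_foo (name and struct are illustrative only)
 * would look like:
 *
 *	{
 *		.cache = &ecryptfs_foo_cache,
 *		.name = "ecryptfs_foo_cache",
 *		.size = sizeof(struct ecryptfs_foo),
 *	},
 *
 * ecryptfs_init_kmem_caches() and ecryptfs_free_kmem_caches() pick the
 * entry up automatically by iterating the array.
 */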
static void ecryptfs_free_kmem_caches(void)
{
int i;
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) {
struct ecryptfs_cache_info *info;
info = &ecryptfs_cache_infos[i];
kmem_cache_destroy(*(info->cache));
}
}
/**
* ecryptfs_init_kmem_caches
*
* Returns zero on success; non-zero otherwise
*/
static int ecryptfs_init_kmem_caches(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) {
struct ecryptfs_cache_info *info;
info = &ecryptfs_cache_infos[i];
*(info->cache) = kmem_cache_create(info->name, info->size, 0,
SLAB_HWCACHE_ALIGN | info->flags, info->ctor);
if (!*(info->cache)) {
ecryptfs_free_kmem_caches();
ecryptfs_printk(KERN_WARNING, "%s: "
"kmem_cache_create failed\n",
info->name);
return -ENOMEM;
}
}
return 0;
}
static struct kobject *ecryptfs_kobj;
static ssize_t version_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buff)
{
return snprintf(buff, PAGE_SIZE, "%d\n", ECRYPTFS_VERSIONING_MASK);
}
static struct kobj_attribute version_attr = __ATTR_RO(version);
static struct attribute *attributes[] = {
&version_attr.attr,
NULL,
};
static const struct attribute_group attr_group = {
.attrs = attributes,
};
static int do_sysfs_registration(void)
{
int rc;
ecryptfs_kobj = kobject_create_and_add("ecryptfs", fs_kobj);
if (!ecryptfs_kobj) {
printk(KERN_ERR "Unable to create ecryptfs kset\n");
rc = -ENOMEM;
goto out;
}
rc = sysfs_create_group(ecryptfs_kobj, &attr_group);
if (rc) {
printk(KERN_ERR
"Unable to create ecryptfs version attributes\n");
kobject_put(ecryptfs_kobj);
}
out:
return rc;
}
static void do_sysfs_unregistration(void)
{
sysfs_remove_group(ecryptfs_kobj, &attr_group);
kobject_put(ecryptfs_kobj);
}
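/*
 * The attribute registered above hangs off fs_kobj, so userspace can read
 * the versioning mask (sketch, assuming sysfs is mounted at /sys):
 *
 *	$ cat /sys/fs/ecryptfs/version
 *	126
 *
 * The number printed is whatever ECRYPTFS_VERSIONING_MASK evaluates to
 * for the build; the value shown is illustrative.
 */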
static int __init ecryptfs_init(void)
{
int rc;
if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_SIZE) {
rc = -EINVAL;
ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
"larger than the host's page size, and so "
"eCryptfs cannot run on this system. The "
"default eCryptfs extent size is [%u] bytes; "
"the page size is [%lu] bytes.\n",
ECRYPTFS_DEFAULT_EXTENT_SIZE,
(unsigned long)PAGE_SIZE);
goto out;
}
rc = ecryptfs_init_kmem_caches();
if (rc) {
printk(KERN_ERR
"Failed to allocate one or more kmem_cache objects\n");
goto out;
}
rc = do_sysfs_registration();
if (rc) {
printk(KERN_ERR "sysfs registration failed\n");
goto out_free_kmem_caches;
}
rc = ecryptfs_init_kthread();
if (rc) {
printk(KERN_ERR "%s: kthread initialization failed; "
"rc = [%d]\n", __func__, rc);
goto out_do_sysfs_unregistration;
}
rc = ecryptfs_init_messaging();
if (rc) {
printk(KERN_ERR "Failure occurred while attempting to "
"initialize the communications channel to "
"ecryptfsd\n");
goto out_destroy_kthread;
}
rc = ecryptfs_init_crypto();
if (rc) {
printk(KERN_ERR "Failure whilst attempting to init crypto; "
"rc = [%d]\n", rc);
goto out_release_messaging;
}
rc = register_filesystem(&ecryptfs_fs_type);
if (rc) {
printk(KERN_ERR "Failed to register filesystem\n");
goto out_destroy_crypto;
}
if (ecryptfs_verbosity > 0)
printk(KERN_CRIT "eCryptfs verbosity set to %d. Secret values "
"will be written to the syslog!\n", ecryptfs_verbosity);
goto out;
out_destroy_crypto:
ecryptfs_destroy_crypto();
out_release_messaging:
ecryptfs_release_messaging();
out_destroy_kthread:
ecryptfs_destroy_kthread();
out_do_sysfs_unregistration:
do_sysfs_unregistration();
out_free_kmem_caches:
ecryptfs_free_kmem_caches();
out:
return rc;
}
static void __exit ecryptfs_exit(void)
{
int rc;
rc = ecryptfs_destroy_crypto();
if (rc)
printk(KERN_ERR "Failure whilst attempting to destroy crypto; "
"rc = [%d]\n", rc);
ecryptfs_release_messaging();
ecryptfs_destroy_kthread();
do_sysfs_unregistration();
unregister_filesystem(&ecryptfs_fs_type);
ecryptfs_free_kmem_caches();
}
MODULE_AUTHOR("Michael A. Halcrow <[email protected]>");
MODULE_DESCRIPTION("eCryptfs");
MODULE_LICENSE("GPL");
module_init(ecryptfs_init)
module_exit(ecryptfs_exit)
| linux-master | fs/ecryptfs/main.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 2008 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
*/
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mount.h>
#include "ecryptfs_kernel.h"
struct ecryptfs_open_req {
struct file **lower_file;
struct path path;
struct completion done;
struct list_head kthread_ctl_list;
};
static struct ecryptfs_kthread_ctl {
#define ECRYPTFS_KTHREAD_ZOMBIE 0x00000001
u32 flags;
struct mutex mux;
struct list_head req_list;
wait_queue_head_t wait;
} ecryptfs_kthread_ctl;
static struct task_struct *ecryptfs_kthread;
/**
* ecryptfs_threadfn
* @ignored: ignored
*
* The eCryptfs kernel thread that has the responsibility of getting
* the lower file with RW permissions.
*
 * Always returns zero; the thread runs until ecryptfs_destroy_kthread()
 * stops it
*/
static int ecryptfs_threadfn(void *ignored)
{
set_freezable();
while (1) {
struct ecryptfs_open_req *req;
wait_event_freezable(
ecryptfs_kthread_ctl.wait,
(!list_empty(&ecryptfs_kthread_ctl.req_list)
|| kthread_should_stop()));
mutex_lock(&ecryptfs_kthread_ctl.mux);
if (ecryptfs_kthread_ctl.flags & ECRYPTFS_KTHREAD_ZOMBIE) {
mutex_unlock(&ecryptfs_kthread_ctl.mux);
goto out;
}
while (!list_empty(&ecryptfs_kthread_ctl.req_list)) {
req = list_first_entry(&ecryptfs_kthread_ctl.req_list,
struct ecryptfs_open_req,
kthread_ctl_list);
list_del(&req->kthread_ctl_list);
*req->lower_file = dentry_open(&req->path,
(O_RDWR | O_LARGEFILE), current_cred());
complete(&req->done);
}
mutex_unlock(&ecryptfs_kthread_ctl.mux);
}
out:
return 0;
}
int __init ecryptfs_init_kthread(void)
{
int rc = 0;
mutex_init(&ecryptfs_kthread_ctl.mux);
init_waitqueue_head(&ecryptfs_kthread_ctl.wait);
INIT_LIST_HEAD(&ecryptfs_kthread_ctl.req_list);
ecryptfs_kthread = kthread_run(&ecryptfs_threadfn, NULL,
"ecryptfs-kthread");
if (IS_ERR(ecryptfs_kthread)) {
rc = PTR_ERR(ecryptfs_kthread);
printk(KERN_ERR "%s: Failed to create kernel thread; rc = [%d]"
"\n", __func__, rc);
}
return rc;
}
void ecryptfs_destroy_kthread(void)
{
struct ecryptfs_open_req *req, *tmp;
mutex_lock(&ecryptfs_kthread_ctl.mux);
ecryptfs_kthread_ctl.flags |= ECRYPTFS_KTHREAD_ZOMBIE;
list_for_each_entry_safe(req, tmp, &ecryptfs_kthread_ctl.req_list,
kthread_ctl_list) {
list_del(&req->kthread_ctl_list);
*req->lower_file = ERR_PTR(-EIO);
complete(&req->done);
}
mutex_unlock(&ecryptfs_kthread_ctl.mux);
kthread_stop(ecryptfs_kthread);
wake_up(&ecryptfs_kthread_ctl.wait);
}
/**
* ecryptfs_privileged_open
* @lower_file: Result of dentry_open by root on lower dentry
* @lower_dentry: Lower dentry for file to open
* @lower_mnt: Lower vfsmount for file to open
* @cred: credential to use for this call
*
 * This function gets a read/write file opened against the lower dentry;
 * if opening with the caller's credentials fails, the open is retried
 * via the privileged eCryptfs kthread.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_privileged_open(struct file **lower_file,
struct dentry *lower_dentry,
struct vfsmount *lower_mnt,
const struct cred *cred)
{
struct ecryptfs_open_req req;
int flags = O_LARGEFILE;
int rc = 0;
init_completion(&req.done);
req.lower_file = lower_file;
req.path.dentry = lower_dentry;
req.path.mnt = lower_mnt;
/* Corresponding dput() and mntput() are done when the
* lower file is fput() when all eCryptfs files for the inode are
* released. */
flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
(*lower_file) = dentry_open(&req.path, flags, cred);
if (!IS_ERR(*lower_file))
goto out;
if ((flags & O_ACCMODE) == O_RDONLY) {
rc = PTR_ERR((*lower_file));
goto out;
}
mutex_lock(&ecryptfs_kthread_ctl.mux);
if (ecryptfs_kthread_ctl.flags & ECRYPTFS_KTHREAD_ZOMBIE) {
rc = -EIO;
mutex_unlock(&ecryptfs_kthread_ctl.mux);
printk(KERN_ERR "%s: We are in the middle of shutting down; "
"aborting privileged request to open lower file\n",
__func__);
goto out;
}
list_add_tail(&req.kthread_ctl_list, &ecryptfs_kthread_ctl.req_list);
mutex_unlock(&ecryptfs_kthread_ctl.mux);
wake_up(&ecryptfs_kthread_ctl.wait);
wait_for_completion(&req.done);
if (IS_ERR(*lower_file))
rc = PTR_ERR(*lower_file);
out:
return rc;
}
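/*
 * The handoff above is a single-consumer request queue; in outline
 * (sketch mirroring the code paths in this file):
 *
 *	caller				ecryptfs-kthread
 *	------				----------------
 *	init_completion(&req.done)
 *	list_add_tail(&req...)		wait_event_freezable(...)
 *	wake_up(&ctl.wait)	 -->	dentry_open(O_RDWR, root creds)
 *	wait_for_completion()	 <--	complete(&req->done)
 *
 * ecryptfs_kthread_ctl.mux serializes list access on both sides, and the
 * ZOMBIE flag prevents queueing into a kthread that is shutting down.
 */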
| linux-master | fs/ecryptfs/kthread.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
*/
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include "ecryptfs_kernel.h"
/**
* ecryptfs_write_lower
* @ecryptfs_inode: The eCryptfs inode
* @data: Data to write
* @offset: Byte offset in the lower file to which to write the data
* @size: Number of bytes from @data to write at @offset in the lower
* file
*
* Write data to the lower file.
*
* Returns bytes written on success; less than zero on error
*/
int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
loff_t offset, size_t size)
{
struct file *lower_file;
ssize_t rc;
lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
if (!lower_file)
return -EIO;
rc = kernel_write(lower_file, data, size, &offset);
mark_inode_dirty_sync(ecryptfs_inode);
return rc;
}
/**
* ecryptfs_write_lower_page_segment
* @ecryptfs_inode: The eCryptfs inode
* @page_for_lower: The page containing the data to be written to the
* lower file
* @offset_in_page: The offset in the @page_for_lower from which to
* start writing the data
* @size: The amount of data from @page_for_lower to write to the
* lower file
*
* Determines the byte offset in the file for the given page and
* offset within the page, maps the page, and makes the call to write
* the contents of @page_for_lower to the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
struct page *page_for_lower,
size_t offset_in_page, size_t size)
{
char *virt;
loff_t offset;
int rc;
offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT)
+ offset_in_page);
virt = kmap_local_page(page_for_lower);
rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
if (rc > 0)
rc = 0;
kunmap_local(virt);
return rc;
}
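/*
 * Worked example of the offset computation above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12): for page_for_lower->index == 3 and
 * offset_in_page == 512, the lower byte offset is
 *
 *	(3 << 12) + 512 = 12288 + 512 = 12800
 */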
/**
* ecryptfs_write
* @ecryptfs_inode: The eCryptfs file into which to write
* @data: Virtual address where data to write is located
* @offset: Offset in the eCryptfs file at which to begin writing the
* data from @data
* @size: The number of bytes to write from @data
*
* Write an arbitrary amount of data to an arbitrary location in the
* eCryptfs inode page cache. This is done on a page-by-page, and then
* by an extent-by-extent, basis; individual extents are encrypted and
* written to the lower page cache (via VFS writes). This function
* takes care of all the address translation to locations in the lower
* filesystem; it also handles truncate events, writing out zeros
* where necessary.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
size_t size)
{
struct page *ecryptfs_page;
struct ecryptfs_crypt_stat *crypt_stat;
char *ecryptfs_page_virt;
loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode);
loff_t data_offset = 0;
loff_t pos;
int rc = 0;
crypt_stat = &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
/*
* if we are writing beyond current size, then start pos
* at the current size - we'll fill in zeros from there.
*/
if (offset > ecryptfs_file_size)
pos = ecryptfs_file_size;
else
pos = offset;
while (pos < (offset + size)) {
pgoff_t ecryptfs_page_idx = (pos >> PAGE_SHIFT);
size_t start_offset_in_page = (pos & ~PAGE_MASK);
size_t num_bytes = (PAGE_SIZE - start_offset_in_page);
loff_t total_remaining_bytes = ((offset + size) - pos);
if (fatal_signal_pending(current)) {
rc = -EINTR;
break;
}
if (num_bytes > total_remaining_bytes)
num_bytes = total_remaining_bytes;
if (pos < offset) {
/* remaining zeros to write, up to destination offset */
loff_t total_remaining_zeros = (offset - pos);
if (num_bytes > total_remaining_zeros)
num_bytes = total_remaining_zeros;
}
ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode,
ecryptfs_page_idx);
if (IS_ERR(ecryptfs_page)) {
rc = PTR_ERR(ecryptfs_page);
printk(KERN_ERR "%s: Error getting page at "
"index [%ld] from eCryptfs inode "
"mapping; rc = [%d]\n", __func__,
ecryptfs_page_idx, rc);
goto out;
}
ecryptfs_page_virt = kmap_local_page(ecryptfs_page);
/*
* pos: where we're now writing, offset: where the request was
* If current pos is before request, we are filling zeros
* If we are at or beyond request, we are writing the *data*
* If we're in a fresh page beyond eof, zero it in either case
*/
if (pos < offset || !start_offset_in_page) {
/* We are extending past the previous end of the file.
* Fill in zero values to the end of the page */
memset(((char *)ecryptfs_page_virt
+ start_offset_in_page), 0,
PAGE_SIZE - start_offset_in_page);
}
/* pos >= offset, we are now writing the data request */
if (pos >= offset) {
memcpy(((char *)ecryptfs_page_virt
+ start_offset_in_page),
(data + data_offset), num_bytes);
data_offset += num_bytes;
}
kunmap_local(ecryptfs_page_virt);
flush_dcache_page(ecryptfs_page);
SetPageUptodate(ecryptfs_page);
unlock_page(ecryptfs_page);
if (crypt_stat->flags & ECRYPTFS_ENCRYPTED)
rc = ecryptfs_encrypt_page(ecryptfs_page);
else
rc = ecryptfs_write_lower_page_segment(ecryptfs_inode,
ecryptfs_page,
start_offset_in_page,
data_offset);
put_page(ecryptfs_page);
if (rc) {
printk(KERN_ERR "%s: Error encrypting "
"page; rc = [%d]\n", __func__, rc);
goto out;
}
pos += num_bytes;
}
if (pos > ecryptfs_file_size) {
i_size_write(ecryptfs_inode, pos);
if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
int rc2;
rc2 = ecryptfs_write_inode_size_to_metadata(
ecryptfs_inode);
if (rc2) {
printk(KERN_ERR "Problem with "
"ecryptfs_write_inode_size_to_metadata; "
"rc = [%d]\n", rc2);
if (!rc)
rc = rc2;
goto out;
}
}
}
out:
return rc;
}
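/*
 * Zero-fill walk-through for the loop above (illustrative numbers, 4 KiB
 * pages): with i_size == 100 and a 10-byte write at offset 5000, pos
 * starts at 100. The first pass zeroes page 0 from byte 100 to the end,
 * the second pass zeroes page 1 in full, and only on the third pass are
 * the 10 data bytes copied in at byte 904 of page 1; i_size is then
 * raised to 5010.
 */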
/**
* ecryptfs_read_lower
* @data: The read data is stored here by this function
* @offset: Byte offset in the lower file from which to read the data
* @size: Number of bytes to read from @offset of the lower file and
* store into @data
* @ecryptfs_inode: The eCryptfs inode
*
* Read @size bytes of data at byte offset @offset from the lower
* inode into memory location @data.
*
* Returns bytes read on success; 0 on EOF; less than zero on error
*/
int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
struct inode *ecryptfs_inode)
{
struct file *lower_file;
lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
if (!lower_file)
return -EIO;
return kernel_read(lower_file, data, size, &offset);
}
/**
* ecryptfs_read_lower_page_segment
* @page_for_ecryptfs: The page into which data for eCryptfs will be
* written
 * @page_index: Index of the page within the eCryptfs file; used to
 *              compute the byte offset into the lower file
* @offset_in_page: Offset in @page_for_ecryptfs from which to start
* writing
* @size: The number of bytes to write into @page_for_ecryptfs
* @ecryptfs_inode: The eCryptfs inode
*
* Determines the byte offset in the file for the given page and
* offset within the page, maps the page, and makes the call to read
* the contents of @page_for_ecryptfs from the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
pgoff_t page_index,
size_t offset_in_page, size_t size,
struct inode *ecryptfs_inode)
{
char *virt;
loff_t offset;
int rc;
offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page);
virt = kmap_local_page(page_for_ecryptfs);
rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
if (rc > 0)
rc = 0;
kunmap_local(virt);
flush_dcache_page(page_for_ecryptfs);
return rc;
}
| linux-master | fs/ecryptfs/read_write.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 1997-2004 Erez Zadok
* Copyright (C) 2001-2004 Stony Brook University
* Copyright (C) 2004-2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
* Michael C. Thompson <[email protected]>
*/
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/fs_stack.h>
#include "ecryptfs_kernel.h"
/*
 * ecryptfs_read_update_atime
 *
 * generic_file_read_iter updates the atime of the upper layer inode.
 * But, it doesn't give us a chance to update the atime of the lower
 * layer inode. This function is a wrapper to generic_file_read_iter.
 * It updates the atime of the lower level inode if the read returns
 * without any errors. This is to be used only for file reads. The
 * function to be used for directory reads is ecryptfs_readdir.
 */
static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
struct iov_iter *to)
{
ssize_t rc;
const struct path *path;
struct file *file = iocb->ki_filp;
rc = generic_file_read_iter(iocb, to);
if (rc >= 0) {
path = ecryptfs_dentry_to_lower_path(file->f_path.dentry);
touch_atime(path);
}
return rc;
}
/*
 * ecryptfs_splice_read_update_atime
 *
 * filemap_splice_read updates the atime of the upper layer inode. But, it
 * doesn't give us a chance to update the atime of the lower layer inode.
 * This function is a wrapper to filemap_splice_read. It updates the atime
 * of the lower level inode if the splice read returns without any errors.
 * This is to be used only for file reads. The function to be used for
 * directory reads is ecryptfs_readdir.
 */
static ssize_t ecryptfs_splice_read_update_atime(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
{
ssize_t rc;
const struct path *path;
rc = filemap_splice_read(in, ppos, pipe, len, flags);
if (rc >= 0) {
path = ecryptfs_dentry_to_lower_path(in->f_path.dentry);
touch_atime(path);
}
return rc;
}
struct ecryptfs_getdents_callback {
struct dir_context ctx;
struct dir_context *caller;
struct super_block *sb;
int filldir_called;
int entries_written;
};
/* Inspired by generic filldir in fs/readdir.c */
static bool
ecryptfs_filldir(struct dir_context *ctx, const char *lower_name,
int lower_namelen, loff_t offset, u64 ino, unsigned int d_type)
{
struct ecryptfs_getdents_callback *buf =
container_of(ctx, struct ecryptfs_getdents_callback, ctx);
size_t name_size;
char *name;
int err;
bool res;
buf->filldir_called++;
err = ecryptfs_decode_and_decrypt_filename(&name, &name_size,
buf->sb, lower_name,
lower_namelen);
if (err) {
if (err != -EINVAL) {
ecryptfs_printk(KERN_DEBUG,
"%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n",
__func__, lower_name, err);
return false;
}
/* Mask -EINVAL errors as these are most likely due a plaintext
* filename present in the lower filesystem despite filename
* encryption being enabled. One unavoidable example would be
* the "lost+found" dentry in the root directory of an Ext4
* filesystem.
*/
return true;
}
buf->caller->pos = buf->ctx.pos;
res = dir_emit(buf->caller, name, name_size, ino, d_type);
kfree(name);
if (res)
buf->entries_written++;
return res;
}
/**
* ecryptfs_readdir
* @file: The eCryptfs directory file
* @ctx: The actor to feed the entries to
*/
static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
{
int rc;
struct file *lower_file;
struct inode *inode = file_inode(file);
struct ecryptfs_getdents_callback buf = {
.ctx.actor = ecryptfs_filldir,
.caller = ctx,
.sb = inode->i_sb,
};
lower_file = ecryptfs_file_to_lower(file);
rc = iterate_dir(lower_file, &buf.ctx);
ctx->pos = buf.ctx.pos;
if (rc >= 0 && (buf.entries_written || !buf.filldir_called))
fsstack_copy_attr_atime(inode, file_inode(lower_file));
return rc;
}
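/*
 * Note on the delegation above: iterate_dir() walks the *lower*
 * directory and calls ecryptfs_filldir() once per lower name; that
 * callback decrypts the name and forwards it to the caller's
 * dir_context via dir_emit(), so getdents(2) on the eCryptfs mount only
 * ever sees decrypted names (undecryptable plaintext names are skipped).
 */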
struct kmem_cache *ecryptfs_file_info_cache;
static int read_or_initialize_metadata(struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct ecryptfs_crypt_stat *crypt_stat;
int rc;
crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
mount_crypt_stat = &ecryptfs_superblock_to_private(
inode->i_sb)->mount_crypt_stat;
mutex_lock(&crypt_stat->cs_mutex);
if (crypt_stat->flags & ECRYPTFS_POLICY_APPLIED &&
crypt_stat->flags & ECRYPTFS_KEY_VALID) {
rc = 0;
goto out;
}
rc = ecryptfs_read_metadata(dentry);
if (!rc)
goto out;
if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) {
crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
| ECRYPTFS_ENCRYPTED);
rc = 0;
goto out;
}
if (!(mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) &&
!i_size_read(ecryptfs_inode_to_lower(inode))) {
rc = ecryptfs_initialize_file(dentry, inode);
if (!rc)
goto out;
}
rc = -EIO;
out:
mutex_unlock(&crypt_stat->cs_mutex);
return rc;
}
static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
{
struct file *lower_file = ecryptfs_file_to_lower(file);
/*
* Don't allow mmap on top of file systems that don't support it
* natively. If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
* allows recursive mounting, this will need to be extended.
*/
if (!lower_file->f_op->mmap)
return -ENODEV;
return generic_file_mmap(file, vma);
}
/**
* ecryptfs_open
* @inode: inode specifying file to open
* @file: Structure to return filled in
*
* Opens the file specified by inode.
*
* Returns zero on success; non-zero otherwise
*/
static int ecryptfs_open(struct inode *inode, struct file *file)
{
int rc = 0;
struct ecryptfs_crypt_stat *crypt_stat = NULL;
struct dentry *ecryptfs_dentry = file->f_path.dentry;
/* Private value of ecryptfs_dentry allocated in
* ecryptfs_lookup() */
struct ecryptfs_file_info *file_info;
/* Released in ecryptfs_release or end of function if failure */
file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
ecryptfs_set_file_private(file, file_info);
if (!file_info) {
ecryptfs_printk(KERN_ERR,
"Error attempting to allocate memory\n");
rc = -ENOMEM;
goto out;
}
crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
mutex_lock(&crypt_stat->cs_mutex);
if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)) {
ecryptfs_printk(KERN_DEBUG, "Setting flags for stat...\n");
/* Policy code enabled in future release */
crypt_stat->flags |= (ECRYPTFS_POLICY_APPLIED
| ECRYPTFS_ENCRYPTED);
}
mutex_unlock(&crypt_stat->cs_mutex);
rc = ecryptfs_get_lower_file(ecryptfs_dentry, inode);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
"[%pd]; rc = [%d]\n", __func__,
ecryptfs_dentry, rc);
goto out_free;
}
if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_ACCMODE)
== O_RDONLY && (file->f_flags & O_ACCMODE) != O_RDONLY) {
rc = -EPERM;
printk(KERN_WARNING "%s: Lower file is RO; eCryptfs "
"file must hence be opened RO\n", __func__);
goto out_put;
}
ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file);
rc = read_or_initialize_metadata(ecryptfs_dentry);
if (rc)
goto out_put;
ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = "
"[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
(unsigned long long)i_size_read(inode));
goto out;
out_put:
ecryptfs_put_lower_file(inode);
out_free:
kmem_cache_free(ecryptfs_file_info_cache,
ecryptfs_file_to_private(file));
out:
return rc;
}
/**
* ecryptfs_dir_open
* @inode: inode specifying file to open
* @file: Structure to return filled in
*
* Opens the file specified by inode.
*
* Returns zero on success; non-zero otherwise
*/
static int ecryptfs_dir_open(struct inode *inode, struct file *file)
{
struct dentry *ecryptfs_dentry = file->f_path.dentry;
/* Private value of ecryptfs_dentry allocated in
* ecryptfs_lookup() */
struct ecryptfs_file_info *file_info;
struct file *lower_file;
/* Released in ecryptfs_release or end of function if failure */
file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
ecryptfs_set_file_private(file, file_info);
if (unlikely(!file_info)) {
ecryptfs_printk(KERN_ERR,
"Error attempting to allocate memory\n");
return -ENOMEM;
}
lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
file->f_flags, current_cred());
if (IS_ERR(lower_file)) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
"[%pd]; rc = [%ld]\n", __func__,
ecryptfs_dentry, PTR_ERR(lower_file));
kmem_cache_free(ecryptfs_file_info_cache, file_info);
return PTR_ERR(lower_file);
}
ecryptfs_set_file_lower(file, lower_file);
return 0;
}
static int ecryptfs_flush(struct file *file, fl_owner_t td)
{
struct file *lower_file = ecryptfs_file_to_lower(file);
if (lower_file->f_op->flush) {
filemap_write_and_wait(file->f_mapping);
return lower_file->f_op->flush(lower_file, td);
}
return 0;
}
static int ecryptfs_release(struct inode *inode, struct file *file)
{
ecryptfs_put_lower_file(inode);
kmem_cache_free(ecryptfs_file_info_cache,
ecryptfs_file_to_private(file));
return 0;
}
static int ecryptfs_dir_release(struct inode *inode, struct file *file)
{
fput(ecryptfs_file_to_lower(file));
kmem_cache_free(ecryptfs_file_info_cache,
ecryptfs_file_to_private(file));
return 0;
}
static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence)
{
return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence);
}
static int
ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
int rc;
rc = file_write_and_wait(file);
if (rc)
return rc;
return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
}
static int ecryptfs_fasync(int fd, struct file *file, int flag)
{
int rc = 0;
struct file *lower_file = NULL;
lower_file = ecryptfs_file_to_lower(file);
if (lower_file->f_op->fasync)
rc = lower_file->f_op->fasync(fd, lower_file, flag);
return rc;
}
static long
ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct file *lower_file = ecryptfs_file_to_lower(file);
long rc = -ENOTTY;
if (!lower_file->f_op->unlocked_ioctl)
return rc;
switch (cmd) {
case FITRIM:
case FS_IOC_GETFLAGS:
case FS_IOC_SETFLAGS:
case FS_IOC_GETVERSION:
case FS_IOC_SETVERSION:
rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
return rc;
default:
return rc;
}
}
#ifdef CONFIG_COMPAT
static long
ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct file *lower_file = ecryptfs_file_to_lower(file);
long rc = -ENOIOCTLCMD;
if (!lower_file->f_op->compat_ioctl)
return rc;
switch (cmd) {
case FITRIM:
case FS_IOC32_GETFLAGS:
case FS_IOC32_SETFLAGS:
case FS_IOC32_GETVERSION:
case FS_IOC32_SETVERSION:
rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
fsstack_copy_attr_all(file_inode(file), file_inode(lower_file));
return rc;
default:
return rc;
}
}
#endif
const struct file_operations ecryptfs_dir_fops = {
.iterate_shared = ecryptfs_readdir,
.read = generic_read_dir,
.unlocked_ioctl = ecryptfs_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
.open = ecryptfs_dir_open,
.release = ecryptfs_dir_release,
.fsync = ecryptfs_fsync,
.llseek = ecryptfs_dir_llseek,
};
const struct file_operations ecryptfs_main_fops = {
.llseek = generic_file_llseek,
.read_iter = ecryptfs_read_update_atime,
.write_iter = generic_file_write_iter,
.unlocked_ioctl = ecryptfs_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
.mmap = ecryptfs_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
.fsync = ecryptfs_fsync,
.fasync = ecryptfs_fasync,
.splice_read = ecryptfs_splice_read_update_atime,
};
| linux-master | fs/ecryptfs/file.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 1997-2003 Erez Zadok
* Copyright (C) 2001-2003 Stony Brook University
* Copyright (C) 2004-2006 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
*/
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/fs_stack.h>
#include <linux/slab.h>
#include "ecryptfs_kernel.h"
/**
* ecryptfs_d_revalidate - revalidate an ecryptfs dentry
* @dentry: The ecryptfs dentry
* @flags: lookup flags
*
* Called when the VFS needs to revalidate a dentry. This
* is called whenever a name lookup finds a dentry in the
* dcache. Most filesystems leave this as NULL, because all their
* dentries in the dcache are valid.
*
 * Returns 1 if valid, 0 if invalid; -ECHILD when called in RCU-walk mode.
*
*/
static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
int rc = 1;
if (flags & LOOKUP_RCU)
return -ECHILD;
if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE)
rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
if (d_really_is_positive(dentry)) {
struct inode *inode = d_inode(dentry);
fsstack_copy_attr_all(inode, ecryptfs_inode_to_lower(inode));
if (!inode->i_nlink)
return 0;
}
return rc;
}
struct kmem_cache *ecryptfs_dentry_info_cache;
static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
{
kmem_cache_free(ecryptfs_dentry_info_cache,
container_of(head, struct ecryptfs_dentry_info, rcu));
}
/**
* ecryptfs_d_release
* @dentry: The ecryptfs dentry
*
* Called when a dentry is really deallocated.
*/
static void ecryptfs_d_release(struct dentry *dentry)
{
struct ecryptfs_dentry_info *p = dentry->d_fsdata;
if (p) {
path_put(&p->lower_path);
call_rcu(&p->rcu, ecryptfs_dentry_free_rcu);
}
}
const struct dentry_operations ecryptfs_dops = {
.d_revalidate = ecryptfs_d_revalidate,
.d_release = ecryptfs_d_release,
};
| linux-master | fs/ecryptfs/dentry.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 1997-2004 Erez Zadok
* Copyright (C) 2001-2004 Stony Brook University
* Copyright (C) 2004-2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
* Michael C. Thompson <[email protected]>
*/
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/compiler.h>
#include <linux/key.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/xattr.h>
#include "ecryptfs_kernel.h"
#define DECRYPT 0
#define ENCRYPT 1
/**
* ecryptfs_from_hex
 * @dst: Buffer to take the raw bytes converted from the src hex; must be
 *       at least dst_size bytes
 * @src: Buffer to be converted from a hex string representation to raw value
 * @dst_size: Number of hex character pairs to convert (the size of dst)
*/
void ecryptfs_from_hex(char *dst, char *src, int dst_size)
{
int x;
char tmp[3] = { 0, };
for (x = 0; x < dst_size; x++) {
tmp[0] = src[x * 2];
tmp[1] = src[x * 2 + 1];
dst[x] = (unsigned char)simple_strtol(tmp, NULL, 16);
}
}
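/*
 * Worked example for the conversion above: with src == "4142" and
 * dst_size == 2, the loop parses the pairs "41" and "42" as base-16
 * values, leaving the raw bytes 0x41 0x42 ("AB" in ASCII) in dst.
 */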
/**
* ecryptfs_calculate_md5 - calculates the md5 of @src
* @dst: Pointer to 16 bytes of allocated memory
* @crypt_stat: Pointer to crypt_stat struct for the current inode
* @src: Data to be md5'd
* @len: Length of @src
*
* Uses the allocated crypto context that crypt_stat references to
* generate the MD5 sum of the contents of src.
*/
static int ecryptfs_calculate_md5(char *dst,
struct ecryptfs_crypt_stat *crypt_stat,
char *src, int len)
{
int rc = crypto_shash_tfm_digest(crypt_stat->hash_tfm, src, len, dst);
if (rc) {
printk(KERN_ERR
"%s: Error computing crypto hash; rc = [%d]\n",
__func__, rc);
goto out;
}
out:
return rc;
}
static int ecryptfs_crypto_api_algify_cipher_name(char **algified_name,
char *cipher_name,
char *chaining_modifier)
{
int cipher_name_len = strlen(cipher_name);
int chaining_modifier_len = strlen(chaining_modifier);
int algified_name_len;
int rc;
algified_name_len = (chaining_modifier_len + cipher_name_len + 3);
(*algified_name) = kmalloc(algified_name_len, GFP_KERNEL);
if (!(*algified_name)) {
rc = -ENOMEM;
goto out;
}
snprintf((*algified_name), algified_name_len, "%s(%s)",
chaining_modifier, cipher_name);
rc = 0;
out:
return rc;
}
/**
* ecryptfs_derive_iv
 * @iv: Destination for the derived IV value
* @crypt_stat: Pointer to crypt_stat struct for the current inode
* @offset: Offset of the extent whose IV we are to derive
*
* Generate the initialization vector from the given root IV and page
* offset.
*
* Returns zero on success; non-zero on error.
*/
int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
loff_t offset)
{
int rc = 0;
char dst[MD5_DIGEST_SIZE];
char src[ECRYPTFS_MAX_IV_BYTES + 16];
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "root iv:\n");
ecryptfs_dump_hex(crypt_stat->root_iv, crypt_stat->iv_bytes);
}
/* TODO: It is probably secure to just cast the least
* significant bits of the root IV into an unsigned long and
* add the offset to that rather than go through all this
* hashing business. -Halcrow */
memcpy(src, crypt_stat->root_iv, crypt_stat->iv_bytes);
memset((src + crypt_stat->iv_bytes), 0, 16);
snprintf((src + crypt_stat->iv_bytes), 16, "%lld", offset);
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "source:\n");
ecryptfs_dump_hex(src, (crypt_stat->iv_bytes + 16));
}
rc = ecryptfs_calculate_md5(dst, crypt_stat, src,
(crypt_stat->iv_bytes + 16));
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
"MD5 while generating IV for a page\n");
goto out;
}
memcpy(iv, dst, crypt_stat->iv_bytes);
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "derived iv:\n");
ecryptfs_dump_hex(iv, crypt_stat->iv_bytes);
}
out:
return rc;
}
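/*
 * Sketch of the derivation above: the hashed source buffer is the root
 * IV followed by a 16-byte field holding the ASCII decimal extent
 * offset, i.e.
 *
 *	src = root_iv[0..iv_bytes-1] || snprintf("%lld", offset)
 *	iv  = MD5(src)[0..iv_bytes-1]
 *
 * so distinct extent offsets feed distinct inputs into the hash.
 */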
/**
* ecryptfs_init_crypt_stat
* @crypt_stat: Pointer to the crypt_stat struct to initialize.
*
* Initialize the crypt_stat structure.
*/
int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
{
struct crypto_shash *tfm;
int rc;
tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0);
if (IS_ERR(tfm)) {
rc = PTR_ERR(tfm);
ecryptfs_printk(KERN_ERR, "Error attempting to "
"allocate crypto context; rc = [%d]\n",
rc);
return rc;
}
memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
INIT_LIST_HEAD(&crypt_stat->keysig_list);
mutex_init(&crypt_stat->keysig_list_mutex);
mutex_init(&crypt_stat->cs_mutex);
mutex_init(&crypt_stat->cs_tfm_mutex);
crypt_stat->hash_tfm = tfm;
crypt_stat->flags |= ECRYPTFS_STRUCT_INITIALIZED;
return 0;
}
/**
* ecryptfs_destroy_crypt_stat
 * @crypt_stat: Pointer to the crypt_stat struct to free.
*
* Releases all memory associated with a crypt_stat struct.
*/
void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
{
struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
crypto_free_skcipher(crypt_stat->tfm);
crypto_free_shash(crypt_stat->hash_tfm);
list_for_each_entry_safe(key_sig, key_sig_tmp,
&crypt_stat->keysig_list, crypt_stat_list) {
list_del(&key_sig->crypt_stat_list);
kmem_cache_free(ecryptfs_key_sig_cache, key_sig);
}
memset(crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
}
void ecryptfs_destroy_mount_crypt_stat(
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
struct ecryptfs_global_auth_tok *auth_tok, *auth_tok_tmp;
if (!(mount_crypt_stat->flags & ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED))
return;
mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
list_for_each_entry_safe(auth_tok, auth_tok_tmp,
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
list_del(&auth_tok->mount_crypt_stat_list);
if (!(auth_tok->flags & ECRYPTFS_AUTH_TOK_INVALID))
key_put(auth_tok->global_auth_tok_key);
kmem_cache_free(ecryptfs_global_auth_tok_cache, auth_tok);
}
mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
memset(mount_crypt_stat, 0, sizeof(struct ecryptfs_mount_crypt_stat));
}
/**
* virt_to_scatterlist
* @addr: Virtual address
* @size: Size of data; should be an even multiple of the block size
 * @sg: Pointer to the scatterlist array to fill in; must not be NULL,
 *      since the array is initialized unconditionally
* @sg_size: Max array size
*
* Fills in a scatterlist array with page references for a passed
* virtual address.
*
 * Returns the number of scatterlist structs used in the array; -ENOMEM
 * if @size does not fit within @sg_size entries
*/
int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
int sg_size)
{
int i = 0;
struct page *pg;
int offset;
int remainder_of_page;
sg_init_table(sg, sg_size);
while (size > 0 && i < sg_size) {
pg = virt_to_page(addr);
offset = offset_in_page(addr);
sg_set_page(&sg[i], pg, 0, offset);
remainder_of_page = PAGE_SIZE - offset;
if (size >= remainder_of_page) {
sg[i].length = remainder_of_page;
addr += remainder_of_page;
size -= remainder_of_page;
} else {
sg[i].length = size;
addr += size;
size = 0;
}
i++;
}
if (size > 0)
return -ENOMEM;
return i;
}
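/*
 * Worked example for the loop above, assuming 4 KiB pages: a 6000-byte
 * buffer starting at page offset 3000 spans three pages, so the array is
 * filled with segment lengths 1096, 4096 and 808 (summing to 6000) and 3
 * is returned; with sg_size < 3 the data does not fit and -ENOMEM is
 * returned instead.
 */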
/**
* crypt_scatterlist
 * @crypt_stat: Cryptographic context for the operation
* @dst_sg: Destination of the data after performing the crypto operation
* @src_sg: Data to be encrypted or decrypted
* @size: Length of data
* @iv: IV to use
* @op: ENCRYPT or DECRYPT to indicate the desired operation
*
* Returns the number of bytes encrypted or decrypted; negative value on error
*/
static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
struct scatterlist *dst_sg,
struct scatterlist *src_sg, int size,
unsigned char *iv, int op)
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(ecr);
int rc = 0;
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
crypt_stat->key_size);
ecryptfs_dump_hex(crypt_stat->key,
crypt_stat->key_size);
}
mutex_lock(&crypt_stat->cs_tfm_mutex);
req = skcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
if (!req) {
mutex_unlock(&crypt_stat->cs_tfm_mutex);
rc = -ENOMEM;
goto out;
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &ecr);
/* Consider doing this once, when the file is opened */
if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
rc = crypto_skcipher_setkey(crypt_stat->tfm, crypt_stat->key,
crypt_stat->key_size);
if (rc) {
ecryptfs_printk(KERN_ERR,
"Error setting key; rc = [%d]\n",
rc);
mutex_unlock(&crypt_stat->cs_tfm_mutex);
rc = -EINVAL;
goto out;
}
crypt_stat->flags |= ECRYPTFS_KEY_SET;
}
mutex_unlock(&crypt_stat->cs_tfm_mutex);
skcipher_request_set_crypt(req, src_sg, dst_sg, size, iv);
rc = op == ENCRYPT ? crypto_skcipher_encrypt(req) :
crypto_skcipher_decrypt(req);
rc = crypto_wait_req(rc, &ecr);
out:
skcipher_request_free(req);
return rc;
}
/*
* lower_offset_for_page
*
* Convert an eCryptfs page index into a lower byte offset
*/
static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
struct page *page)
{
return ecryptfs_lower_header_size(crypt_stat) +
((loff_t)page->index << PAGE_SHIFT);
}
/**
* crypt_extent
* @crypt_stat: crypt_stat containing cryptographic context for the
* encryption operation
* @dst_page: The page to write the result into
* @src_page: The page to read from
* @extent_offset: Page extent offset for use in generating IV
* @op: ENCRYPT or DECRYPT to indicate the desired operation
*
* Encrypts or decrypts one extent of data.
*
 * Returns zero on success; non-zero otherwise
*/
static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat,
struct page *dst_page,
struct page *src_page,
unsigned long extent_offset, int op)
{
pgoff_t page_index = op == ENCRYPT ? src_page->index : dst_page->index;
loff_t extent_base;
char extent_iv[ECRYPTFS_MAX_IV_BYTES];
struct scatterlist src_sg, dst_sg;
size_t extent_size = crypt_stat->extent_size;
int rc;
extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size));
rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
(extent_base + extent_offset));
if (rc) {
ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for "
"extent [0x%.16llx]; rc = [%d]\n",
(unsigned long long)(extent_base + extent_offset), rc);
goto out;
}
sg_init_table(&src_sg, 1);
sg_init_table(&dst_sg, 1);
sg_set_page(&src_sg, src_page, extent_size,
extent_offset * extent_size);
sg_set_page(&dst_sg, dst_page, extent_size,
extent_offset * extent_size);
rc = crypt_scatterlist(crypt_stat, &dst_sg, &src_sg, extent_size,
extent_iv, op);
if (rc < 0) {
printk(KERN_ERR "%s: Error attempting to crypt page with "
"page_index = [%ld], extent_offset = [%ld]; "
"rc = [%d]\n", __func__, page_index, extent_offset, rc);
goto out;
}
rc = 0;
out:
return rc;
}
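/*
 * Extent indexing example for the code above: with PAGE_SIZE == 4096 and
 * extent_size == 4096 there is exactly one extent per page, so
 * extent_base == page_index and the IV is derived from page_index +
 * extent_offset; with extent_size == 2048 each page holds two extents
 * and extent_base == page_index * 2.
 */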
/**
* ecryptfs_encrypt_page
* @page: Page mapped from the eCryptfs inode for the file; contains
* decrypted content that needs to be encrypted (to a temporary
* page; not in place) and written out to the lower file
*
* Encrypt an eCryptfs page. This is done on a per-extent basis. Note
* that eCryptfs pages may straddle the lower pages -- for instance,
* if the file was created on a machine with an 8K page size
* (resulting in an 8K header), and then the file is copied onto a
* host with a 32K page size, then when reading page 0 of the eCryptfs
* file, 24K of page 0 of the lower file will be read and decrypted,
* and then 8K of page 1 of the lower file will be read and decrypted.
*
* Returns zero on success; negative on error
*/
int ecryptfs_encrypt_page(struct page *page)
{
struct inode *ecryptfs_inode;
struct ecryptfs_crypt_stat *crypt_stat;
char *enc_extent_virt;
struct page *enc_extent_page = NULL;
loff_t extent_offset;
loff_t lower_offset;
int rc = 0;
ecryptfs_inode = page->mapping->host;
crypt_stat =
&(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat);
BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
enc_extent_page = alloc_page(GFP_USER);
if (!enc_extent_page) {
rc = -ENOMEM;
ecryptfs_printk(KERN_ERR, "Error allocating memory for "
"encrypted extent\n");
goto out;
}
for (extent_offset = 0;
extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
extent_offset++) {
rc = crypt_extent(crypt_stat, enc_extent_page, page,
extent_offset, ENCRYPT);
if (rc) {
printk(KERN_ERR "%s: Error encrypting extent; "
"rc = [%d]\n", __func__, rc);
goto out;
}
}
lower_offset = lower_offset_for_page(crypt_stat, page);
enc_extent_virt = kmap_local_page(enc_extent_page);
rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset,
PAGE_SIZE);
kunmap_local(enc_extent_virt);
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
"Error attempting to write lower page; rc = [%d]\n",
rc);
goto out;
}
rc = 0;
out:
	if (enc_extent_page)
		__free_page(enc_extent_page);
return rc;
}
/**
* ecryptfs_decrypt_page
* @page: Page mapped from the eCryptfs inode for the file; data read
* and decrypted from the lower file will be written into this
* page
*
* Decrypt an eCryptfs page. This is done on a per-extent basis. Note
* that eCryptfs pages may straddle the lower pages -- for instance,
* if the file was created on a machine with an 8K page size
* (resulting in an 8K header), and then the file is copied onto a
* host with a 32K page size, then when reading page 0 of the eCryptfs
* file, 24K of page 0 of the lower file will be read and decrypted,
* and then 8K of page 1 of the lower file will be read and decrypted.
*
* Returns zero on success; negative on error
*/
int ecryptfs_decrypt_page(struct page *page)
{
struct inode *ecryptfs_inode;
struct ecryptfs_crypt_stat *crypt_stat;
char *page_virt;
unsigned long extent_offset;
loff_t lower_offset;
int rc = 0;
ecryptfs_inode = page->mapping->host;
crypt_stat =
&(ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat);
BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
lower_offset = lower_offset_for_page(crypt_stat, page);
page_virt = kmap_local_page(page);
rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
ecryptfs_inode);
kunmap_local(page_virt);
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
"Error attempting to read lower page; rc = [%d]\n",
rc);
goto out;
}
for (extent_offset = 0;
extent_offset < (PAGE_SIZE / crypt_stat->extent_size);
extent_offset++) {
rc = crypt_extent(crypt_stat, page, page,
extent_offset, DECRYPT);
if (rc) {
printk(KERN_ERR "%s: Error decrypting extent; "
"rc = [%d]\n", __func__, rc);
goto out;
}
}
out:
return rc;
}
#define ECRYPTFS_MAX_SCATTERLIST_LEN 4
/**
* ecryptfs_init_crypt_ctx
 * @crypt_stat: crypt_stat structure whose crypto context needs initializing
*
* Initialize the crypto context.
*
* TODO: Performance: Keep a cache of initialized cipher contexts;
* only init if needed
*/
int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
{
char *full_alg_name;
int rc = -EINVAL;
ecryptfs_printk(KERN_DEBUG,
"Initializing cipher [%s]; strlen = [%d]; "
"key_size_bits = [%zd]\n",
crypt_stat->cipher, (int)strlen(crypt_stat->cipher),
crypt_stat->key_size << 3);
mutex_lock(&crypt_stat->cs_tfm_mutex);
if (crypt_stat->tfm) {
rc = 0;
goto out_unlock;
}
rc = ecryptfs_crypto_api_algify_cipher_name(&full_alg_name,
crypt_stat->cipher, "cbc");
if (rc)
goto out_unlock;
crypt_stat->tfm = crypto_alloc_skcipher(full_alg_name, 0, 0);
if (IS_ERR(crypt_stat->tfm)) {
rc = PTR_ERR(crypt_stat->tfm);
crypt_stat->tfm = NULL;
ecryptfs_printk(KERN_ERR, "cryptfs: init_crypt_ctx(): "
"Error initializing cipher [%s]\n",
full_alg_name);
goto out_free;
}
crypto_skcipher_set_flags(crypt_stat->tfm,
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
rc = 0;
out_free:
kfree(full_alg_name);
out_unlock:
mutex_unlock(&crypt_stat->cs_tfm_mutex);
return rc;
}
static void set_extent_mask_and_shift(struct ecryptfs_crypt_stat *crypt_stat)
{
int extent_size_tmp;
crypt_stat->extent_mask = 0xFFFFFFFF;
crypt_stat->extent_shift = 0;
if (crypt_stat->extent_size == 0)
return;
extent_size_tmp = crypt_stat->extent_size;
while ((extent_size_tmp & 0x01) == 0) {
extent_size_tmp >>= 1;
crypt_stat->extent_mask <<= 1;
crypt_stat->extent_shift++;
}
}
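/*
 * Worked example for the loop above: extent_size == 4096 (0x1000) has
 * twelve trailing zero bits, so the loop leaves extent_shift == 12 and
 * extent_mask == 0xFFFFF000; offset >> extent_shift then yields an
 * extent index and offset & extent_mask its aligned start.
 */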
void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
{
/* Default values; may be overwritten as we are parsing the
* packets. */
crypt_stat->extent_size = ECRYPTFS_DEFAULT_EXTENT_SIZE;
set_extent_mask_and_shift(crypt_stat);
crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
else {
if (PAGE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
crypt_stat->metadata_size =
ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
else
crypt_stat->metadata_size = PAGE_SIZE;
}
}
/*
* ecryptfs_compute_root_iv
*
* On error, sets the root IV to all 0's.
*/
int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat)
{
int rc = 0;
char dst[MD5_DIGEST_SIZE];
BUG_ON(crypt_stat->iv_bytes > MD5_DIGEST_SIZE);
BUG_ON(crypt_stat->iv_bytes <= 0);
if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
rc = -EINVAL;
ecryptfs_printk(KERN_WARNING, "Session key not valid; "
"cannot generate root IV\n");
goto out;
}
rc = ecryptfs_calculate_md5(dst, crypt_stat, crypt_stat->key,
crypt_stat->key_size);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
"MD5 while generating root IV\n");
goto out;
}
memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes);
out:
if (rc) {
memset(crypt_stat->root_iv, 0, crypt_stat->iv_bytes);
crypt_stat->flags |= ECRYPTFS_SECURITY_WARNING;
}
return rc;
}
static void ecryptfs_generate_new_key(struct ecryptfs_crypt_stat *crypt_stat)
{
get_random_bytes(crypt_stat->key, crypt_stat->key_size);
crypt_stat->flags |= ECRYPTFS_KEY_VALID;
ecryptfs_compute_root_iv(crypt_stat);
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "Generated new session key:\n");
ecryptfs_dump_hex(crypt_stat->key,
crypt_stat->key_size);
}
}
/**
* ecryptfs_copy_mount_wide_flags_to_inode_flags
* @crypt_stat: The inode's cryptographic context
* @mount_crypt_stat: The mount point's cryptographic context
*
* This function propagates the mount-wide flags to individual inode
* flags.
*/
static void ecryptfs_copy_mount_wide_flags_to_inode_flags(
struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
if (mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED)
crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
crypt_stat->flags |= ECRYPTFS_VIEW_AS_ENCRYPTED;
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
crypt_stat->flags |= ECRYPTFS_ENCRYPT_FILENAMES;
if (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK)
crypt_stat->flags |= ECRYPTFS_ENCFN_USE_MOUNT_FNEK;
else if (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_FEK)
crypt_stat->flags |= ECRYPTFS_ENCFN_USE_FEK;
}
}
static int ecryptfs_copy_mount_wide_sigs_to_inode_sigs(
struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
struct ecryptfs_global_auth_tok *global_auth_tok;
int rc = 0;
mutex_lock(&crypt_stat->keysig_list_mutex);
mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
list_for_each_entry(global_auth_tok,
&mount_crypt_stat->global_auth_tok_list,
mount_crypt_stat_list) {
if (global_auth_tok->flags & ECRYPTFS_AUTH_TOK_FNEK)
continue;
rc = ecryptfs_add_keysig(crypt_stat, global_auth_tok->sig);
if (rc) {
printk(KERN_ERR "Error adding keysig; rc = [%d]\n", rc);
goto out;
}
}
out:
mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
mutex_unlock(&crypt_stat->keysig_list_mutex);
return rc;
}
/**
* ecryptfs_set_default_crypt_stat_vals
* @crypt_stat: The inode's cryptographic context
* @mount_crypt_stat: The mount point's cryptographic context
*
* Default values in the event that policy does not override them.
*/
static void ecryptfs_set_default_crypt_stat_vals(
struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat,
mount_crypt_stat);
ecryptfs_set_default_sizes(crypt_stat);
strcpy(crypt_stat->cipher, ECRYPTFS_DEFAULT_CIPHER);
crypt_stat->key_size = ECRYPTFS_DEFAULT_KEY_BYTES;
crypt_stat->flags &= ~(ECRYPTFS_KEY_VALID);
crypt_stat->file_version = ECRYPTFS_FILE_VERSION;
crypt_stat->mount_crypt_stat = mount_crypt_stat;
}
/**
* ecryptfs_new_file_context
* @ecryptfs_inode: The eCryptfs inode
*
* If the crypto context for the file has not yet been established,
* this is where we do that. Establishing a new crypto context
* involves the following decisions:
* - What cipher to use?
* - What set of authentication tokens to use?
* Here we just worry about getting enough information into the
* authentication tokens so that we know that they are available.
* We associate the available authentication tokens with the new file
* via the set of signatures in the crypt_stat struct. Later, when
* the headers are actually written out, we may again defer to
* userspace to perform the encryption of the session key; for the
* foreseeable future, this will be the case with public key packets.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_new_file_context(struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&ecryptfs_superblock_to_private(
ecryptfs_inode->i_sb)->mount_crypt_stat;
int cipher_name_len;
int rc = 0;
ecryptfs_set_default_crypt_stat_vals(crypt_stat, mount_crypt_stat);
crypt_stat->flags |= (ECRYPTFS_ENCRYPTED | ECRYPTFS_KEY_VALID);
ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat,
mount_crypt_stat);
rc = ecryptfs_copy_mount_wide_sigs_to_inode_sigs(crypt_stat,
mount_crypt_stat);
if (rc) {
printk(KERN_ERR "Error attempting to copy mount-wide key sigs "
"to the inode key sigs; rc = [%d]\n", rc);
goto out;
}
cipher_name_len =
strlen(mount_crypt_stat->global_default_cipher_name);
memcpy(crypt_stat->cipher,
mount_crypt_stat->global_default_cipher_name,
cipher_name_len);
crypt_stat->cipher[cipher_name_len] = '\0';
crypt_stat->key_size =
mount_crypt_stat->global_default_cipher_key_size;
ecryptfs_generate_new_key(crypt_stat);
rc = ecryptfs_init_crypt_ctx(crypt_stat);
if (rc)
ecryptfs_printk(KERN_ERR, "Error initializing cryptographic "
"context for cipher [%s]: rc = [%d]\n",
crypt_stat->cipher, rc);
out:
return rc;
}
/**
* ecryptfs_validate_marker - check for the ecryptfs marker
* @data: The data block in which to check
*
* Returns zero if marker found; -EINVAL if not found
*/
static int ecryptfs_validate_marker(char *data)
{
u32 m_1, m_2;
m_1 = get_unaligned_be32(data);
m_2 = get_unaligned_be32(data + 4);
if ((m_1 ^ MAGIC_ECRYPTFS_MARKER) == m_2)
return 0;
ecryptfs_printk(KERN_DEBUG, "m_1 = [0x%.8x]; m_2 = [0x%.8x]; "
"MAGIC_ECRYPTFS_MARKER = [0x%.8x]\n", m_1, m_2,
MAGIC_ECRYPTFS_MARKER);
ecryptfs_printk(KERN_DEBUG, "(m_1 ^ MAGIC_ECRYPTFS_MARKER) = "
"[0x%.8x]\n", (m_1 ^ MAGIC_ECRYPTFS_MARKER));
return -EINVAL;
}
struct ecryptfs_flag_map_elem {
u32 file_flag;
u32 local_flag;
};
/* Add support for additional flags by adding elements here. */
static struct ecryptfs_flag_map_elem ecryptfs_flag_map[] = {
{0x00000001, ECRYPTFS_ENABLE_HMAC},
{0x00000002, ECRYPTFS_ENCRYPTED},
{0x00000004, ECRYPTFS_METADATA_IN_XATTR},
{0x00000008, ECRYPTFS_ENCRYPT_FILENAMES}
};
/**
* ecryptfs_process_flags
* @crypt_stat: The cryptographic context
* @page_virt: Source data to be parsed
* @bytes_read: Updated with the number of bytes read
*/
static void ecryptfs_process_flags(struct ecryptfs_crypt_stat *crypt_stat,
char *page_virt, int *bytes_read)
{
int i;
u32 flags;
flags = get_unaligned_be32(page_virt);
for (i = 0; i < ARRAY_SIZE(ecryptfs_flag_map); i++)
if (flags & ecryptfs_flag_map[i].file_flag) {
crypt_stat->flags |= ecryptfs_flag_map[i].local_flag;
} else
crypt_stat->flags &= ~(ecryptfs_flag_map[i].local_flag);
/* Version is in top 8 bits of the 32-bit flag vector */
crypt_stat->file_version = ((flags >> 24) & 0xFF);
(*bytes_read) = 4;
}
/**
* write_ecryptfs_marker
* @page_virt: The pointer into a page at which to begin writing the marker
* @written: Number of bytes written
*
* Marker = 0x3c81b7f5
*/
static void write_ecryptfs_marker(char *page_virt, size_t *written)
{
u32 m_1, m_2;
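/*
 * m_1 is random and m_2 = m_1 ^ MAGIC_ECRYPTFS_MARKER, so the marker can
 * later be validated by checking m_1 ^ m_2 == MAGIC_ECRYPTFS_MARKER (see
 * ecryptfs_validate_marker()) without the magic bytes ever appearing
 * verbatim on disk.
 */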
get_random_bytes(&m_1, (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2));
m_2 = (m_1 ^ MAGIC_ECRYPTFS_MARKER);
put_unaligned_be32(m_1, page_virt);
page_virt += (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2);
put_unaligned_be32(m_2, page_virt);
(*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
}
void ecryptfs_write_crypt_stat_flags(char *page_virt,
struct ecryptfs_crypt_stat *crypt_stat,
size_t *written)
{
u32 flags = 0;
int i;
for (i = 0; i < ARRAY_SIZE(ecryptfs_flag_map); i++)
if (crypt_stat->flags & ecryptfs_flag_map[i].local_flag)
flags |= ecryptfs_flag_map[i].file_flag;
/* Version is in top 8 bits of the 32-bit flag vector */
flags |= ((((u8)crypt_stat->file_version) << 24) & 0xFF000000);
put_unaligned_be32(flags, page_virt);
(*written) = 4;
}
struct ecryptfs_cipher_code_str_map_elem {
char cipher_str[16];
u8 cipher_code;
};
/* Add support for additional ciphers by adding elements here. The
* cipher_code is whatever OpenPGP applications use to identify the
* ciphers. List in order of probability. */
static struct ecryptfs_cipher_code_str_map_elem
ecryptfs_cipher_code_str_map[] = {
{"aes",RFC2440_CIPHER_AES_128 },
{"blowfish", RFC2440_CIPHER_BLOWFISH},
{"des3_ede", RFC2440_CIPHER_DES3_EDE},
{"cast5", RFC2440_CIPHER_CAST_5},
{"twofish", RFC2440_CIPHER_TWOFISH},
{"cast6", RFC2440_CIPHER_CAST_6},
{"aes", RFC2440_CIPHER_AES_192},
{"aes", RFC2440_CIPHER_AES_256}
};
/**
* ecryptfs_code_for_cipher_string
* @cipher_name: The string alias for the cipher
* @key_bytes: Length of key in bytes; used for AES code selection
*
* Returns zero on no match, or the cipher code on match
*/
u8 ecryptfs_code_for_cipher_string(char *cipher_name, size_t key_bytes)
{
int i;
u8 code = 0;
struct ecryptfs_cipher_code_str_map_elem *map =
ecryptfs_cipher_code_str_map;
if (strcmp(cipher_name, "aes") == 0) {
switch (key_bytes) {
case 16:
code = RFC2440_CIPHER_AES_128;
break;
case 24:
code = RFC2440_CIPHER_AES_192;
break;
case 32:
code = RFC2440_CIPHER_AES_256;
}
} else {
for (i = 0; i < ARRAY_SIZE(ecryptfs_cipher_code_str_map); i++)
if (strcmp(cipher_name, map[i].cipher_str) == 0) {
code = map[i].cipher_code;
break;
}
}
return code;
}
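/*
 * Example (illustrative only): ecryptfs_code_for_cipher_string("aes", 32)
 * returns RFC2440_CIPHER_AES_256, while an unrecognized cipher name (or an
 * AES key size other than 16, 24, or 32 bytes) returns 0.
 */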
/**
* ecryptfs_cipher_code_to_string
* @str: Destination to write out the cipher name
* @cipher_code: The code to convert to cipher name string
*
* Returns zero on success
*/
int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code)
{
int rc = 0;
int i;
str[0] = '\0';
for (i = 0; i < ARRAY_SIZE(ecryptfs_cipher_code_str_map); i++)
if (cipher_code == ecryptfs_cipher_code_str_map[i].cipher_code)
strcpy(str, ecryptfs_cipher_code_str_map[i].cipher_str);
if (str[0] == '\0') {
ecryptfs_printk(KERN_WARNING, "Cipher code not recognized: "
"[%d]\n", cipher_code);
rc = -EINVAL;
}
return rc;
}
int ecryptfs_read_and_validate_header_region(struct inode *inode)
{
u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES];
u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES;
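/* The lower file begins with the stored file size, followed by the marker */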
int rc;
rc = ecryptfs_read_lower(file_size, 0, ECRYPTFS_SIZE_AND_MARKER_BYTES,
inode);
if (rc < 0)
return rc;
else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
return -EINVAL;
rc = ecryptfs_validate_marker(marker);
if (!rc)
ecryptfs_i_size_init(file_size, inode);
return rc;
}
void
ecryptfs_write_header_metadata(char *virt,
struct ecryptfs_crypt_stat *crypt_stat,
size_t *written)
{
u32 header_extent_size;
u16 num_header_extents_at_front;
header_extent_size = (u32)crypt_stat->extent_size;
num_header_extents_at_front =
(u16)(crypt_stat->metadata_size / crypt_stat->extent_size);
put_unaligned_be32(header_extent_size, virt);
virt += 4;
put_unaligned_be16(num_header_extents_at_front, virt);
(*written) = 6;
}
struct kmem_cache *ecryptfs_header_cache;
/**
* ecryptfs_write_headers_virt
* @page_virt: The virtual address to write the headers to
* @max: The size of memory allocated at page_virt
* @size: Set to the number of bytes written by this function
* @crypt_stat: The cryptographic context
* @ecryptfs_dentry: The eCryptfs dentry
*
* Format version: 1
*
* Header Extent:
* Octets 0-7: Unencrypted file size (big-endian)
* Octets 8-15: eCryptfs special marker
* Octets 16-19: Flags
* Octet 16: File format version number (between 0 and 255)
* Octets 17-18: Reserved
* Octet 19: Bit 1 (lsb): Reserved
* Bit 2: Encrypted?
* Bits 3-8: Reserved
* Octets 20-23: Header extent size (big-endian)
* Octets 24-25: Number of header extents at front of file
* (big-endian)
* Octet 26: Begin RFC 2440 authentication token packet set
* Data Extent 0:
* Lower data (CBC encrypted)
* Data Extent 1:
* Lower data (CBC encrypted)
* ...
*
* Returns zero on success
*/
static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
size_t *size,
struct ecryptfs_crypt_stat *crypt_stat,
struct dentry *ecryptfs_dentry)
{
int rc;
size_t written;
size_t offset;
offset = ECRYPTFS_FILE_SIZE_BYTES;
write_ecryptfs_marker((page_virt + offset), &written);
offset += written;
ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat,
&written);
offset += written;
ecryptfs_write_header_metadata((page_virt + offset), crypt_stat,
&written);
offset += written;
rc = ecryptfs_generate_key_packet_set((page_virt + offset), crypt_stat,
ecryptfs_dentry, &written,
max - offset);
if (rc)
ecryptfs_printk(KERN_WARNING, "Error generating key packet "
"set; rc = [%d]\n", rc);
if (size) {
offset += written;
*size = offset;
}
return rc;
}
static int
ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode,
char *virt, size_t virt_len)
{
int rc;
rc = ecryptfs_write_lower(ecryptfs_inode, virt,
0, virt_len);
if (rc < 0)
printk(KERN_ERR "%s: Error attempting to write header "
"information to lower file; rc = [%d]\n", __func__, rc);
else
rc = 0;
return rc;
}
static int
ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry,
struct inode *ecryptfs_inode,
char *page_virt, size_t size)
{
int rc;
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
struct inode *lower_inode = d_inode(lower_dentry);
if (!(lower_inode->i_opflags & IOP_XATTR)) {
rc = -EOPNOTSUPP;
goto out;
}
inode_lock(lower_inode);
rc = __vfs_setxattr(&nop_mnt_idmap, lower_dentry, lower_inode,
ECRYPTFS_XATTR_NAME, page_virt, size, 0);
if (!rc && ecryptfs_inode)
fsstack_copy_attr_all(ecryptfs_inode, lower_inode);
inode_unlock(lower_inode);
out:
return rc;
}
static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
unsigned int order)
{
struct page *page;
page = alloc_pages(gfp_mask | __GFP_ZERO, order);
if (page)
return (unsigned long) page_address(page);
return 0;
}
/**
* ecryptfs_write_metadata
* @ecryptfs_dentry: The eCryptfs dentry, which should be negative
* @ecryptfs_inode: The newly created eCryptfs inode
*
* Write the file headers out. This will likely involve a userspace
* callout, in which the session key is encrypted with one or more
* public keys and/or the passphrase necessary to do the encryption is
* retrieved via a prompt. Exactly what happens at this point should
* be policy-dependent.
*
* Returns zero on success; non-zero on error
*/
int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
unsigned int order;
char *virt;
size_t virt_len;
size_t size = 0;
int rc = 0;
if (likely(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
printk(KERN_ERR "Key is invalid; bailing out\n");
rc = -EINVAL;
goto out;
}
} else {
printk(KERN_WARNING "%s: Encrypted flag not set\n",
__func__);
rc = -EINVAL;
goto out;
}
virt_len = crypt_stat->metadata_size;
order = get_order(virt_len);
/* Released in this function */
virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order);
if (!virt) {
printk(KERN_ERR "%s: Out of memory\n", __func__);
rc = -ENOMEM;
goto out;
}
/* Zeroed page ensures the in-header unencrypted i_size is set to 0 */
rc = ecryptfs_write_headers_virt(virt, virt_len, &size, crypt_stat,
ecryptfs_dentry);
if (unlikely(rc)) {
printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n",
__func__, rc);
goto out_free;
}
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, ecryptfs_inode,
virt, size);
else
rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
virt_len);
if (rc) {
printk(KERN_ERR "%s: Error writing metadata out to lower file; "
"rc = [%d]\n", __func__, rc);
goto out_free;
}
out_free:
free_pages((unsigned long)virt, order);
out:
return rc;
}
#define ECRYPTFS_DONT_VALIDATE_HEADER_SIZE 0
#define ECRYPTFS_VALIDATE_HEADER_SIZE 1
static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
char *virt, int *bytes_read,
int validate_header_size)
{
int rc = 0;
u32 header_extent_size;
u16 num_header_extents_at_front;
header_extent_size = get_unaligned_be32(virt);
virt += sizeof(__be32);
num_header_extents_at_front = get_unaligned_be16(virt);
crypt_stat->metadata_size = (((size_t)num_header_extents_at_front
* (size_t)header_extent_size));
(*bytes_read) = (sizeof(__be32) + sizeof(__be16));
if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE)
&& (crypt_stat->metadata_size
< ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) {
rc = -EINVAL;
printk(KERN_WARNING "Invalid header size: [%zd]\n",
crypt_stat->metadata_size);
}
return rc;
}
/**
* set_default_header_data
* @crypt_stat: The cryptographic context
*
* For version 0 file format; this function is only for backwards
* compatibility for files created with the prior versions of
* eCryptfs.
*/
static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
{
crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
}
void ecryptfs_i_size_init(const char *page_virt, struct inode *inode)
{
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct ecryptfs_crypt_stat *crypt_stat;
u64 file_size;
crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
mount_crypt_stat =
&ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
file_size = i_size_read(ecryptfs_inode_to_lower(inode));
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
file_size += crypt_stat->metadata_size;
} else
file_size = get_unaligned_be64(page_virt);
i_size_write(inode, (loff_t)file_size);
crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED;
}
/**
* ecryptfs_read_headers_virt
* @page_virt: The virtual address into which to read the headers
* @crypt_stat: The cryptographic context
* @ecryptfs_dentry: The eCryptfs dentry
* @validate_header_size: Whether to validate the header size while reading
*
* Read/parse the header data. The header format is detailed in the
* comment block for the ecryptfs_write_headers_virt() function.
*
* Returns zero on success
*/
static int ecryptfs_read_headers_virt(char *page_virt,
struct ecryptfs_crypt_stat *crypt_stat,
struct dentry *ecryptfs_dentry,
int validate_header_size)
{
int rc = 0;
int offset;
int bytes_read;
ecryptfs_set_default_sizes(crypt_stat);
crypt_stat->mount_crypt_stat = &ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
offset = ECRYPTFS_FILE_SIZE_BYTES;
rc = ecryptfs_validate_marker(page_virt + offset);
if (rc)
goto out;
if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
ecryptfs_i_size_init(page_virt, d_inode(ecryptfs_dentry));
offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
ecryptfs_process_flags(crypt_stat, (page_virt + offset), &bytes_read);
if (crypt_stat->file_version > ECRYPTFS_SUPPORTED_FILE_VERSION) {
ecryptfs_printk(KERN_WARNING, "File version is [%d]; only "
"file version [%d] is supported by this "
"version of eCryptfs\n",
crypt_stat->file_version,
ECRYPTFS_SUPPORTED_FILE_VERSION);
rc = -EINVAL;
goto out;
}
offset += bytes_read;
if (crypt_stat->file_version >= 1) {
rc = parse_header_metadata(crypt_stat, (page_virt + offset),
&bytes_read, validate_header_size);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error reading header "
"metadata; rc = [%d]\n", rc);
}
offset += bytes_read;
} else
set_default_header_data(crypt_stat);
rc = ecryptfs_parse_packet_set(crypt_stat, (page_virt + offset),
ecryptfs_dentry);
out:
return rc;
}
/**
* ecryptfs_read_xattr_region
* @page_virt: The virtual address into which to read the xattr data
* @ecryptfs_inode: The eCryptfs inode
*
* Attempts to read the crypto metadata from the extended attribute
* region of the lower file.
*
* Returns zero on success; non-zero on error
*/
int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode)
{
struct dentry *lower_dentry =
ecryptfs_inode_to_private(ecryptfs_inode)->lower_file->f_path.dentry;
ssize_t size;
int rc = 0;
size = ecryptfs_getxattr_lower(lower_dentry,
ecryptfs_inode_to_lower(ecryptfs_inode),
ECRYPTFS_XATTR_NAME,
page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE);
if (size < 0) {
if (unlikely(ecryptfs_verbosity > 0))
printk(KERN_INFO "Error attempting to read the [%s] "
"xattr from the lower file; return value = "
"[%zd]\n", ECRYPTFS_XATTR_NAME, size);
rc = -EINVAL;
goto out;
}
out:
return rc;
}
int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
struct inode *inode)
{
u8 file_size[ECRYPTFS_SIZE_AND_MARKER_BYTES];
u8 *marker = file_size + ECRYPTFS_FILE_SIZE_BYTES;
int rc;
rc = ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry),
ecryptfs_inode_to_lower(inode),
ECRYPTFS_XATTR_NAME, file_size,
ECRYPTFS_SIZE_AND_MARKER_BYTES);
if (rc < 0)
return rc;
else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
return -EINVAL;
rc = ecryptfs_validate_marker(marker);
if (!rc)
ecryptfs_i_size_init(file_size, inode);
return rc;
}
/*
* ecryptfs_read_metadata
*
* Common entry point for reading file metadata. From here, we could
* retrieve the header information from the header region of the file,
* the xattr region of the file, or some other repository that is
* stored separately from the file itself. The current implementation
* supports retrieving the metadata information from the file contents
* and from the xattr region.
*
* Returns zero if valid headers found and parsed; non-zero otherwise
*/
int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
{
int rc;
char *page_virt;
struct inode *ecryptfs_inode = d_inode(ecryptfs_dentry);
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
ecryptfs_copy_mount_wide_flags_to_inode_flags(crypt_stat,
mount_crypt_stat);
/* Read the first page from the underlying file */
page_virt = kmem_cache_alloc(ecryptfs_header_cache, GFP_USER);
if (!page_virt) {
rc = -ENOMEM;
goto out;
}
rc = ecryptfs_read_lower(page_virt, 0, crypt_stat->extent_size,
ecryptfs_inode);
if (rc >= 0)
rc = ecryptfs_read_headers_virt(page_virt, crypt_stat,
ecryptfs_dentry,
ECRYPTFS_VALIDATE_HEADER_SIZE);
if (rc) {
/* metadata is not in the file header, so try xattrs */
memset(page_virt, 0, PAGE_SIZE);
rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
if (rc) {
printk(KERN_DEBUG "Valid eCryptfs headers not found in "
"file header region or xattr region, inode %lu\n",
ecryptfs_inode->i_ino);
rc = -EINVAL;
goto out;
}
rc = ecryptfs_read_headers_virt(page_virt, crypt_stat,
ecryptfs_dentry,
ECRYPTFS_DONT_VALIDATE_HEADER_SIZE);
if (rc) {
printk(KERN_DEBUG "Valid eCryptfs headers not found in "
"file xattr region either, inode %lu\n",
ecryptfs_inode->i_ino);
rc = -EINVAL;
}
if (crypt_stat->mount_crypt_stat->flags
& ECRYPTFS_XATTR_METADATA_ENABLED) {
crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
} else {
printk(KERN_WARNING "Attempt to access file with "
"crypto metadata only in the extended attribute "
"region, but eCryptfs was mounted without "
"xattr support enabled. eCryptfs will not treat "
"this like an encrypted file, inode %lu\n",
ecryptfs_inode->i_ino);
rc = -EINVAL;
}
}
out:
if (page_virt) {
memset(page_virt, 0, PAGE_SIZE);
kmem_cache_free(ecryptfs_header_cache, page_virt);
}
return rc;
}
/*
* ecryptfs_encrypt_filename - encrypt filename
*
* CBC-encrypts the filename. We do not want to encrypt the same
* filename with the same key and IV, which may happen with hard
* links, so we prepend random bits to each filename.
*
* Returns zero on success; non-zero otherwise
*/
static int
ecryptfs_encrypt_filename(struct ecryptfs_filename *filename,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
int rc = 0;
filename->encrypted_filename = NULL;
filename->encrypted_filename_size = 0;
if (mount_crypt_stat && (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK)) {
size_t packet_size;
size_t remaining_bytes;
rc = ecryptfs_write_tag_70_packet(
NULL, NULL,
&filename->encrypted_filename_size,
mount_crypt_stat, NULL,
filename->filename_size);
if (rc) {
printk(KERN_ERR "%s: Error attempting to get packet "
"size for tag 72; rc = [%d]\n", __func__,
rc);
filename->encrypted_filename_size = 0;
goto out;
}
filename->encrypted_filename =
kmalloc(filename->encrypted_filename_size, GFP_KERNEL);
if (!filename->encrypted_filename) {
rc = -ENOMEM;
goto out;
}
remaining_bytes = filename->encrypted_filename_size;
rc = ecryptfs_write_tag_70_packet(filename->encrypted_filename,
&remaining_bytes,
&packet_size,
mount_crypt_stat,
filename->filename,
filename->filename_size);
if (rc) {
printk(KERN_ERR "%s: Error attempting to generate "
"tag 70 packet; rc = [%d]\n", __func__,
rc);
kfree(filename->encrypted_filename);
filename->encrypted_filename = NULL;
filename->encrypted_filename_size = 0;
goto out;
}
filename->encrypted_filename_size = packet_size;
} else {
printk(KERN_ERR "%s: No support for requested filename "
"encryption method in this release\n", __func__);
rc = -EOPNOTSUPP;
goto out;
}
out:
return rc;
}
static int ecryptfs_copy_filename(char **copied_name, size_t *copied_name_size,
const char *name, size_t name_size)
{
int rc = 0;
(*copied_name) = kmalloc((name_size + 1), GFP_KERNEL);
if (!(*copied_name)) {
rc = -ENOMEM;
goto out;
}
memcpy((void *)(*copied_name), (void *)name, name_size);
(*copied_name)[(name_size)] = '\0'; /* Only for convenience
* in printing out the
* string in debug
* messages */
(*copied_name_size) = name_size;
out:
return rc;
}
/**
* ecryptfs_process_key_cipher - Perform key cipher initialization.
* @key_tfm: Crypto context for key material, set by this function
* @cipher_name: Name of the cipher
* @key_size: Size of the key in bytes
*
* Returns zero on success. Any crypto_tfm structs allocated here
* should be released by other functions, such as on a superblock put
* event, regardless of whether this function succeeds or fails.
*/
static int
ecryptfs_process_key_cipher(struct crypto_skcipher **key_tfm,
char *cipher_name, size_t *key_size)
{
char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
char *full_alg_name = NULL;
int rc;
*key_tfm = NULL;
if (*key_size > ECRYPTFS_MAX_KEY_BYTES) {
rc = -EINVAL;
printk(KERN_ERR "Requested key size is [%zd] bytes; maximum "
"allowable is [%d]\n", *key_size, ECRYPTFS_MAX_KEY_BYTES);
goto out;
}
rc = ecryptfs_crypto_api_algify_cipher_name(&full_alg_name, cipher_name,
"ecb");
if (rc)
goto out;
*key_tfm = crypto_alloc_skcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(*key_tfm)) {
rc = PTR_ERR(*key_tfm);
printk(KERN_ERR "Unable to allocate crypto cipher with name "
"[%s]; rc = [%d]\n", full_alg_name, rc);
goto out;
}
crypto_skcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
if (*key_size == 0)
*key_size = crypto_skcipher_max_keysize(*key_tfm);
get_random_bytes(dummy_key, *key_size);
rc = crypto_skcipher_setkey(*key_tfm, dummy_key, *key_size);
if (rc) {
printk(KERN_ERR "Error attempting to set key of size [%zd] for "
"cipher [%s]; rc = [%d]\n", *key_size, full_alg_name,
rc);
rc = -EINVAL;
goto out;
}
out:
kfree(full_alg_name);
return rc;
}
struct kmem_cache *ecryptfs_key_tfm_cache;
static struct list_head key_tfm_list;
DEFINE_MUTEX(key_tfm_list_mutex);
int __init ecryptfs_init_crypto(void)
{
INIT_LIST_HEAD(&key_tfm_list);
return 0;
}
/**
* ecryptfs_destroy_crypto - free all cached key_tfms on key_tfm_list
*
* Called only at module unload time
*/
int ecryptfs_destroy_crypto(void)
{
struct ecryptfs_key_tfm *key_tfm, *key_tfm_tmp;
mutex_lock(&key_tfm_list_mutex);
list_for_each_entry_safe(key_tfm, key_tfm_tmp, &key_tfm_list,
key_tfm_list) {
list_del(&key_tfm->key_tfm_list);
crypto_free_skcipher(key_tfm->key_tfm);
kmem_cache_free(ecryptfs_key_tfm_cache, key_tfm);
}
mutex_unlock(&key_tfm_list_mutex);
return 0;
}
int
ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
size_t key_size)
{
struct ecryptfs_key_tfm *tmp_tfm;
int rc = 0;
BUG_ON(!mutex_is_locked(&key_tfm_list_mutex));
tmp_tfm = kmem_cache_alloc(ecryptfs_key_tfm_cache, GFP_KERNEL);
if (key_tfm)
(*key_tfm) = tmp_tfm;
if (!tmp_tfm) {
rc = -ENOMEM;
goto out;
}
mutex_init(&tmp_tfm->key_tfm_mutex);
strncpy(tmp_tfm->cipher_name, cipher_name,
ECRYPTFS_MAX_CIPHER_NAME_SIZE);
tmp_tfm->cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
tmp_tfm->key_size = key_size;
rc = ecryptfs_process_key_cipher(&tmp_tfm->key_tfm,
tmp_tfm->cipher_name,
&tmp_tfm->key_size);
if (rc) {
printk(KERN_ERR "Error attempting to initialize key TFM "
"cipher with name = [%s]; rc = [%d]\n",
tmp_tfm->cipher_name, rc);
kmem_cache_free(ecryptfs_key_tfm_cache, tmp_tfm);
if (key_tfm)
(*key_tfm) = NULL;
goto out;
}
list_add(&tmp_tfm->key_tfm_list, &key_tfm_list);
out:
return rc;
}
/**
* ecryptfs_tfm_exists - Search for existing tfm for cipher_name.
* @cipher_name: the name of the cipher to search for
* @key_tfm: set to corresponding tfm if found
*
* Searches for cached key_tfm matching @cipher_name
* Must be called with &key_tfm_list_mutex held
* Returns 1 if found, with @key_tfm set
* Returns 0 if not found, with @key_tfm set to NULL
*/
int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm)
{
struct ecryptfs_key_tfm *tmp_key_tfm;
BUG_ON(!mutex_is_locked(&key_tfm_list_mutex));
list_for_each_entry(tmp_key_tfm, &key_tfm_list, key_tfm_list) {
if (strcmp(tmp_key_tfm->cipher_name, cipher_name) == 0) {
if (key_tfm)
(*key_tfm) = tmp_key_tfm;
return 1;
}
}
if (key_tfm)
(*key_tfm) = NULL;
return 0;
}
/**
* ecryptfs_get_tfm_and_mutex_for_cipher_name
*
* @tfm: set to cached tfm found, or new tfm created
* @tfm_mutex: set to mutex for cached tfm found, or new tfm created
* @cipher_name: the name of the cipher to search for and/or add
*
* Sets pointers to @tfm & @tfm_mutex matching @cipher_name.
* Searches for cached item first, and creates new if not found.
* Returns 0 on success, non-zero if adding new cipher failed
*/
int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_skcipher **tfm,
struct mutex **tfm_mutex,
char *cipher_name)
{
struct ecryptfs_key_tfm *key_tfm;
int rc = 0;
(*tfm) = NULL;
(*tfm_mutex) = NULL;
mutex_lock(&key_tfm_list_mutex);
if (!ecryptfs_tfm_exists(cipher_name, &key_tfm)) {
rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0);
if (rc) {
printk(KERN_ERR "Error adding new key_tfm to list; "
"rc = [%d]\n", rc);
goto out;
}
}
(*tfm) = key_tfm->key_tfm;
(*tfm_mutex) = &key_tfm->key_tfm_mutex;
out:
mutex_unlock(&key_tfm_list_mutex);
return rc;
}
/* 64 characters forming a 6-bit target field */
static unsigned char *portable_filename_chars = ("-.0123456789ABCD"
"EFGHIJKLMNOPQRST"
"UVWXYZabcdefghij"
"klmnopqrstuvwxyz");
/* We could either offset on every reverse map or just pad some 0x00's
* at the front here */
static const unsigned char filename_rev_map[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 31 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 39 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* 47 */
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, /* 55 */
0x0A, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 63 */
0x00, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, /* 71 */
0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, /* 79 */
0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, /* 87 */
0x23, 0x24, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */
0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */
0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */
0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */
0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */
};
/**
* ecryptfs_encode_for_filename
* @dst: Destination location for encoded filename
* @dst_size: Set to the size of the encoded filename in bytes
* @src: Source location for the filename to encode
* @src_size: Size of the source in bytes
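*
* Each 3-octet block of @src expands to 4 six-bit values used to index
* portable_filename_chars, so the encoded output is 4 * ceil(src_size / 3)
* bytes long (base64-like, but with a filesystem-safe alphabet).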
*/
static void ecryptfs_encode_for_filename(unsigned char *dst, size_t *dst_size,
unsigned char *src, size_t src_size)
{
size_t num_blocks;
size_t block_num = 0;
size_t dst_offset = 0;
unsigned char last_block[3];
if (src_size == 0) {
(*dst_size) = 0;
goto out;
}
num_blocks = (src_size / 3);
if ((src_size % 3) == 0) {
memcpy(last_block, (&src[src_size - 3]), 3);
} else {
num_blocks++;
last_block[2] = 0x00;
switch (src_size % 3) {
case 1:
last_block[0] = src[src_size - 1];
last_block[1] = 0x00;
break;
case 2:
last_block[0] = src[src_size - 2];
last_block[1] = src[src_size - 1];
}
}
(*dst_size) = (num_blocks * 4);
if (!dst)
goto out;
while (block_num < num_blocks) {
unsigned char *src_block;
unsigned char dst_block[4];
if (block_num == (num_blocks - 1))
src_block = last_block;
else
src_block = &src[block_num * 3];
dst_block[0] = ((src_block[0] >> 2) & 0x3F);
dst_block[1] = (((src_block[0] << 4) & 0x30)
| ((src_block[1] >> 4) & 0x0F));
dst_block[2] = (((src_block[1] << 2) & 0x3C)
| ((src_block[2] >> 6) & 0x03));
dst_block[3] = (src_block[2] & 0x3F);
dst[dst_offset++] = portable_filename_chars[dst_block[0]];
dst[dst_offset++] = portable_filename_chars[dst_block[1]];
dst[dst_offset++] = portable_filename_chars[dst_block[2]];
dst[dst_offset++] = portable_filename_chars[dst_block[3]];
block_num++;
}
out:
return;
}
static size_t ecryptfs_max_decoded_size(size_t encoded_size)
{
/* Not exact; conservatively long. Every block of 4
* encoded characters decodes into a block of 3
* decoded characters. This segment of code provides
* the caller with the maximum amount of allocated
* space that @dst will need to point to in a
* subsequent call. */
return ((encoded_size + 1) * 3) / 4;
}
/**
* ecryptfs_decode_from_filename
* @dst: If NULL, this function only sets @dst_size and returns. If
* non-NULL, this function decodes the encoded octets in @src
* into the memory that @dst points to.
* @dst_size: Set to the size of the decoded string.
* @src: The encoded set of octets to decode.
* @src_size: The size of the encoded set of octets to decode.
*/
static void
ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
const unsigned char *src, size_t src_size)
{
u8 current_bit_offset = 0;
size_t src_byte_offset = 0;
size_t dst_byte_offset = 0;
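/*
 * Repack the stream of 6-bit values (recovered via filename_rev_map) into
 * 8-bit output bytes; current_bit_offset tracks how far into the current
 * output byte the next 6-bit value lands.
 */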
if (!dst) {
(*dst_size) = ecryptfs_max_decoded_size(src_size);
goto out;
}
while (src_byte_offset < src_size) {
unsigned char src_byte =
filename_rev_map[(int)src[src_byte_offset]];
switch (current_bit_offset) {
case 0:
dst[dst_byte_offset] = (src_byte << 2);
current_bit_offset = 6;
break;
case 6:
dst[dst_byte_offset++] |= (src_byte >> 4);
dst[dst_byte_offset] = ((src_byte & 0xF)
<< 4);
current_bit_offset = 4;
break;
case 4:
dst[dst_byte_offset++] |= (src_byte >> 2);
dst[dst_byte_offset] = (src_byte << 6);
current_bit_offset = 2;
break;
case 2:
dst[dst_byte_offset++] |= (src_byte);
current_bit_offset = 0;
break;
}
src_byte_offset++;
}
(*dst_size) = dst_byte_offset;
out:
return;
}
/**
* ecryptfs_encrypt_and_encode_filename - converts a plaintext file name to cipher text
* @encoded_name: The encrypted name
* @encoded_name_size: Length of the encrypted name
* @mount_crypt_stat: The crypt_stat struct associated with the file name to encode
* @name: The plaintext name
* @name_size: The length of the plaintext name
*
* Encrypts and encodes a filename into something that constitutes a
* valid filename for a filesystem, with printable characters.
*
* We assume that we have a properly initialized crypto context,
* pointed to by crypt_stat->tfm.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_encrypt_and_encode_filename(
char **encoded_name,
size_t *encoded_name_size,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
const char *name, size_t name_size)
{
size_t encoded_name_no_prefix_size;
int rc = 0;
(*encoded_name) = NULL;
(*encoded_name_size) = 0;
if (mount_crypt_stat && (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) {
struct ecryptfs_filename *filename;
filename = kzalloc(sizeof(*filename), GFP_KERNEL);
if (!filename) {
rc = -ENOMEM;
goto out;
}
filename->filename = (char *)name;
filename->filename_size = name_size;
rc = ecryptfs_encrypt_filename(filename, mount_crypt_stat);
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt "
"filename; rc = [%d]\n", __func__, rc);
kfree(filename);
goto out;
}
ecryptfs_encode_for_filename(
NULL, &encoded_name_no_prefix_size,
filename->encrypted_filename,
filename->encrypted_filename_size);
if (mount_crypt_stat
&& (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK))
(*encoded_name_size) =
(ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE
+ encoded_name_no_prefix_size);
else
(*encoded_name_size) =
(ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX_SIZE
+ encoded_name_no_prefix_size);
(*encoded_name) = kmalloc((*encoded_name_size) + 1, GFP_KERNEL);
if (!(*encoded_name)) {
rc = -ENOMEM;
kfree(filename->encrypted_filename);
kfree(filename);
goto out;
}
if (mount_crypt_stat
&& (mount_crypt_stat->flags
& ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK)) {
memcpy((*encoded_name),
ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE);
ecryptfs_encode_for_filename(
((*encoded_name)
+ ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE),
&encoded_name_no_prefix_size,
filename->encrypted_filename,
filename->encrypted_filename_size);
(*encoded_name_size) =
(ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE
+ encoded_name_no_prefix_size);
(*encoded_name)[(*encoded_name_size)] = '\0';
} else {
rc = -EOPNOTSUPP;
}
if (rc) {
printk(KERN_ERR "%s: Error attempting to encode "
"encrypted filename; rc = [%d]\n", __func__,
rc);
kfree((*encoded_name));
(*encoded_name) = NULL;
(*encoded_name_size) = 0;
}
kfree(filename->encrypted_filename);
kfree(filename);
} else {
rc = ecryptfs_copy_filename(encoded_name,
encoded_name_size,
name, name_size);
}
out:
return rc;
}
static bool is_dot_dotdot(const char *name, size_t name_size)
{
if (name_size == 1 && name[0] == '.')
return true;
else if (name_size == 2 && name[0] == '.' && name[1] == '.')
return true;
return false;
}
/**
* ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext
* @plaintext_name: The plaintext name
* @plaintext_name_size: The plaintext name size
* @sb: Ecryptfs's super_block
* @name: The filename in cipher text
* @name_size: The cipher text name size
*
* Decrypts and decodes the filename.
*
* Returns zero on success; non-zero on error
*/
int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
size_t *plaintext_name_size,
struct super_block *sb,
const char *name, size_t name_size)
{
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
&ecryptfs_superblock_to_private(sb)->mount_crypt_stat;
char *decoded_name;
size_t decoded_name_size;
size_t packet_size;
int rc = 0;
if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) &&
!(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)) {
if (is_dot_dotdot(name, name_size)) {
rc = ecryptfs_copy_filename(plaintext_name,
plaintext_name_size,
name, name_size);
goto out;
}
if (name_size <= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE ||
strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)) {
rc = -EINVAL;
goto out;
}
name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
ecryptfs_decode_from_filename(NULL, &decoded_name_size,
name, name_size);
decoded_name = kmalloc(decoded_name_size, GFP_KERNEL);
if (!decoded_name) {
rc = -ENOMEM;
goto out;
}
ecryptfs_decode_from_filename(decoded_name, &decoded_name_size,
name, name_size);
rc = ecryptfs_parse_tag_70_packet(plaintext_name,
plaintext_name_size,
&packet_size,
mount_crypt_stat,
decoded_name,
decoded_name_size);
if (rc) {
ecryptfs_printk(KERN_DEBUG,
"%s: Could not parse tag 70 packet from filename\n",
__func__);
goto out_free;
}
} else {
rc = ecryptfs_copy_filename(plaintext_name,
plaintext_name_size,
name, name_size);
goto out;
}
out_free:
kfree(decoded_name);
out:
return rc;
}
#define ENC_NAME_MAX_BLOCKLEN_8_OR_16 143
int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
{
struct crypto_skcipher *tfm;
struct mutex *tfm_mutex;
size_t cipher_blocksize;
int rc;
if (!(mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) {
(*namelen) = lower_namelen;
return 0;
}
rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&tfm, &tfm_mutex,
mount_crypt_stat->global_default_fn_cipher_name);
if (unlikely(rc)) {
(*namelen) = 0;
return rc;
}
mutex_lock(tfm_mutex);
cipher_blocksize = crypto_skcipher_blocksize(tfm);
mutex_unlock(tfm_mutex);
/* Return an exact amount for the common cases */
if (lower_namelen == NAME_MAX
&& (cipher_blocksize == 8 || cipher_blocksize == 16)) {
(*namelen) = ENC_NAME_MAX_BLOCKLEN_8_OR_16;
return 0;
}
/* Return a safe estimate for the uncommon cases */
(*namelen) = lower_namelen;
(*namelen) -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
/* Since this is the max decoded size, subtract 1 "decoded block" len */
(*namelen) = ecryptfs_max_decoded_size(*namelen) - 3;
(*namelen) -= ECRYPTFS_TAG_70_MAX_METADATA_SIZE;
(*namelen) -= ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES;
/* Worst case is that the filename is padded nearly a full block size */
(*namelen) -= cipher_blocksize - 1;
if ((*namelen) < 0)
(*namelen) = 0;
return 0;
}
| linux-master | fs/ecryptfs/crypto.c |
// SPDX-License-Identifier: GPL-2.0
/*
* uncompress.c
*
* (C) Copyright 1999 Linus Torvalds
*
* cramfs interfaces to the uncompression library. There are really just
* three entrypoints:
*
* - cramfs_uncompress_init() - called to initialize the thing.
* - cramfs_uncompress_exit() - tell me when you're done
* - cramfs_uncompress_block() - uncompress a block.
*
* NOTE NOTE NOTE! The uncompression is entirely single-threaded. We
* only have one stream, and we'll initialize it only once even if it
* then is used by multiple filesystems.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>
#include "internal.h"
static z_stream stream;
static int initialized;
/* Returns length of decompressed data. */
int cramfs_uncompress_block(void *dst, int dstlen, void *src, int srclen)
{
int err;
stream.next_in = src;
stream.avail_in = srclen;
stream.next_out = dst;
stream.avail_out = dstlen;
err = zlib_inflateReset(&stream);
if (err != Z_OK) {
pr_err("zlib_inflateReset error %d\n", err);
zlib_inflateEnd(&stream);
zlib_inflateInit(&stream);
}
err = zlib_inflate(&stream, Z_FINISH);
if (err != Z_STREAM_END)
goto err;
return stream.total_out;
err:
pr_err("Error %d while decompressing!\n", err);
pr_err("%p(%d)->%p(%d)\n", src, srclen, dst, dstlen);
return -EIO;
}
int cramfs_uncompress_init(void)
{
if (!initialized++) {
stream.workspace = vmalloc(zlib_inflate_workspacesize());
if (!stream.workspace) {
initialized = 0;
return -ENOMEM;
}
stream.next_in = NULL;
stream.avail_in = 0;
zlib_inflateInit(&stream);
}
return 0;
}
void cramfs_uncompress_exit(void)
{
if (!--initialized) {
zlib_inflateEnd(&stream);
vfree(stream.workspace);
}
}
| linux-master | fs/cramfs/uncompress.c |
/*
* Compressed rom filesystem for Linux.
*
* Copyright (C) 1999 Linus Torvalds.
*
* This file is released under the GPL.
*/
/*
* These are the VFS interfaces to the compressed rom filesystem.
* The actual compression is based on zlib, see the other files.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pfn_t.h>
#include <linux/ramfs.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/super.h>
#include <linux/fs_context.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
#include <uapi/linux/cramfs_fs.h>
#include <linux/uaccess.h>
#include "internal.h"
/*
* cramfs super-block data in memory
*/
struct cramfs_sb_info {
unsigned long magic;
unsigned long size;
unsigned long blocks;
unsigned long files;
unsigned long flags;
void *linear_virt_addr;
resource_size_t linear_phys_addr;
size_t mtd_point_size;
};
static inline struct cramfs_sb_info *CRAMFS_SB(struct super_block *sb)
{
return sb->s_fs_info;
}
static const struct super_operations cramfs_ops;
static const struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct file_operations cramfs_physmem_fops;
static const struct address_space_operations cramfs_aops;
static DEFINE_MUTEX(read_mutex);
/* These macros may change in future, to provide better st_ino semantics. */
#define OFFSET(x) ((x)->i_ino)
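/*
 * Derive a stable inode number: entries with data (regular files,
 * directories, symlinks) use their 4-byte-aligned data offset, so the low
 * two bits are clear; entries without data fall back to their directory
 * offset + 1. get_cramfs_inode() relies on this to tell the two apart.
 */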
static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
{
if (!cino->offset)
return offset + 1;
if (!cino->size)
return offset + 1;
/*
* The file mode test fixes buggy mkcramfs implementations where
* cramfs_inode->offset is set to a non zero value for entries
* which did not contain data, like devices node and fifos.
*/
switch (cino->mode & S_IFMT) {
case S_IFREG:
case S_IFDIR:
case S_IFLNK:
return cino->offset << 2;
default:
break;
}
return offset + 1;
}
static struct inode *get_cramfs_inode(struct super_block *sb,
const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
struct inode *inode;
static struct timespec64 zerotime;
inode = iget_locked(sb, cramino(cramfs_inode, offset));
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
switch (cramfs_inode->mode & S_IFMT) {
case S_IFREG:
inode->i_fop = &generic_ro_fops;
inode->i_data.a_ops = &cramfs_aops;
if (IS_ENABLED(CONFIG_CRAMFS_MTD) &&
CRAMFS_SB(sb)->flags & CRAMFS_FLAG_EXT_BLOCK_POINTERS &&
CRAMFS_SB(sb)->linear_phys_addr)
inode->i_fop = &cramfs_physmem_fops;
break;
case S_IFDIR:
inode->i_op = &cramfs_dir_inode_operations;
inode->i_fop = &cramfs_directory_operations;
break;
case S_IFLNK:
inode->i_op = &page_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_data.a_ops = &cramfs_aops;
break;
default:
init_special_inode(inode, cramfs_inode->mode,
old_decode_dev(cramfs_inode->size));
}
inode->i_mode = cramfs_inode->mode;
i_uid_write(inode, cramfs_inode->uid);
i_gid_write(inode, cramfs_inode->gid);
/* if the lower 2 bits are zero, the inode contains data */
if (!(inode->i_ino & 3)) {
inode->i_size = cramfs_inode->size;
inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
}
/* Struct copy intentional */
inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
zerotime);
/* inode->i_nlink is left 1 - arguably wrong for directories,
but it's the best we can do without reading the directory
contents. 1 yields the right result in GNU find, even
without -noleaf option. */
unlock_new_inode(inode);
return inode;
}
/*
* We have our own block cache: don't fill up the buffer cache
* with the rom-image, because the way the filesystem is set
* up the accesses should be fairly regular and cached in the
* page cache and dentry tree anyway..
*
* This also acts as a way to guarantee contiguous areas of up to
* BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
* worry about end-of-buffer issues even when decompressing a full
* page cache.
*
* Note: This is all optimized away at compile time when
* CONFIG_CRAMFS_BLOCKDEV=n.
*/
#define READ_BUFFERS (2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix) ((_ix) ^ 1)
/*
* BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
* data that takes up more space than the original and with unlucky
* alignment.
*/
#define BLKS_PER_BUF_SHIFT (2)
#define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_SIZE)
static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
static unsigned buffer_blocknr[READ_BUFFERS];
static struct super_block *buffer_dev[READ_BUFFERS];
static int next_buffer;
/*
* Populate our block cache and return a pointer to it.
*/
static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
unsigned int len)
{
struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
struct file_ra_state ra = {};
struct page *pages[BLKS_PER_BUF];
unsigned i, blocknr, buffer;
unsigned long devsize;
char *data;
if (!len)
return NULL;
blocknr = offset >> PAGE_SHIFT;
offset &= PAGE_SIZE - 1;
/* Check if an existing buffer already has the data.. */
for (i = 0; i < READ_BUFFERS; i++) {
unsigned int blk_offset;
if (buffer_dev[i] != sb)
continue;
if (blocknr < buffer_blocknr[i])
continue;
blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
blk_offset += offset;
if (blk_offset > BUFFER_SIZE ||
blk_offset + len > BUFFER_SIZE)
continue;
return read_buffers[i] + blk_offset;
}
devsize = bdev_nr_bytes(sb->s_bdev) >> PAGE_SHIFT;
/* Ok, read in BLKS_PER_BUF pages completely first. */
file_ra_state_init(&ra, mapping);
page_cache_sync_readahead(mapping, &ra, NULL, blocknr, BLKS_PER_BUF);
for (i = 0; i < BLKS_PER_BUF; i++) {
struct page *page = NULL;
if (blocknr + i < devsize) {
page = read_mapping_page(mapping, blocknr + i, NULL);
/* synchronous error? */
if (IS_ERR(page))
page = NULL;
}
pages[i] = page;
}
buffer = next_buffer;
next_buffer = NEXT_BUFFER(buffer);
buffer_blocknr[buffer] = blocknr;
buffer_dev[buffer] = sb;
data = read_buffers[buffer];
for (i = 0; i < BLKS_PER_BUF; i++) {
struct page *page = pages[i];
if (page) {
memcpy_from_page(data, page, 0, PAGE_SIZE);
put_page(page);
} else
memset(data, 0, PAGE_SIZE);
data += PAGE_SIZE;
}
return read_buffers[buffer] + offset;
}
/*
* Return a pointer to the linearly addressed cramfs image in memory.
*/
static void *cramfs_direct_read(struct super_block *sb, unsigned int offset,
unsigned int len)
{
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
if (!len)
return NULL;
if (len > sbi->size || offset > sbi->size - len)
return page_address(ZERO_PAGE(0));
return sbi->linear_virt_addr + offset;
}
/*
* Returns a pointer to a buffer containing at least LEN bytes of
* filesystem starting at byte offset OFFSET into the filesystem.
*/
static void *cramfs_read(struct super_block *sb, unsigned int offset,
unsigned int len)
{
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sbi->linear_virt_addr)
return cramfs_direct_read(sb, offset, len);
else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
return cramfs_blkdev_read(sb, offset, len);
else
return NULL;
}
/*
* For a mapping to be possible, we need a range of uncompressed and
* contiguous blocks. Return the offset for the first block and number of
* valid blocks for which that is true, or zero otherwise.
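*
* A block qualifies only if its pointer equals the first block's address
* plus i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT), with both the
* CRAMFS_BLK_FLAG_DIRECT_PTR and CRAMFS_BLK_FLAG_UNCOMPRESSED flags set.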
*/
static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)
{
struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
int i;
u32 *blockptrs, first_block_addr;
/*
* We can dereference memory directly here as this code may be
* reached only when there is a direct filesystem image mapping
* available in memory.
*/
blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode) + pgoff * 4);
first_block_addr = blockptrs[0] & ~CRAMFS_BLK_FLAGS;
i = 0;
do {
u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
u32 expect = (first_block_addr + block_off) |
CRAMFS_BLK_FLAG_DIRECT_PTR |
CRAMFS_BLK_FLAG_UNCOMPRESSED;
if (blockptrs[i] != expect) {
pr_debug("range: block %d/%d got %#x expects %#x\n",
pgoff+i, pgoff + *pages - 1,
blockptrs[i], expect);
if (i == 0)
return 0;
break;
}
} while (++i < *pages);
*pages = i;
return first_block_addr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
}
#ifdef CONFIG_MMU
/*
* Return true if the last page of a file in the filesystem image contains
* some other data that doesn't belong to that file. It is assumed that the
* last block is CRAMFS_BLK_FLAG_DIRECT_PTR | CRAMFS_BLK_FLAG_UNCOMPRESSED
* (verified by cramfs_get_block_range()) and directly accessible in memory.
*/
static bool cramfs_last_page_is_shared(struct inode *inode)
{
struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
u32 partial, last_page, blockaddr, *blockptrs;
char *tail_data;
partial = offset_in_page(inode->i_size);
if (!partial)
return false;
last_page = inode->i_size >> PAGE_SHIFT;
blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode));
blockaddr = blockptrs[last_page] & ~CRAMFS_BLK_FLAGS;
blockaddr <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
tail_data = sbi->linear_virt_addr + blockaddr + partial;
return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
}
static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
unsigned int pages, max_pages, offset;
unsigned long address, pgoff = vma->vm_pgoff;
char *bailout_reason;
int ret;
ret = generic_file_readonly_mmap(file, vma);
if (ret)
return ret;
/*
* Now try to pre-populate ptes for this vma with a direct
* mapping avoiding memory allocation when possible.
*/
/* Could COW work here? */
bailout_reason = "vma is writable";
if (vma->vm_flags & VM_WRITE)
goto bailout;
max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
bailout_reason = "beyond file limit";
if (pgoff >= max_pages)
goto bailout;
pages = min(vma_pages(vma), max_pages - pgoff);
offset = cramfs_get_block_range(inode, pgoff, &pages);
bailout_reason = "unsuitable block layout";
if (!offset)
goto bailout;
address = sbi->linear_phys_addr + offset;
bailout_reason = "data is not page aligned";
if (!PAGE_ALIGNED(address))
goto bailout;
/* Don't map the last page if it contains some other data */
if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
pr_debug("mmap: %pD: last page is shared\n", file);
pages--;
}
if (!pages) {
bailout_reason = "no suitable block remaining";
goto bailout;
}
if (pages == vma_pages(vma)) {
/*
* The entire vma is mappable. remap_pfn_range() will
* make it distinguishable from a non-direct mapping
* in /proc/<pid>/maps by substituting the file offset
* with the actual physical address.
*/
ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
pages * PAGE_SIZE, vma->vm_page_prot);
} else {
/*
* Let's create a mixed map if we can't map it all.
* The normal paging machinery will take care of the
* unpopulated ptes via cramfs_read_folio().
*/
int i;
vm_flags_set(vma, VM_MIXEDMAP);
for (i = 0; i < pages && !ret; i++) {
vm_fault_t vmf;
unsigned long off = i * PAGE_SIZE;
pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
if (vmf & VM_FAULT_ERROR)
ret = vm_fault_to_errno(vmf, 0);
}
}
if (!ret)
pr_debug("mapped %pD[%lu] at 0x%08lx (%u/%lu pages) "
"to vma 0x%08lx, page_prot 0x%llx\n", file,
pgoff, address, pages, vma_pages(vma), vma->vm_start,
(unsigned long long)pgprot_val(vma->vm_page_prot));
return ret;
bailout:
pr_debug("%pD[%lu]: direct mmap impossible: %s\n",
file, pgoff, bailout_reason);
/* Didn't manage any direct map, but normal paging is still possible */
return 0;
}
#else /* CONFIG_MMU */
static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -ENOSYS;
}
static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
unsigned int pages, block_pages, max_pages, offset;
pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (pgoff >= max_pages || pages > max_pages - pgoff)
return -EINVAL;
block_pages = pages;
offset = cramfs_get_block_range(inode, pgoff, &block_pages);
if (!offset || block_pages != pages)
return -ENOSYS;
addr = sbi->linear_phys_addr + offset;
pr_debug("get_unmapped for %pD ofs %#lx siz %lu at 0x%08lx\n",
file, pgoff*PAGE_SIZE, len, addr);
return addr;
}
static unsigned int cramfs_physmem_mmap_capabilities(struct file *file)
{
return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT |
NOMMU_MAP_READ | NOMMU_MAP_EXEC;
}
#endif /* CONFIG_MMU */
static const struct file_operations cramfs_physmem_fops = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.splice_read = filemap_splice_read,
.mmap = cramfs_physmem_mmap,
#ifndef CONFIG_MMU
.get_unmapped_area = cramfs_physmem_get_unmapped_area,
.mmap_capabilities = cramfs_physmem_mmap_capabilities,
#endif
};
static void cramfs_kill_sb(struct super_block *sb)
{
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
generic_shutdown_super(sb);
if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
if (sbi && sbi->mtd_point_size)
mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
put_mtd_device(sb->s_mtd);
sb->s_mtd = NULL;
} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
sync_blockdev(sb->s_bdev);
blkdev_put(sb->s_bdev, sb);
}
kfree(sbi);
}
static int cramfs_reconfigure(struct fs_context *fc)
{
sync_filesystem(fc->root->d_sb);
fc->sb_flags |= SB_RDONLY;
return 0;
}
static int cramfs_read_super(struct super_block *sb, struct fs_context *fc,
struct cramfs_super *super)
{
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
unsigned long root_offset;
bool silent = fc->sb_flags & SB_SILENT;
/* We don't know the real size yet */
sbi->size = PAGE_SIZE;
/* Read the first block and get the superblock from it */
mutex_lock(&read_mutex);
memcpy(super, cramfs_read(sb, 0, sizeof(*super)), sizeof(*super));
mutex_unlock(&read_mutex);
/* Do sanity checks on the superblock */
if (super->magic != CRAMFS_MAGIC) {
/* check for wrong endianness */
if (super->magic == CRAMFS_MAGIC_WEND) {
if (!silent)
errorfc(fc, "wrong endianness");
return -EINVAL;
}
/* check at 512 byte offset */
mutex_lock(&read_mutex);
memcpy(super,
cramfs_read(sb, 512, sizeof(*super)),
sizeof(*super));
mutex_unlock(&read_mutex);
if (super->magic != CRAMFS_MAGIC) {
if (super->magic == CRAMFS_MAGIC_WEND && !silent)
errorfc(fc, "wrong endianness");
else if (!silent)
errorfc(fc, "wrong magic");
return -EINVAL;
}
}
/* get feature flags first */
if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
errorfc(fc, "unsupported filesystem features");
return -EINVAL;
}
/* Check that the root inode is in a sane state */
if (!S_ISDIR(super->root.mode)) {
errorfc(fc, "root is not a directory");
return -EINVAL;
}
/* correct strange, hard-coded permissions of mkcramfs */
super->root.mode |= 0555;
root_offset = super->root.offset << 2;
if (super->flags & CRAMFS_FLAG_FSID_VERSION_2) {
sbi->size = super->size;
sbi->blocks = super->fsid.blocks;
sbi->files = super->fsid.files;
} else {
sbi->size = 1<<28;
sbi->blocks = 0;
sbi->files = 0;
}
sbi->magic = super->magic;
sbi->flags = super->flags;
if (root_offset == 0)
infofc(fc, "empty filesystem");
else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
((root_offset != sizeof(struct cramfs_super)) &&
(root_offset != 512 + sizeof(struct cramfs_super))))
{
errorfc(fc, "bad root offset %lu", root_offset);
return -EINVAL;
}
return 0;
}
static int cramfs_finalize_super(struct super_block *sb,
struct cramfs_inode *cramfs_root)
{
struct inode *root;
/* Set it all up.. */
sb->s_flags |= SB_RDONLY;
sb->s_time_min = 0;
sb->s_time_max = 0;
sb->s_op = &cramfs_ops;
root = get_cramfs_inode(sb, cramfs_root, 0);
if (IS_ERR(root))
return PTR_ERR(root);
sb->s_root = d_make_root(root);
if (!sb->s_root)
return -ENOMEM;
return 0;
}
static int cramfs_blkdev_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct cramfs_sb_info *sbi;
struct cramfs_super super;
int i, err;
sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
/* Invalidate the read buffers on mount: think disk change.. */
for (i = 0; i < READ_BUFFERS; i++)
buffer_blocknr[i] = -1;
err = cramfs_read_super(sb, fc, &super);
if (err)
return err;
return cramfs_finalize_super(sb, &super.root);
}
static int cramfs_mtd_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct cramfs_sb_info *sbi;
struct cramfs_super super;
int err;
sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
/* Map only one page for now. Will remap it when fs size is known. */
err = mtd_point(sb->s_mtd, 0, PAGE_SIZE, &sbi->mtd_point_size,
&sbi->linear_virt_addr, &sbi->linear_phys_addr);
if (err || sbi->mtd_point_size != PAGE_SIZE) {
pr_err("unable to get direct memory access to mtd:%s\n",
sb->s_mtd->name);
return err ? : -ENODATA;
}
pr_info("checking physical address %pap for linear cramfs image\n",
&sbi->linear_phys_addr);
err = cramfs_read_super(sb, fc, &super);
if (err)
return err;
/* Remap the whole filesystem now */
pr_info("linear cramfs image on mtd:%s appears to be %lu KB in size\n",
sb->s_mtd->name, sbi->size/1024);
mtd_unpoint(sb->s_mtd, 0, PAGE_SIZE);
err = mtd_point(sb->s_mtd, 0, sbi->size, &sbi->mtd_point_size,
&sbi->linear_virt_addr, &sbi->linear_phys_addr);
if (err || sbi->mtd_point_size != sbi->size) {
pr_err("unable to get direct memory access to mtd:%s\n",
sb->s_mtd->name);
return err ? : -ENODATA;
}
return cramfs_finalize_super(sb, &super.root);
}
static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
u64 id = 0;
if (sb->s_bdev)
id = huge_encode_dev(sb->s_bdev->bd_dev);
else if (sb->s_dev)
id = huge_encode_dev(sb->s_dev);
buf->f_type = CRAMFS_MAGIC;
buf->f_bsize = PAGE_SIZE;
buf->f_blocks = CRAMFS_SB(sb)->blocks;
buf->f_bfree = 0;
buf->f_bavail = 0;
buf->f_files = CRAMFS_SB(sb)->files;
buf->f_ffree = 0;
buf->f_fsid = u64_to_fsid(id);
buf->f_namelen = CRAMFS_MAXPATHLEN;
return 0;
}
/*
* Read a cramfs directory entry.
*/
static int cramfs_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
char *buf;
unsigned int offset;
	/* Offset within the directory. */
if (ctx->pos >= inode->i_size)
return 0;
offset = ctx->pos;
/* Directory entries are always 4-byte aligned */
if (offset & 3)
return -EINVAL;
buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
while (offset < inode->i_size) {
struct cramfs_inode *de;
unsigned long nextoffset;
char *name;
ino_t ino;
umode_t mode;
int namelen;
mutex_lock(&read_mutex);
de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
name = (char *)(de+1);
/*
* Namelengths on disk are shifted by two
* and the name padded out to 4-byte boundaries
* with zeroes.
*/
namelen = de->namelen << 2;
memcpy(buf, name, namelen);
ino = cramino(de, OFFSET(inode) + offset);
mode = de->mode;
mutex_unlock(&read_mutex);
nextoffset = offset + sizeof(*de) + namelen;
for (;;) {
if (!namelen) {
kfree(buf);
return -EIO;
}
if (buf[namelen-1])
break;
namelen--;
}
if (!dir_emit(ctx, buf, namelen, ino, mode >> 12))
break;
ctx->pos = offset = nextoffset;
}
kfree(buf);
return 0;
}
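/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): how the on-disk name length round-trips. cramfs stores the
 * name length in 4-byte units (hence de->namelen << 2 above) and
 * zero-pads the name, so readers trim trailing NULs as in the loop
 * above. Guarded out so the file still compiles as-is.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const char raw[] = "hello\0\0\0";	/* 5 chars padded to 8 */
	int namelen = sizeof(raw) - 1;		/* 8, i.e. de->namelen == 2 */

	while (namelen && !raw[namelen - 1])
		namelen--;
	printf("trimmed namelen = %d\n", namelen);	/* prints 5 */
	return 0;
}
#endif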
/*
* Lookup and fill in the inode data..
*/
static struct dentry *cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
unsigned int offset = 0;
struct inode *inode = NULL;
int sorted;
mutex_lock(&read_mutex);
sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
while (offset < dir->i_size) {
struct cramfs_inode *de;
char *name;
int namelen, retval;
int dir_off = OFFSET(dir) + offset;
de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
name = (char *)(de+1);
/* Try to take advantage of sorted directories */
if (sorted && (dentry->d_name.name[0] < name[0]))
break;
namelen = de->namelen << 2;
offset += sizeof(*de) + namelen;
/* Quick check that the name is roughly the right length */
if (((dentry->d_name.len + 3) & ~3) != namelen)
continue;
for (;;) {
if (!namelen) {
inode = ERR_PTR(-EIO);
goto out;
}
if (name[namelen-1])
break;
namelen--;
}
if (namelen != dentry->d_name.len)
continue;
retval = memcmp(dentry->d_name.name, name, namelen);
if (retval > 0)
continue;
if (!retval) {
inode = get_cramfs_inode(dir->i_sb, de, dir_off);
break;
}
/* else (retval < 0) */
if (sorted)
break;
}
out:
mutex_unlock(&read_mutex);
return d_splice_alias(inode, dentry);
}
static int cramfs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
u32 maxblock;
int bytes_filled;
void *pgdata;
maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
bytes_filled = 0;
pgdata = kmap_local_page(page);
if (page->index < maxblock) {
struct super_block *sb = inode->i_sb;
u32 blkptr_offset = OFFSET(inode) + page->index * 4;
u32 block_ptr, block_start, block_len;
bool uncompressed, direct;
mutex_lock(&read_mutex);
block_ptr = *(u32 *) cramfs_read(sb, blkptr_offset, 4);
uncompressed = (block_ptr & CRAMFS_BLK_FLAG_UNCOMPRESSED);
direct = (block_ptr & CRAMFS_BLK_FLAG_DIRECT_PTR);
block_ptr &= ~CRAMFS_BLK_FLAGS;
if (direct) {
/*
* The block pointer is an absolute start pointer,
* shifted by 2 bits. The size is included in the
* first 2 bytes of the data block when compressed,
* or PAGE_SIZE otherwise.
*/
block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
if (uncompressed) {
block_len = PAGE_SIZE;
/* if last block: cap to file length */
if (page->index == maxblock - 1)
block_len =
offset_in_page(inode->i_size);
} else {
block_len = *(u16 *)
cramfs_read(sb, block_start, 2);
block_start += 2;
}
} else {
/*
* The block pointer indicates one past the end of
* the current block (start of next block). If this
* is the first block then it starts where the block
* pointer table ends, otherwise its start comes
* from the previous block's pointer.
*/
block_start = OFFSET(inode) + maxblock * 4;
if (page->index)
block_start = *(u32 *)
cramfs_read(sb, blkptr_offset - 4, 4);
/* Beware... previous ptr might be a direct ptr */
if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {
/* See comments on earlier code. */
u32 prev_start = block_start;
block_start = prev_start & ~CRAMFS_BLK_FLAGS;
block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
block_start += PAGE_SIZE;
} else {
block_len = *(u16 *)
cramfs_read(sb, block_start, 2);
block_start += 2 + block_len;
}
}
block_start &= ~CRAMFS_BLK_FLAGS;
block_len = block_ptr - block_start;
}
if (block_len == 0)
; /* hole */
else if (unlikely(block_len > 2*PAGE_SIZE ||
(uncompressed && block_len > PAGE_SIZE))) {
mutex_unlock(&read_mutex);
pr_err("bad data blocksize %u\n", block_len);
goto err;
} else if (uncompressed) {
memcpy(pgdata,
cramfs_read(sb, block_start, block_len),
block_len);
bytes_filled = block_len;
} else {
bytes_filled = cramfs_uncompress_block(pgdata,
PAGE_SIZE,
cramfs_read(sb, block_start, block_len),
block_len);
}
mutex_unlock(&read_mutex);
if (unlikely(bytes_filled < 0))
goto err;
}
memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
flush_dcache_page(page);
kunmap_local(pgdata);
SetPageUptodate(page);
unlock_page(page);
return 0;
err:
kunmap_local(pgdata);
ClearPageUptodate(page);
SetPageError(page);
unlock_page(page);
return 0;
}
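/*
 * Illustrative sketch (editor's addition): decoding one extended
 * cramfs block pointer the way cramfs_read_folio() does above. The
 * DEMO_* flag values mirror the upstream <uapi/linux/cramfs_fs.h>
 * definitions, restated here as an assumption since that header is
 * not part of this file.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_BLK_FLAG_UNCOMPRESSED	(1u << 31)
#define DEMO_BLK_FLAG_DIRECT_PTR	(1u << 30)
#define DEMO_BLK_FLAGS			(3u << 30)
#define DEMO_BLK_DIRECT_PTR_SHIFT	2

int main(void)
{
	/* a direct pointer to a compressed block at byte offset 40960 */
	uint32_t ptr = DEMO_BLK_FLAG_DIRECT_PTR |
		       (40960 >> DEMO_BLK_DIRECT_PTR_SHIFT);
	uint32_t start = (ptr & ~DEMO_BLK_FLAGS) << DEMO_BLK_DIRECT_PTR_SHIFT;

	printf("direct %s block starting at byte %u\n",
	       (ptr & DEMO_BLK_FLAG_UNCOMPRESSED) ? "raw" : "compressed",
	       start);
	return 0;
}
#endif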
static const struct address_space_operations cramfs_aops = {
.read_folio = cramfs_read_folio
};
/*
* Our operations:
*/
/*
* A directory can only readdir
*/
static const struct file_operations cramfs_directory_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = cramfs_readdir,
};
static const struct inode_operations cramfs_dir_inode_operations = {
.lookup = cramfs_lookup,
};
static const struct super_operations cramfs_ops = {
.statfs = cramfs_statfs,
};
static int cramfs_get_tree(struct fs_context *fc)
{
int ret = -ENOPROTOOPT;
if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
ret = get_tree_mtd(fc, cramfs_mtd_fill_super);
if (!ret)
return 0;
}
if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
ret = get_tree_bdev(fc, cramfs_blkdev_fill_super);
return ret;
}
static const struct fs_context_operations cramfs_context_ops = {
.get_tree = cramfs_get_tree,
.reconfigure = cramfs_reconfigure,
};
/*
* Set up the filesystem mount context.
*/
static int cramfs_init_fs_context(struct fs_context *fc)
{
fc->ops = &cramfs_context_ops;
return 0;
}
static struct file_system_type cramfs_fs_type = {
.owner = THIS_MODULE,
.name = "cramfs",
.init_fs_context = cramfs_init_fs_context,
.kill_sb = cramfs_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("cramfs");
static int __init init_cramfs_fs(void)
{
int rv;
rv = cramfs_uncompress_init();
if (rv < 0)
return rv;
rv = register_filesystem(&cramfs_fs_type);
if (rv < 0)
cramfs_uncompress_exit();
return rv;
}
static void __exit exit_cramfs_fs(void)
{
cramfs_uncompress_exit();
unregister_filesystem(&cramfs_fs_type);
}
module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
MODULE_LICENSE("GPL");
| linux-master | fs/cramfs/inode.c |
// SPDX-License-Identifier: GPL-2.0-only
/* fs/fat/nfs.c
*/
#include <linux/exportfs.h>
#include "fat.h"
struct fat_fid {
u32 i_gen;
u32 i_pos_low;
u16 i_pos_hi;
u16 parent_i_pos_hi;
u32 parent_i_pos_low;
u32 parent_i_gen;
};
#define FAT_FID_SIZE_WITHOUT_PARENT 3
#define FAT_FID_SIZE_WITH_PARENT (sizeof(struct fat_fid)/sizeof(u32))
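/*
 * Illustrative sketch (editor's addition): the 48-bit on-disk
 * position is split across i_pos_hi/i_pos_low exactly as the
 * encode/decode helpers below do it. The handle sizes are counted in
 * 32-bit words: 3 without the parent, sizeof(struct fat_fid)/4 = 5
 * with it.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	long long i_pos = 0x123456789ALL;
	uint32_t low = i_pos & 0xFFFFFFFF;
	uint16_t hi = (i_pos >> 32) & 0xFFFF;
	long long back = ((long long)hi << 32) | low;

	printf("hi=%#x low=%#x reassembled=%#llx\n", hi, low, back);
	return 0;
}
#endif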
/*
* Look up a directory inode given its starting cluster.
*/
static struct inode *fat_dget(struct super_block *sb, int i_logstart)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct hlist_head *head;
struct msdos_inode_info *i;
struct inode *inode = NULL;
head = sbi->dir_hashtable + fat_dir_hash(i_logstart);
spin_lock(&sbi->dir_hash_lock);
hlist_for_each_entry(i, head, i_dir_hash) {
BUG_ON(i->vfs_inode.i_sb != sb);
if (i->i_logstart != i_logstart)
continue;
inode = igrab(&i->vfs_inode);
if (inode)
break;
}
spin_unlock(&sbi->dir_hash_lock);
return inode;
}
static struct inode *fat_ilookup(struct super_block *sb, u64 ino, loff_t i_pos)
{
if (MSDOS_SB(sb)->options.nfs == FAT_NFS_NOSTALE_RO)
return fat_iget(sb, i_pos);
else {
if ((ino < MSDOS_ROOT_INO) || (ino == MSDOS_FSINFO_INO))
return NULL;
return ilookup(sb, ino);
}
}
static struct inode *__fat_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation, loff_t i_pos)
{
struct inode *inode = fat_ilookup(sb, ino, i_pos);
if (inode && generation && (inode->i_generation != generation)) {
iput(inode);
inode = NULL;
}
if (inode == NULL && MSDOS_SB(sb)->options.nfs == FAT_NFS_NOSTALE_RO) {
struct buffer_head *bh = NULL;
		struct msdos_dir_entry *de;
sector_t blocknr;
int offset;
fat_get_blknr_offset(MSDOS_SB(sb), i_pos, &blocknr, &offset);
bh = sb_bread(sb, blocknr);
if (!bh) {
fat_msg(sb, KERN_ERR,
"unable to read block(%llu) for building NFS inode",
(llu)blocknr);
return inode;
}
de = (struct msdos_dir_entry *)bh->b_data;
		/* If a file is deleted on the server and the client has not
		 * been updated yet, we must not build the inode upon a
		 * lookup call.
		 */
if (IS_FREE(de[offset].name))
inode = NULL;
else
inode = fat_build_inode(sb, &de[offset], i_pos);
brelse(bh);
}
return inode;
}
static struct inode *fat_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
{
return __fat_nfs_get_inode(sb, ino, generation, 0);
}
static int
fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp,
struct inode *parent)
{
int len = *lenp;
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
struct fat_fid *fid = (struct fat_fid *) fh;
loff_t i_pos;
int type = FILEID_FAT_WITHOUT_PARENT;
if (parent) {
if (len < FAT_FID_SIZE_WITH_PARENT) {
*lenp = FAT_FID_SIZE_WITH_PARENT;
return FILEID_INVALID;
}
} else {
if (len < FAT_FID_SIZE_WITHOUT_PARENT) {
*lenp = FAT_FID_SIZE_WITHOUT_PARENT;
return FILEID_INVALID;
}
}
i_pos = fat_i_pos_read(sbi, inode);
*lenp = FAT_FID_SIZE_WITHOUT_PARENT;
fid->i_gen = inode->i_generation;
fid->i_pos_low = i_pos & 0xFFFFFFFF;
fid->i_pos_hi = (i_pos >> 32) & 0xFFFF;
if (parent) {
i_pos = fat_i_pos_read(sbi, parent);
fid->parent_i_pos_hi = (i_pos >> 32) & 0xFFFF;
fid->parent_i_pos_low = i_pos & 0xFFFFFFFF;
fid->parent_i_gen = parent->i_generation;
type = FILEID_FAT_WITH_PARENT;
*lenp = FAT_FID_SIZE_WITH_PARENT;
}
return type;
}
/*
* Map a NFS file handle to a corresponding dentry.
* The dentry may or may not be connected to the filesystem root.
*/
static struct dentry *fat_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
fat_nfs_get_inode);
}
static struct dentry *fat_fh_to_dentry_nostale(struct super_block *sb,
struct fid *fh, int fh_len,
int fh_type)
{
struct inode *inode = NULL;
struct fat_fid *fid = (struct fat_fid *)fh;
loff_t i_pos;
switch (fh_type) {
case FILEID_FAT_WITHOUT_PARENT:
if (fh_len < FAT_FID_SIZE_WITHOUT_PARENT)
return NULL;
break;
case FILEID_FAT_WITH_PARENT:
if (fh_len < FAT_FID_SIZE_WITH_PARENT)
return NULL;
break;
default:
return NULL;
}
i_pos = fid->i_pos_hi;
i_pos = (i_pos << 32) | (fid->i_pos_low);
inode = __fat_nfs_get_inode(sb, 0, fid->i_gen, i_pos);
return d_obtain_alias(inode);
}
/*
* Find the parent for a file specified by NFS handle.
* This requires that the handle contain the i_ino of the parent.
*/
static struct dentry *fat_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_parent(sb, fid, fh_len, fh_type,
fat_nfs_get_inode);
}
static struct dentry *fat_fh_to_parent_nostale(struct super_block *sb,
struct fid *fh, int fh_len,
int fh_type)
{
struct inode *inode = NULL;
struct fat_fid *fid = (struct fat_fid *)fh;
loff_t i_pos;
if (fh_len < FAT_FID_SIZE_WITH_PARENT)
return NULL;
switch (fh_type) {
case FILEID_FAT_WITH_PARENT:
i_pos = fid->parent_i_pos_hi;
i_pos = (i_pos << 32) | (fid->parent_i_pos_low);
inode = __fat_nfs_get_inode(sb, 0, fid->parent_i_gen, i_pos);
break;
}
return d_obtain_alias(inode);
}
/*
* Rebuild the parent for a directory that is not connected
 * to the filesystem root.
*/
static
struct inode *fat_rebuild_parent(struct super_block *sb, int parent_logstart)
{
int search_clus, clus_to_match;
struct msdos_dir_entry *de;
struct inode *parent = NULL;
struct inode *dummy_grand_parent = NULL;
struct fat_slot_info sinfo;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
sector_t blknr = fat_clus_to_blknr(sbi, parent_logstart);
struct buffer_head *parent_bh = sb_bread(sb, blknr);
if (!parent_bh) {
fat_msg(sb, KERN_ERR,
"unable to read cluster of parent directory");
return NULL;
}
de = (struct msdos_dir_entry *) parent_bh->b_data;
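	/* the first two entries of a directory are "." and ".." */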
clus_to_match = fat_get_start(sbi, &de[0]);
search_clus = fat_get_start(sbi, &de[1]);
dummy_grand_parent = fat_dget(sb, search_clus);
if (!dummy_grand_parent) {
dummy_grand_parent = new_inode(sb);
if (!dummy_grand_parent) {
brelse(parent_bh);
return parent;
}
dummy_grand_parent->i_ino = iunique(sb, MSDOS_ROOT_INO);
fat_fill_inode(dummy_grand_parent, &de[1]);
MSDOS_I(dummy_grand_parent)->i_pos = -1;
}
if (!fat_scan_logstart(dummy_grand_parent, clus_to_match, &sinfo))
parent = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(parent_bh);
iput(dummy_grand_parent);
return parent;
}
/*
* Find the parent for a directory that is not currently connected to
* the filesystem root.
*
* On entry, the caller holds d_inode(child_dir)->i_mutex.
*/
static struct dentry *fat_get_parent(struct dentry *child_dir)
{
struct super_block *sb = child_dir->d_sb;
struct buffer_head *bh = NULL;
struct msdos_dir_entry *de;
struct inode *parent_inode = NULL;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
if (!fat_get_dotdot_entry(d_inode(child_dir), &bh, &de)) {
int parent_logstart = fat_get_start(sbi, de);
parent_inode = fat_dget(sb, parent_logstart);
if (!parent_inode && sbi->options.nfs == FAT_NFS_NOSTALE_RO)
parent_inode = fat_rebuild_parent(sb, parent_logstart);
}
brelse(bh);
return d_obtain_alias(parent_inode);
}
const struct export_operations fat_export_ops = {
.fh_to_dentry = fat_fh_to_dentry,
.fh_to_parent = fat_fh_to_parent,
.get_parent = fat_get_parent,
};
const struct export_operations fat_export_ops_nostale = {
.encode_fh = fat_encode_fh_nostale,
.fh_to_dentry = fat_fh_to_dentry_nostale,
.fh_to_parent = fat_fh_to_parent_nostale,
.get_parent = fat_get_parent,
};
| linux-master | fs/fat/nfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/msdos/namei.c
*
* Written 1992,1993 by Werner Almesberger
* Hidden files 1995 by Albert Cahalan <[email protected]> <[email protected]>
* Rewritten for constant inumbers 1999 by Al Viro
*/
#include <linux/module.h>
#include <linux/iversion.h>
#include "fat.h"
/* Characters that are undesirable in an MS-DOS file name */
static unsigned char bad_chars[] = "*?<>|\"";
static unsigned char bad_if_strict[] = "+=,; ";
/***** Formats an MS-DOS file name. Rejects invalid names. */
static int msdos_format_name(const unsigned char *name, int len,
unsigned char *res, struct fat_mount_options *opts)
/*
* name is the proposed name, len is its length, res is
* the resulting name, opts->name_check is either (r)elaxed,
* (n)ormal or (s)trict, opts->dotsOK allows dots at the
* beginning of name (for hidden files)
*/
{
unsigned char *walk;
unsigned char c;
int space;
if (name[0] == '.') { /* dotfile because . and .. already done */
if (opts->dotsOK) {
/* Get rid of dot - test for it elsewhere */
name++;
len--;
} else
return -EINVAL;
}
/*
* disallow names that _really_ start with a dot
*/
space = 1;
c = 0;
for (walk = res; len && walk - res < 8; walk++) {
c = *name++;
len--;
if (opts->name_check != 'r' && strchr(bad_chars, c))
return -EINVAL;
if (opts->name_check == 's' && strchr(bad_if_strict, c))
return -EINVAL;
if (c >= 'A' && c <= 'Z' && opts->name_check == 's')
return -EINVAL;
if (c < ' ' || c == ':' || c == '\\')
return -EINVAL;
/*
* 0xE5 is legal as a first character, but we must substitute
* 0x05 because 0xE5 marks deleted files. Yes, DOS really
* does this.
* It seems that Microsoft hacked DOS to support non-US
* characters after the 0xE5 character was already in use to
* mark deleted files.
*/
if ((res == walk) && (c == 0xE5))
c = 0x05;
if (c == '.')
break;
space = (c == ' ');
*walk = (!opts->nocase && c >= 'a' && c <= 'z') ? c - 32 : c;
}
if (space)
return -EINVAL;
if (opts->name_check == 's' && len && c != '.') {
c = *name++;
len--;
if (c != '.')
return -EINVAL;
}
while (c != '.' && len--)
c = *name++;
if (c == '.') {
while (walk - res < 8)
*walk++ = ' ';
while (len > 0 && walk - res < MSDOS_NAME) {
c = *name++;
len--;
if (opts->name_check != 'r' && strchr(bad_chars, c))
return -EINVAL;
if (opts->name_check == 's' &&
strchr(bad_if_strict, c))
return -EINVAL;
if (c < ' ' || c == ':' || c == '\\')
return -EINVAL;
if (c == '.') {
if (opts->name_check == 's')
return -EINVAL;
break;
}
if (c >= 'A' && c <= 'Z' && opts->name_check == 's')
return -EINVAL;
space = c == ' ';
if (!opts->nocase && c >= 'a' && c <= 'z')
*walk++ = c - 32;
else
*walk++ = c;
}
if (space)
return -EINVAL;
if (opts->name_check == 's' && len)
return -EINVAL;
}
while (walk - res < MSDOS_NAME)
*walk++ = ' ';
return 0;
}
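/*
 * Illustrative sketch (editor's addition): a stripped-down version of
 * the formatting done above, ignoring the bad-character and
 * name_check policing. "readme.txt" becomes the 11-byte, space-padded
 * array "README  TXT".
 */
#if 0
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int demo_format_name(const char *name, char res[11])
{
	int i = 0, j;

	memset(res, ' ', 11);
	for (j = 0; name[i] && name[i] != '.' && j < 8; i++, j++)
		res[j] = toupper((unsigned char)name[i]);
	if (name[i] == '.')
		for (i++, j = 8; name[i] && j < 11; i++, j++)
			res[j] = toupper((unsigned char)name[i]);
	return name[i] ? -1 : 0;	/* leftover characters: invalid */
}

int main(void)
{
	char res[11];

	if (!demo_format_name("readme.txt", res))
		printf("[%.11s]\n", res);	/* [README  TXT] */
	return 0;
}
#endif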
/***** Locates a directory entry. Uses unformatted name. */
static int msdos_find(struct inode *dir, const unsigned char *name, int len,
struct fat_slot_info *sinfo)
{
struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
unsigned char msdos_name[MSDOS_NAME];
int err;
err = msdos_format_name(name, len, msdos_name, &sbi->options);
if (err)
return -ENOENT;
err = fat_scan(dir, msdos_name, sinfo);
if (!err && sbi->options.dotsOK) {
if (name[0] == '.') {
if (!(sinfo->de->attr & ATTR_HIDDEN))
err = -ENOENT;
} else {
if (sinfo->de->attr & ATTR_HIDDEN)
err = -ENOENT;
}
if (err)
brelse(sinfo->bh);
}
return err;
}
/*
* Compute the hash for the msdos name corresponding to the dentry.
* Note: if the name is invalid, we leave the hash code unchanged so
* that the existing dentry can be used. The msdos fs routines will
* return ENOENT or EINVAL as appropriate.
*/
static int msdos_hash(const struct dentry *dentry, struct qstr *qstr)
{
struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options;
unsigned char msdos_name[MSDOS_NAME];
int error;
error = msdos_format_name(qstr->name, qstr->len, msdos_name, options);
if (!error)
qstr->hash = full_name_hash(dentry, msdos_name, MSDOS_NAME);
return 0;
}
/*
* Compare two msdos names. If either of the names are invalid,
* we fall back to doing the standard name comparison.
*/
static int msdos_cmp(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options;
unsigned char a_msdos_name[MSDOS_NAME], b_msdos_name[MSDOS_NAME];
int error;
error = msdos_format_name(name->name, name->len, a_msdos_name, options);
if (error)
goto old_compare;
error = msdos_format_name(str, len, b_msdos_name, options);
if (error)
goto old_compare;
error = memcmp(a_msdos_name, b_msdos_name, MSDOS_NAME);
out:
return error;
old_compare:
error = 1;
if (name->len == len)
error = memcmp(name->name, str, len);
goto out;
}
static const struct dentry_operations msdos_dentry_operations = {
.d_hash = msdos_hash,
.d_compare = msdos_cmp,
};
/*
* AV. Wrappers for FAT sb operations. Is it wise?
*/
/***** Get inode using directory and name */
static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
struct inode *inode;
int err;
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
switch (err) {
case -ENOENT:
inode = NULL;
break;
case 0:
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
break;
default:
inode = ERR_PTR(err);
}
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return d_splice_alias(inode, dentry);
}
/***** Creates a directory entry (name is already formatted). */
static int msdos_add_entry(struct inode *dir, const unsigned char *name,
int is_dir, int is_hid, int cluster,
struct timespec64 *ts, struct fat_slot_info *sinfo)
{
struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
struct msdos_dir_entry de;
__le16 time, date;
int err;
memcpy(de.name, name, MSDOS_NAME);
de.attr = is_dir ? ATTR_DIR : ATTR_ARCH;
if (is_hid)
de.attr |= ATTR_HIDDEN;
de.lcase = 0;
fat_time_unix2fat(sbi, ts, &time, &date, NULL);
de.cdate = de.adate = 0;
de.ctime = 0;
de.ctime_cs = 0;
de.time = time;
de.date = date;
fat_set_start(&de, cluster);
de.size = 0;
err = fat_add_entries(dir, &de, 1, sinfo);
if (err)
return err;
fat_truncate_time(dir, ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
mark_inode_dirty(dir);
return 0;
}
/***** Create a file */
static int msdos_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
struct inode *inode = NULL;
struct fat_slot_info sinfo;
struct timespec64 ts;
unsigned char msdos_name[MSDOS_NAME];
int err, is_hid;
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = msdos_format_name(dentry->d_name.name, dentry->d_name.len,
msdos_name, &MSDOS_SB(sb)->options);
if (err)
goto out;
is_hid = (dentry->d_name.name[0] == '.') && (msdos_name[0] != '.');
/* Have to do it due to foo vs. .foo conflicts */
if (!fat_scan(dir, msdos_name, &sinfo)) {
brelse(sinfo.bh);
err = -EINVAL;
goto out;
}
ts = current_time(dir);
err = msdos_add_entry(dir, msdos_name, 0, is_hid, 0, &ts, &sinfo);
if (err)
goto out;
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out;
}
fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!err)
err = fat_flush_inodes(sb, dir, inode);
return err;
}
/***** Remove a directory */
static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
{
struct super_block *sb = dir->i_sb;
struct inode *inode = d_inode(dentry);
struct fat_slot_info sinfo;
int err;
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = fat_dir_empty(inode);
if (err)
goto out;
err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
if (err)
goto out;
err = fat_remove_entries(dir, &sinfo); /* and releases bh */
if (err)
goto out;
drop_nlink(dir);
clear_nlink(inode);
fat_truncate_time(inode, NULL, S_CTIME);
fat_detach(inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!err)
err = fat_flush_inodes(sb, dir, inode);
return err;
}
/***** Make a directory */
static int msdos_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
struct inode *inode;
unsigned char msdos_name[MSDOS_NAME];
struct timespec64 ts;
int err, is_hid, cluster;
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = msdos_format_name(dentry->d_name.name, dentry->d_name.len,
msdos_name, &MSDOS_SB(sb)->options);
if (err)
goto out;
is_hid = (dentry->d_name.name[0] == '.') && (msdos_name[0] != '.');
/* foo vs .foo situation */
if (!fat_scan(dir, msdos_name, &sinfo)) {
brelse(sinfo.bh);
err = -EINVAL;
goto out;
}
ts = current_time(dir);
cluster = fat_alloc_new_dir(dir, &ts);
if (cluster < 0) {
err = cluster;
goto out;
}
err = msdos_add_entry(dir, msdos_name, 1, is_hid, cluster, &ts, &sinfo);
if (err)
goto out_free;
inc_nlink(dir);
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
		/* the directory was completed, just return an error */
goto out;
}
set_nlink(inode, 2);
fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
fat_flush_inodes(sb, dir, inode);
return 0;
out_free:
fat_free_clusters(dir, cluster);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
}
/***** Unlink a file */
static int msdos_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
struct super_block *sb = inode->i_sb;
struct fat_slot_info sinfo;
int err;
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
if (err)
goto out;
err = fat_remove_entries(dir, &sinfo); /* and releases bh */
if (err)
goto out;
clear_nlink(inode);
fat_truncate_time(inode, NULL, S_CTIME);
fat_detach(inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!err)
err = fat_flush_inodes(sb, dir, inode);
return err;
}
static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
struct dentry *old_dentry,
struct inode *new_dir, unsigned char *new_name,
struct dentry *new_dentry, int is_hid)
{
struct buffer_head *dotdot_bh;
struct msdos_dir_entry *dotdot_de;
struct inode *old_inode, *new_inode;
struct fat_slot_info old_sinfo, sinfo;
struct timespec64 ts;
loff_t new_i_pos;
int err, old_attrs, is_dir, update_dotdot, corrupt = 0;
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
old_inode = d_inode(old_dentry);
new_inode = d_inode(new_dentry);
err = fat_scan(old_dir, old_name, &old_sinfo);
if (err) {
err = -EIO;
goto out;
}
is_dir = S_ISDIR(old_inode->i_mode);
update_dotdot = (is_dir && old_dir != new_dir);
if (update_dotdot) {
if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de)) {
err = -EIO;
goto out;
}
}
old_attrs = MSDOS_I(old_inode)->i_attrs;
err = fat_scan(new_dir, new_name, &sinfo);
if (!err) {
if (!new_inode) {
/* "foo" -> ".foo" case. just change the ATTR_HIDDEN */
if (sinfo.de != old_sinfo.de) {
err = -EINVAL;
goto out;
}
if (is_hid)
MSDOS_I(old_inode)->i_attrs |= ATTR_HIDDEN;
else
MSDOS_I(old_inode)->i_attrs &= ~ATTR_HIDDEN;
if (IS_DIRSYNC(old_dir)) {
err = fat_sync_inode(old_inode);
if (err) {
MSDOS_I(old_inode)->i_attrs = old_attrs;
goto out;
}
} else
mark_inode_dirty(old_inode);
inode_inc_iversion(old_dir);
fat_truncate_time(old_dir, NULL, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
mark_inode_dirty(old_dir);
goto out;
}
}
ts = current_time(old_inode);
if (new_inode) {
if (err)
goto out;
if (is_dir) {
err = fat_dir_empty(new_inode);
if (err)
goto out;
}
new_i_pos = MSDOS_I(new_inode)->i_pos;
fat_detach(new_inode);
} else {
err = msdos_add_entry(new_dir, new_name, is_dir, is_hid, 0,
&ts, &sinfo);
if (err)
goto out;
new_i_pos = sinfo.i_pos;
}
inode_inc_iversion(new_dir);
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
if (is_hid)
MSDOS_I(old_inode)->i_attrs |= ATTR_HIDDEN;
else
MSDOS_I(old_inode)->i_attrs &= ~ATTR_HIDDEN;
if (IS_DIRSYNC(new_dir)) {
err = fat_sync_inode(old_inode);
if (err)
goto error_inode;
} else
mark_inode_dirty(old_inode);
if (update_dotdot) {
fat_set_start(dotdot_de, MSDOS_I(new_dir)->i_logstart);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
if (IS_DIRSYNC(new_dir)) {
err = sync_dirty_buffer(dotdot_bh);
if (err)
goto error_dotdot;
}
drop_nlink(old_dir);
if (!new_inode)
inc_nlink(new_dir);
}
err = fat_remove_entries(old_dir, &old_sinfo); /* and releases bh */
old_sinfo.bh = NULL;
if (err)
goto error_dotdot;
inode_inc_iversion(old_dir);
fat_truncate_time(old_dir, &ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
mark_inode_dirty(old_dir);
if (new_inode) {
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
fat_truncate_time(new_inode, &ts, S_CTIME);
}
out:
brelse(sinfo.bh);
brelse(dotdot_bh);
brelse(old_sinfo.bh);
return err;
error_dotdot:
/* data cluster is shared, serious corruption */
corrupt = 1;
if (update_dotdot) {
fat_set_start(dotdot_de, MSDOS_I(old_dir)->i_logstart);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
corrupt |= sync_dirty_buffer(dotdot_bh);
}
error_inode:
fat_detach(old_inode);
fat_attach(old_inode, old_sinfo.i_pos);
MSDOS_I(old_inode)->i_attrs = old_attrs;
if (new_inode) {
fat_attach(new_inode, new_i_pos);
if (corrupt)
corrupt |= fat_sync_inode(new_inode);
} else {
/*
		 * If the new entry was not sharing the data cluster, it
* shouldn't be serious corruption.
*/
int err2 = fat_remove_entries(new_dir, &sinfo);
if (corrupt)
corrupt |= err2;
sinfo.bh = NULL;
}
if (corrupt < 0) {
fat_fs_error(new_dir->i_sb,
"%s: Filesystem corrupted (i_pos %lld)",
__func__, sinfo.i_pos);
}
goto out;
}
/***** Rename, a wrapper for rename_same_dir & rename_diff_dir */
static int msdos_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct super_block *sb = old_dir->i_sb;
unsigned char old_msdos_name[MSDOS_NAME], new_msdos_name[MSDOS_NAME];
int err, is_hid;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = msdos_format_name(old_dentry->d_name.name,
old_dentry->d_name.len, old_msdos_name,
&MSDOS_SB(old_dir->i_sb)->options);
if (err)
goto out;
err = msdos_format_name(new_dentry->d_name.name,
new_dentry->d_name.len, new_msdos_name,
&MSDOS_SB(new_dir->i_sb)->options);
if (err)
goto out;
is_hid =
(new_dentry->d_name.name[0] == '.') && (new_msdos_name[0] != '.');
err = do_msdos_rename(old_dir, old_msdos_name, old_dentry,
new_dir, new_msdos_name, new_dentry, is_hid);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!err)
err = fat_flush_inodes(sb, old_dir, new_dir);
return err;
}
static const struct inode_operations msdos_dir_inode_operations = {
.create = msdos_create,
.lookup = msdos_lookup,
.unlink = msdos_unlink,
.mkdir = msdos_mkdir,
.rmdir = msdos_rmdir,
.rename = msdos_rename,
.setattr = fat_setattr,
.getattr = fat_getattr,
.update_time = fat_update_time,
};
static void setup(struct super_block *sb)
{
MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations;
sb->s_d_op = &msdos_dentry_operations;
sb->s_flags |= SB_NOATIME;
}
static int msdos_fill_super(struct super_block *sb, void *data, int silent)
{
return fat_fill_super(sb, data, silent, 0, setup);
}
static struct dentry *msdos_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, msdos_fill_super);
}
static struct file_system_type msdos_fs_type = {
.owner = THIS_MODULE,
.name = "msdos",
.mount = msdos_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("msdos");
static int __init init_msdos_fs(void)
{
return register_filesystem(&msdos_fs_type);
}
static void __exit exit_msdos_fs(void)
{
unregister_filesystem(&msdos_fs_type);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Werner Almesberger");
MODULE_DESCRIPTION("MS-DOS filesystem support");
module_init(init_msdos_fs)
module_exit(exit_msdos_fs)
| linux-master | fs/fat/namei_msdos.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/vfat/namei.c
*
* Written 1992,1993 by Werner Almesberger
*
* Windows95/Windows NT compatible extended MSDOS filesystem
* by Gordon Chaffee Copyright (C) 1995. Send bug reports for the
* VFAT filesystem to <[email protected]>. Specify
* what file operation caused you trouble and if you can duplicate
* the problem, send a script that demonstrates it.
*
* Short name translation 1999, 2001 by Wolfram Pienkoss <[email protected]>
*
* Support Multibyte characters and cleanup by
* OGAWA Hirofumi <[email protected]>
*/
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/kernel.h>
#include <linux/iversion.h>
#include "fat.h"
static inline unsigned long vfat_d_version(struct dentry *dentry)
{
return (unsigned long) dentry->d_fsdata;
}
static inline void vfat_d_version_set(struct dentry *dentry,
unsigned long version)
{
dentry->d_fsdata = (void *) version;
}
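/*
 * Editor's note: ->d_fsdata on a negative dentry caches the parent
 * directory's iversion at the time of the failed lookup (see the
 * vfat_d_version_set() callers below). vfat_revalidate_shortname()
 * compares it against the parent's current iversion to decide whether
 * the negative dentry can still be trusted.
 */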
/*
 * If a new entry was created in the parent, it could create the 8.3
 * alias (the shortname of the long name). So, the parent may have a
 * negative dentry which matches the created 8.3 alias.
 *
 * If that happened, the negative dentry isn't actually negative
 * anymore. So, drop it.
*/
static int vfat_revalidate_shortname(struct dentry *dentry)
{
int ret = 1;
spin_lock(&dentry->d_lock);
if (!inode_eq_iversion(d_inode(dentry->d_parent), vfat_d_version(dentry)))
ret = 0;
spin_unlock(&dentry->d_lock);
return ret;
}
static int vfat_revalidate(struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
	/* This is not a negative dentry. Always valid. */
if (d_really_is_positive(dentry))
return 1;
return vfat_revalidate_shortname(dentry);
}
static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
{
if (flags & LOOKUP_RCU)
return -ECHILD;
/*
	 * This is not a negative dentry. Always valid.
	 *
	 * Note, rename() to an existing directory entry will have
	 * ->d_inode, and will use the existing name, which isn't the
	 * name specified by the user.
	 *
	 * We may be able to drop this positive dentry here. But dropping
	 * a positive dentry isn't a good idea. So it's unsupported, like
	 * rename("filename", "FILENAME"), for now.
*/
if (d_really_is_positive(dentry))
return 1;
/*
	 * This may be nfsd (or something); either way, we can't see the
	 * intent of it. So, since this can be for creation, drop it.
*/
if (!flags)
return 0;
/*
* Drop the negative dentry, in order to make sure to use the
* case sensitive name which is specified by user if this is
* for creation.
*/
if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
return vfat_revalidate_shortname(dentry);
}
/* returns the length of a struct qstr, ignoring trailing dots */
static unsigned int __vfat_striptail_len(unsigned int len, const char *name)
{
while (len && name[len - 1] == '.')
len--;
return len;
}
static unsigned int vfat_striptail_len(const struct qstr *qstr)
{
return __vfat_striptail_len(qstr->len, qstr->name);
}
/*
* Compute the hash for the vfat name corresponding to the dentry.
* Note: if the name is invalid, we leave the hash code unchanged so
* that the existing dentry can be used. The vfat fs routines will
* return ENOENT or EINVAL as appropriate.
*/
static int vfat_hash(const struct dentry *dentry, struct qstr *qstr)
{
qstr->hash = full_name_hash(dentry, qstr->name, vfat_striptail_len(qstr));
return 0;
}
/*
* Compute the hash for the vfat name corresponding to the dentry.
* Note: if the name is invalid, we leave the hash code unchanged so
* that the existing dentry can be used. The vfat fs routines will
* return ENOENT or EINVAL as appropriate.
*/
static int vfat_hashi(const struct dentry *dentry, struct qstr *qstr)
{
struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
const unsigned char *name;
unsigned int len;
unsigned long hash;
name = qstr->name;
len = vfat_striptail_len(qstr);
hash = init_name_hash(dentry);
while (len--)
hash = partial_name_hash(nls_tolower(t, *name++), hash);
qstr->hash = end_name_hash(hash);
return 0;
}
/*
* Case insensitive compare of two vfat names.
*/
static int vfat_cmpi(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
unsigned int alen, blen;
/* A filename cannot end in '.' or we treat it like it has none */
alen = vfat_striptail_len(name);
blen = __vfat_striptail_len(len, str);
if (alen == blen) {
if (nls_strnicmp(t, name->name, str, alen) == 0)
return 0;
}
return 1;
}
/*
* Case sensitive compare of two vfat names.
*/
static int vfat_cmp(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
unsigned int alen, blen;
/* A filename cannot end in '.' or we treat it like it has none */
alen = vfat_striptail_len(name);
blen = __vfat_striptail_len(len, str);
if (alen == blen) {
if (strncmp(name->name, str, alen) == 0)
return 0;
}
return 1;
}
static const struct dentry_operations vfat_ci_dentry_ops = {
.d_revalidate = vfat_revalidate_ci,
.d_hash = vfat_hashi,
.d_compare = vfat_cmpi,
};
static const struct dentry_operations vfat_dentry_ops = {
.d_revalidate = vfat_revalidate,
.d_hash = vfat_hash,
.d_compare = vfat_cmp,
};
/* Characters that are undesirable in an MS-DOS file name */
static inline bool vfat_bad_char(wchar_t w)
{
return (w < 0x0020)
|| (w == '*') || (w == '?') || (w == '<') || (w == '>')
|| (w == '|') || (w == '"') || (w == ':') || (w == '/')
|| (w == '\\');
}
static inline bool vfat_replace_char(wchar_t w)
{
return (w == '[') || (w == ']') || (w == ';') || (w == ',')
|| (w == '+') || (w == '=');
}
static wchar_t vfat_skip_char(wchar_t w)
{
return (w == '.') || (w == ' ');
}
static inline int vfat_is_used_badchars(const wchar_t *s, int len)
{
int i;
for (i = 0; i < len; i++)
if (vfat_bad_char(s[i]))
return -EINVAL;
if (s[i - 1] == ' ') /* last character cannot be space */
return -EINVAL;
return 0;
}
static int vfat_find_form(struct inode *dir, unsigned char *name)
{
struct fat_slot_info sinfo;
int err = fat_scan(dir, name, &sinfo);
if (err)
return -ENOENT;
brelse(sinfo.bh);
return 0;
}
/*
 * 1) Valid characters for the 8.3 format alias are any combination of
 * uppercase letters, digits, and any of the following special
 * characters:
 *     $ % ' ` - @ { } ~ ! # ( ) & _ ^
 * In this case the long filename is not stored on disk.
 *
 * WinNT's extension:
 * The file name and the extension each contain only uppercase or only
 * lowercase characters, which is expressed by CASE_LOWER_BASE and
 * CASE_LOWER_EXT.
 *
 * 2) The file name is in 8.3 format but contains mixed-case
 * characters, multi-byte characters, etc. In this case no numeric
 * tail is added, but the long filename is stored.
 *
 * 3) In any case other than the above, or when any of the following
 * special characters is contained:
 *     . [ ] ; , + =
 * a numeric tail is added, and the long filename must be stored on
 * disk.
*/
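/*
 * Worked examples (editor's addition): "README.TXT" falls under 1)
 * and needs no long name entries; "Readme.txt" is still 8.3 under the
 * WinNT extension, with CASE_LOWER_BASE set; "longfilename.txt" falls
 * under 3) and gets a short alias of the form "LONGFI~1.TXT".
 */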
struct shortname_info {
unsigned char lower:1,
upper:1,
valid:1;
};
#define INIT_SHORTNAME_INFO(x) do { \
(x)->lower = 1; \
(x)->upper = 1; \
(x)->valid = 1; \
} while (0)
static inline int to_shortname_char(struct nls_table *nls,
unsigned char *buf, int buf_size,
wchar_t *src, struct shortname_info *info)
{
int len;
if (vfat_skip_char(*src)) {
info->valid = 0;
return 0;
}
if (vfat_replace_char(*src)) {
info->valid = 0;
buf[0] = '_';
return 1;
}
len = nls->uni2char(*src, buf, buf_size);
if (len <= 0) {
info->valid = 0;
buf[0] = '_';
len = 1;
} else if (len == 1) {
unsigned char prev = buf[0];
if (buf[0] >= 0x7F) {
info->lower = 0;
info->upper = 0;
}
buf[0] = nls_toupper(nls, buf[0]);
if (isalpha(buf[0])) {
if (buf[0] == prev)
info->lower = 0;
else
info->upper = 0;
}
} else {
info->lower = 0;
info->upper = 0;
}
return len;
}
/*
 * Given a valid long name, create a unique short name. Make sure the
 * short name does not already exist.
 * Returns a negative number on error, 0 for a normal
 * return, and 1 for a valid short name.
*/
static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
wchar_t *uname, int ulen,
unsigned char *name_res, unsigned char *lcase)
{
struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
wchar_t *ip, *ext_start, *end, *name_start;
unsigned char base[9], ext[4], buf[5], *p;
unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
int chl, chi;
int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
int is_shortname;
struct shortname_info base_info, ext_info;
is_shortname = 1;
INIT_SHORTNAME_INFO(&base_info);
INIT_SHORTNAME_INFO(&ext_info);
/* Now, we need to create a shortname from the long name */
ext_start = end = &uname[ulen];
while (--ext_start >= uname) {
if (*ext_start == 0x002E) { /* is `.' */
if (ext_start == end - 1) {
sz = ulen;
ext_start = NULL;
}
break;
}
}
if (ext_start == uname - 1) {
sz = ulen;
ext_start = NULL;
} else if (ext_start) {
/*
		 * Names which start with a dot could be just
		 * an extension, e.g. "...test". In this case Win95
* uses the extension as the name and sets no extension.
*/
name_start = &uname[0];
while (name_start < ext_start) {
if (!vfat_skip_char(*name_start))
break;
name_start++;
}
if (name_start != ext_start) {
sz = ext_start - uname;
ext_start++;
} else {
sz = ulen;
ext_start = NULL;
}
}
numtail_baselen = 6;
numtail2_baselen = 2;
for (baselen = i = 0, p = base, ip = uname; i < sz; i++, ip++) {
chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
ip, &base_info);
if (chl == 0)
continue;
if (baselen < 2 && (baselen + chl) > 2)
numtail2_baselen = baselen;
if (baselen < 6 && (baselen + chl) > 6)
numtail_baselen = baselen;
for (chi = 0; chi < chl; chi++) {
*p++ = charbuf[chi];
baselen++;
if (baselen >= 8)
break;
}
if (baselen >= 8) {
if ((chi < chl - 1) || (ip + 1) - uname < sz)
is_shortname = 0;
break;
}
}
if (baselen == 0) {
return -EINVAL;
}
extlen = 0;
if (ext_start) {
for (p = ext, ip = ext_start; extlen < 3 && ip < end; ip++) {
chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
ip, &ext_info);
if (chl == 0)
continue;
if ((extlen + chl) > 3) {
is_shortname = 0;
break;
}
for (chi = 0; chi < chl; chi++) {
*p++ = charbuf[chi];
extlen++;
}
if (extlen >= 3) {
if (ip + 1 != end)
is_shortname = 0;
break;
}
}
}
ext[extlen] = '\0';
base[baselen] = '\0';
/* Yes, it can happen. ".\xe5" would do it. */
if (base[0] == DELETED_FLAG)
base[0] = 0x05;
/* OK, at this point we know that base is not longer than 8 symbols,
* ext is not longer than 3, base is nonempty, both don't contain
* any bad symbols (lowercase transformed to uppercase).
*/
memset(name_res, ' ', MSDOS_NAME);
memcpy(name_res, base, baselen);
memcpy(name_res + 8, ext, extlen);
*lcase = 0;
if (is_shortname && base_info.valid && ext_info.valid) {
if (vfat_find_form(dir, name_res) == 0)
return -EEXIST;
if (opts->shortname & VFAT_SFN_CREATE_WIN95) {
return (base_info.upper && ext_info.upper);
} else if (opts->shortname & VFAT_SFN_CREATE_WINNT) {
if ((base_info.upper || base_info.lower) &&
(ext_info.upper || ext_info.lower)) {
if (!base_info.upper && base_info.lower)
*lcase |= CASE_LOWER_BASE;
if (!ext_info.upper && ext_info.lower)
*lcase |= CASE_LOWER_EXT;
return 1;
}
return 0;
} else {
BUG();
}
}
if (opts->numtail == 0)
if (vfat_find_form(dir, name_res) < 0)
return 0;
/*
* Try to find a unique extension. This used to
* iterate through all possibilities sequentially,
* but that gave extremely bad performance. Windows
* only tries a few cases before using random
* values for part of the base.
*/
if (baselen > 6) {
baselen = numtail_baselen;
name_res[7] = ' ';
}
name_res[baselen] = '~';
for (i = 1; i < 10; i++) {
name_res[baselen + 1] = i + '0';
if (vfat_find_form(dir, name_res) < 0)
return 0;
}
i = jiffies;
sz = (jiffies >> 16) & 0x7;
if (baselen > 2) {
baselen = numtail2_baselen;
name_res[7] = ' ';
}
name_res[baselen + 4] = '~';
name_res[baselen + 5] = '1' + sz;
while (1) {
snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
memcpy(&name_res[baselen], buf, 4);
if (vfat_find_form(dir, name_res) < 0)
break;
i -= 11;
}
return 0;
}
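/*
 * Illustrative sketch (editor's addition): shape of the generated
 * tails. The first attempts are plain numeric tails "~1".."~9"; after
 * that, four hex digits derived from jiffies replace part of the
 * base, as in the loop above. The values below are made up.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name_res[12] = "LONGFI~1TXT";	/* plain numeric tail */
	unsigned int i = 0x1234, sz = 2;	/* stand-ins for jiffies bits */
	char buf[5];

	printf("%.11s\n", name_res);
	snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
	memcpy(&name_res[2], buf, 4);		/* numtail2_baselen == 2 */
	name_res[6] = '~';
	name_res[7] = '1' + sz;
	printf("%.11s\n", name_res);		/* LO1234~3TXT */
	return 0;
}
#endif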
/* Translate a string, including coded sequences into Unicode */
static int
xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int *longlen, int *outlen, int escape, int utf8,
struct nls_table *nls)
{
const unsigned char *ip;
unsigned char *op;
int i, fill;
int charlen;
if (utf8) {
*outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
(wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN)
return -ENAMETOOLONG;
op = &outname[*outlen * sizeof(wchar_t)];
} else {
for (i = 0, ip = name, op = outname, *outlen = 0;
i < len && *outlen < FAT_LFN_LEN;
*outlen += 1) {
if (escape && (*ip == ':')) {
u8 uc[2];
if (i > len - 5)
return -EINVAL;
if (hex2bin(uc, ip + 1, 2) < 0)
return -EINVAL;
*(wchar_t *)op = uc[0] << 8 | uc[1];
op += 2;
ip += 5;
i += 5;
} else {
charlen = nls->char2uni(ip, len - i,
(wchar_t *)op);
if (charlen < 0)
return -EINVAL;
ip += charlen;
i += charlen;
op += 2;
}
}
if (i < len)
return -ENAMETOOLONG;
}
*longlen = *outlen;
if (*outlen % 13) {
*op++ = 0;
*op++ = 0;
*outlen += 1;
if (*outlen % 13) {
fill = 13 - (*outlen % 13);
for (i = 0; i < fill; i++) {
*op++ = 0xff;
*op++ = 0xff;
}
*outlen += fill;
}
}
return 0;
}
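/*
 * Illustrative sketch (editor's addition): the nonstandard ":xxxx"
 * escape accepted above (opts->unicode_xlate) encodes one UTF-16 code
 * unit as four hex digits, and the converted name is padded to a
 * multiple of 13 units (one LFN slot) with a 0x0000 terminator and
 * 0xffff fill, as in the tail of xlate_to_uni().
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int cu;
	int outlen = 5;			/* e.g. a 5-character name */

	sscanf(":0041" + 1, "%4x", &cu);	/* ':' escape -> U+0041 */
	printf("code unit U+%04X\n", cu);
	if (outlen % 13)
		outlen += 1 + (13 - (outlen + 1) % 13) % 13;
	printf("padded length = %d\n", outlen);	/* prints 13 */
	return 0;
}
#endif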
static int vfat_build_slots(struct inode *dir, const unsigned char *name,
int len, int is_dir, int cluster,
struct timespec64 *ts,
struct msdos_dir_slot *slots, int *nr_slots)
{
struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
struct fat_mount_options *opts = &sbi->options;
struct msdos_dir_slot *ps;
struct msdos_dir_entry *de;
unsigned char cksum, lcase;
unsigned char msdos_name[MSDOS_NAME];
wchar_t *uname;
__le16 time, date;
u8 time_cs;
int err, ulen, usize, i;
loff_t offset;
*nr_slots = 0;
uname = __getname();
if (!uname)
return -ENOMEM;
err = xlate_to_uni(name, len, (unsigned char *)uname, &ulen, &usize,
opts->unicode_xlate, opts->utf8, sbi->nls_io);
if (err)
goto out_free;
err = vfat_is_used_badchars(uname, ulen);
if (err)
goto out_free;
err = vfat_create_shortname(dir, sbi->nls_disk, uname, ulen,
msdos_name, &lcase);
if (err < 0)
goto out_free;
else if (err == 1) {
de = (struct msdos_dir_entry *)slots;
err = 0;
goto shortname;
}
/* build the entry of long file name */
cksum = fat_checksum(msdos_name);
*nr_slots = usize / 13;
for (ps = slots, i = *nr_slots; i > 0; i--, ps++) {
ps->id = i;
ps->attr = ATTR_EXT;
ps->reserved = 0;
ps->alias_checksum = cksum;
ps->start = 0;
offset = (i - 1) * 13;
fatwchar_to16(ps->name0_4, uname + offset, 5);
fatwchar_to16(ps->name5_10, uname + offset + 5, 6);
fatwchar_to16(ps->name11_12, uname + offset + 11, 2);
}
slots[0].id |= 0x40;
de = (struct msdos_dir_entry *)ps;
shortname:
/* build the entry of 8.3 alias name */
(*nr_slots)++;
memcpy(de->name, msdos_name, MSDOS_NAME);
de->attr = is_dir ? ATTR_DIR : ATTR_ARCH;
de->lcase = lcase;
fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
de->time = de->ctime = time;
de->date = de->cdate = de->adate = date;
de->ctime_cs = time_cs;
fat_set_start(de, cluster);
de->size = 0;
out_free:
__putname(uname);
return err;
}
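/*
 * Illustrative sketch (editor's addition): each LFN slot carries 13
 * UTF-16 units split 5/6/2 across name0_4/name5_10/name11_12 as
 * above, slots are stored last-first, and the slot written first
 * (the highest sequence number) has 0x40 set in its id.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int usize = 26;			/* padded length from xlate_to_uni */
	int nr_slots = usize / 13;	/* two LFN slots */
	int i;

	for (i = nr_slots; i > 0; i--)
		printf("slot id %#x holds units %d..%d\n",
		       (i == nr_slots ? 0x40 : 0) | i,
		       (i - 1) * 13, i * 13 - 1);
	return 0;
}
#endif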
static int vfat_add_entry(struct inode *dir, const struct qstr *qname,
int is_dir, int cluster, struct timespec64 *ts,
struct fat_slot_info *sinfo)
{
struct msdos_dir_slot *slots;
unsigned int len;
int err, nr_slots;
len = vfat_striptail_len(qname);
if (len == 0)
return -ENOENT;
slots = kmalloc_array(MSDOS_SLOTS, sizeof(*slots), GFP_NOFS);
if (slots == NULL)
return -ENOMEM;
err = vfat_build_slots(dir, qname->name, len, is_dir, cluster, ts,
slots, &nr_slots);
if (err)
goto cleanup;
err = fat_add_entries(dir, slots, nr_slots, sinfo);
if (err)
goto cleanup;
/* update timestamp */
fat_truncate_time(dir, ts, S_CTIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
mark_inode_dirty(dir);
cleanup:
kfree(slots);
return err;
}
static int vfat_find(struct inode *dir, const struct qstr *qname,
struct fat_slot_info *sinfo)
{
unsigned int len = vfat_striptail_len(qname);
if (len == 0)
return -ENOENT;
return fat_search_long(dir, qname->name, len, sinfo);
}
static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
struct inode *inode;
struct dentry *alias;
int err;
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err) {
if (err == -ENOENT) {
inode = NULL;
goto out;
}
goto error;
}
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto error;
}
alias = d_find_alias(inode);
/*
* Checking "alias->d_parent == dentry->d_parent" to make sure
* FS is not corrupted (especially double linked dir).
*/
if (alias && alias->d_parent == dentry->d_parent) {
/*
		 * This inode has a non-anonymous (not DCACHE_DISCONNECTED)
		 * dentry. This means the user did ->lookup() by another
		 * name (the long name vs. its 8.3 alias) in the past.
		 *
		 * Switch to the new one for locality, if possible.
*/
if (!S_ISDIR(inode->i_mode))
d_move(alias, dentry);
iput(inode);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return alias;
} else
dput(alias);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!inode)
vfat_d_version_set(dentry, inode_query_iversion(dir));
return d_splice_alias(inode, dentry);
error:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return ERR_PTR(err);
}
static int vfat_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
struct fat_slot_info sinfo;
struct timespec64 ts;
int err;
mutex_lock(&MSDOS_SB(sb)->s_lock);
ts = current_time(dir);
err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &ts, &sinfo);
if (err)
goto out;
inode_inc_iversion(dir);
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out;
}
inode_inc_iversion(inode);
d_instantiate(dentry, inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
}
static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
int err;
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = fat_dir_empty(inode);
if (err)
goto out;
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err)
goto out;
err = fat_remove_entries(dir, &sinfo); /* and releases bh */
if (err)
goto out;
drop_nlink(dir);
clear_nlink(inode);
fat_truncate_time(inode, NULL, S_ATIME|S_MTIME);
fat_detach(inode);
vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
}
static int vfat_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
int err;
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err)
goto out;
err = fat_remove_entries(dir, &sinfo); /* and releases bh */
if (err)
goto out;
clear_nlink(inode);
fat_truncate_time(inode, NULL, S_ATIME|S_MTIME);
fat_detach(inode);
vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
}
static int vfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
struct fat_slot_info sinfo;
struct timespec64 ts;
int err, cluster;
mutex_lock(&MSDOS_SB(sb)->s_lock);
ts = current_time(dir);
cluster = fat_alloc_new_dir(dir, &ts);
if (cluster < 0) {
err = cluster;
goto out;
}
err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo);
if (err)
goto out_free;
inode_inc_iversion(dir);
inc_nlink(dir);
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
		/* the directory was completed, just return an error */
goto out;
}
inode_inc_iversion(inode);
set_nlink(inode, 2);
d_instantiate(dentry, inode);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return 0;
out_free:
fat_free_clusters(dir, cluster);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
}
static int vfat_get_dotdot_de(struct inode *inode, struct buffer_head **bh,
struct msdos_dir_entry **de)
{
if (S_ISDIR(inode->i_mode)) {
if (fat_get_dotdot_entry(inode, bh, de))
return -EIO;
}
return 0;
}
static int vfat_sync_ipos(struct inode *dir, struct inode *inode)
{
if (IS_DIRSYNC(dir))
return fat_sync_inode(inode);
mark_inode_dirty(inode);
return 0;
}
static int vfat_update_dotdot_de(struct inode *dir, struct inode *inode,
struct buffer_head *dotdot_bh,
struct msdos_dir_entry *dotdot_de)
{
fat_set_start(dotdot_de, MSDOS_I(dir)->i_logstart);
mark_buffer_dirty_inode(dotdot_bh, inode);
if (IS_DIRSYNC(dir))
return sync_dirty_buffer(dotdot_bh);
return 0;
}
static void vfat_update_dir_metadata(struct inode *dir, struct timespec64 *ts)
{
inode_inc_iversion(dir);
fat_truncate_time(dir, ts, S_CTIME | S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
mark_inode_dirty(dir);
}
static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct buffer_head *dotdot_bh;
struct msdos_dir_entry *dotdot_de = NULL;
struct inode *old_inode, *new_inode;
struct fat_slot_info old_sinfo, sinfo;
struct timespec64 ts;
loff_t new_i_pos;
int err, is_dir, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
old_inode = d_inode(old_dentry);
new_inode = d_inode(new_dentry);
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = vfat_find(old_dir, &old_dentry->d_name, &old_sinfo);
if (err)
goto out;
if (old_dir != new_dir) {
err = vfat_get_dotdot_de(old_inode, &dotdot_bh, &dotdot_de);
if (err)
goto out;
}
is_dir = S_ISDIR(old_inode->i_mode);
ts = current_time(old_dir);
if (new_inode) {
if (is_dir) {
err = fat_dir_empty(new_inode);
if (err)
goto out;
}
new_i_pos = MSDOS_I(new_inode)->i_pos;
fat_detach(new_inode);
} else {
err = vfat_add_entry(new_dir, &new_dentry->d_name, is_dir, 0,
&ts, &sinfo);
if (err)
goto out;
new_i_pos = sinfo.i_pos;
}
inode_inc_iversion(new_dir);
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
err = vfat_sync_ipos(new_dir, old_inode);
if (err)
goto error_inode;
if (dotdot_de) {
err = vfat_update_dotdot_de(new_dir, old_inode, dotdot_bh,
dotdot_de);
if (err)
goto error_dotdot;
drop_nlink(old_dir);
if (!new_inode)
inc_nlink(new_dir);
}
err = fat_remove_entries(old_dir, &old_sinfo); /* and releases bh */
old_sinfo.bh = NULL;
if (err)
goto error_dotdot;
vfat_update_dir_metadata(old_dir, &ts);
if (new_inode) {
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
fat_truncate_time(new_inode, &ts, S_CTIME);
}
out:
brelse(sinfo.bh);
brelse(dotdot_bh);
brelse(old_sinfo.bh);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
error_dotdot:
/* data cluster is shared, serious corruption */
corrupt = 1;
if (dotdot_de) {
corrupt |= vfat_update_dotdot_de(old_dir, old_inode, dotdot_bh,
dotdot_de);
}
error_inode:
fat_detach(old_inode);
fat_attach(old_inode, old_sinfo.i_pos);
if (new_inode) {
fat_attach(new_inode, new_i_pos);
if (corrupt)
corrupt |= fat_sync_inode(new_inode);
} else {
/*
		 * If the new entry was not sharing the data cluster, it
* shouldn't be serious corruption.
*/
int err2 = fat_remove_entries(new_dir, &sinfo);
if (corrupt)
corrupt |= err2;
sinfo.bh = NULL;
}
if (corrupt < 0) {
fat_fs_error(new_dir->i_sb,
"%s: Filesystem corrupted (i_pos %lld)",
__func__, sinfo.i_pos);
}
goto out;
}
static void vfat_exchange_ipos(struct inode *old_inode, struct inode *new_inode,
loff_t old_i_pos, loff_t new_i_pos)
{
fat_detach(old_inode);
fat_detach(new_inode);
fat_attach(old_inode, new_i_pos);
fat_attach(new_inode, old_i_pos);
}
static void vfat_move_nlink(struct inode *src, struct inode *dst)
{
drop_nlink(src);
inc_nlink(dst);
}
static int vfat_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct buffer_head *old_dotdot_bh = NULL, *new_dotdot_bh = NULL;
struct msdos_dir_entry *old_dotdot_de = NULL, *new_dotdot_de = NULL;
struct inode *old_inode, *new_inode;
struct timespec64 ts = current_time(old_dir);
loff_t old_i_pos, new_i_pos;
int err, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
old_inode = d_inode(old_dentry);
new_inode = d_inode(new_dentry);
	/* Acquire the superblock lock so the operation is atomic */
mutex_lock(&MSDOS_SB(sb)->s_lock);
/* if directories are not the same, get ".." info to update */
if (old_dir != new_dir) {
err = vfat_get_dotdot_de(old_inode, &old_dotdot_bh,
&old_dotdot_de);
if (err)
goto out;
err = vfat_get_dotdot_de(new_inode, &new_dotdot_bh,
&new_dotdot_de);
if (err)
goto out;
}
old_i_pos = MSDOS_I(old_inode)->i_pos;
new_i_pos = MSDOS_I(new_inode)->i_pos;
vfat_exchange_ipos(old_inode, new_inode, old_i_pos, new_i_pos);
err = vfat_sync_ipos(old_dir, new_inode);
if (err)
goto error_exchange;
err = vfat_sync_ipos(new_dir, old_inode);
if (err)
goto error_exchange;
/* update ".." directory entry info */
if (old_dotdot_de) {
err = vfat_update_dotdot_de(new_dir, old_inode, old_dotdot_bh,
old_dotdot_de);
if (err)
goto error_old_dotdot;
}
if (new_dotdot_de) {
err = vfat_update_dotdot_de(old_dir, new_inode, new_dotdot_bh,
new_dotdot_de);
if (err)
goto error_new_dotdot;
}
/* if cross directory and only one is a directory, adjust nlink */
if (!old_dotdot_de != !new_dotdot_de) {
if (old_dotdot_de)
vfat_move_nlink(old_dir, new_dir);
else
vfat_move_nlink(new_dir, old_dir);
}
vfat_update_dir_metadata(old_dir, &ts);
/* if directories are not the same, update new_dir as well */
if (old_dir != new_dir)
vfat_update_dir_metadata(new_dir, &ts);
out:
brelse(old_dotdot_bh);
brelse(new_dotdot_bh);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
error_new_dotdot:
if (new_dotdot_de) {
corrupt |= vfat_update_dotdot_de(new_dir, new_inode,
new_dotdot_bh, new_dotdot_de);
}
error_old_dotdot:
if (old_dotdot_de) {
corrupt |= vfat_update_dotdot_de(old_dir, old_inode,
old_dotdot_bh, old_dotdot_de);
}
error_exchange:
vfat_exchange_ipos(old_inode, new_inode, new_i_pos, old_i_pos);
corrupt |= vfat_sync_ipos(new_dir, new_inode);
corrupt |= vfat_sync_ipos(old_dir, old_inode);
if (corrupt < 0) {
fat_fs_error(new_dir->i_sb,
"%s: Filesystem corrupted (i_pos %lld, %lld)",
__func__, old_i_pos, new_i_pos);
}
goto out;
}
static int vfat_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
return -EINVAL;
if (flags & RENAME_EXCHANGE) {
return vfat_rename_exchange(old_dir, old_dentry,
new_dir, new_dentry);
}
/* VFS already handled RENAME_NOREPLACE, handle it as a normal rename */
return vfat_rename(old_dir, old_dentry, new_dir, new_dentry);
}
static const struct inode_operations vfat_dir_inode_operations = {
.create = vfat_create,
.lookup = vfat_lookup,
.unlink = vfat_unlink,
.mkdir = vfat_mkdir,
.rmdir = vfat_rmdir,
.rename = vfat_rename2,
.setattr = fat_setattr,
.getattr = fat_getattr,
.update_time = fat_update_time,
};
static void setup(struct super_block *sb)
{
MSDOS_SB(sb)->dir_ops = &vfat_dir_inode_operations;
if (MSDOS_SB(sb)->options.name_check != 's')
sb->s_d_op = &vfat_ci_dentry_ops;
else
sb->s_d_op = &vfat_dentry_ops;
}
static int vfat_fill_super(struct super_block *sb, void *data, int silent)
{
return fat_fill_super(sb, data, silent, 1, setup);
}
static struct dentry *vfat_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
}
static struct file_system_type vfat_fs_type = {
.owner = THIS_MODULE,
.name = "vfat",
.mount = vfat_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("vfat");
static int __init init_vfat_fs(void)
{
return register_filesystem(&vfat_fs_type);
}
static void __exit exit_vfat_fs(void)
{
unregister_filesystem(&vfat_fs_type);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFAT filesystem support");
MODULE_AUTHOR("Gordon Chaffee");
module_init(init_vfat_fs)
module_exit(exit_vfat_fs)
| linux-master | fs/fat/namei_vfat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KUnit tests for FAT filesystems.
*
* Copyright (C) 2020 Google LLC.
* Author: David Gow <[email protected]>
*/
#include <kunit/test.h>
#include "fat.h"
static void fat_checksum_test(struct kunit *test)
{
/* With no extension. */
KUNIT_EXPECT_EQ(test, fat_checksum("VMLINUX "), (u8)44);
/* With 3-letter extension. */
KUNIT_EXPECT_EQ(test, fat_checksum("README TXT"), (u8)115);
/* With short (1-letter) extension. */
KUNIT_EXPECT_EQ(test, fat_checksum("ABCDEFGHA "), (u8)98);
}
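/*
 * For reference, a minimal sketch of the FAT short-name checksum the
 * assertions above exercise (rotate right by one, then add each of the
 * 11 name bytes).  sketch_fat_checksum() is a hypothetical helper
 * written here for illustration only, not part of this file.
 */
static u8 sketch_fat_checksum(const u8 name[11])
{
	u8 sum = 0;
	int i;

	for (i = 0; i < 11; i++)
		sum = ((sum & 1) << 7) + (sum >> 1) + name[i];
	return sum;	/* e.g. "VMLINUX    " yields 44, as asserted above */
}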
struct fat_timestamp_testcase {
const char *name;
struct timespec64 ts;
__le16 time;
__le16 date;
u8 cs;
int time_offset;
};
static struct fat_timestamp_testcase time_test_cases[] = {
{
.name = "Earliest possible UTC (1980-01-01 00:00:00)",
.ts = {.tv_sec = 315532800LL, .tv_nsec = 0L},
.time = cpu_to_le16(0),
.date = cpu_to_le16(33),
.cs = 0,
.time_offset = 0,
},
{
.name = "Latest possible UTC (2107-12-31 23:59:58)",
.ts = {.tv_sec = 4354819198LL, .tv_nsec = 0L},
.time = cpu_to_le16(49021),
.date = cpu_to_le16(65439),
.cs = 0,
.time_offset = 0,
},
{
.name = "Earliest possible (UTC-11) (== 1979-12-31 13:00:00 UTC)",
.ts = {.tv_sec = 315493200LL, .tv_nsec = 0L},
.time = cpu_to_le16(0),
.date = cpu_to_le16(33),
.cs = 0,
.time_offset = 11 * 60,
},
{
.name = "Latest possible (UTC+11) (== 2108-01-01 10:59:58 UTC)",
.ts = {.tv_sec = 4354858798LL, .tv_nsec = 0L},
.time = cpu_to_le16(49021),
.date = cpu_to_le16(65439),
.cs = 0,
.time_offset = -11 * 60,
},
{
.name = "Leap Day / Year (1996-02-29 00:00:00)",
.ts = {.tv_sec = 825552000LL, .tv_nsec = 0L},
.time = cpu_to_le16(0),
.date = cpu_to_le16(8285),
.cs = 0,
.time_offset = 0,
},
{
.name = "Year 2000 is leap year (2000-02-29 00:00:00)",
.ts = {.tv_sec = 951782400LL, .tv_nsec = 0L},
.time = cpu_to_le16(0),
.date = cpu_to_le16(10333),
.cs = 0,
.time_offset = 0,
},
{
.name = "Year 2100 not leap year (2100-03-01 00:00:00)",
.ts = {.tv_sec = 4107542400LL, .tv_nsec = 0L},
.time = cpu_to_le16(0),
.date = cpu_to_le16(61537),
.cs = 0,
.time_offset = 0,
},
{
.name = "Leap year + timezone UTC+1 (== 2004-02-29 00:30:00 UTC)",
.ts = {.tv_sec = 1078014600LL, .tv_nsec = 0L},
.time = cpu_to_le16(48064),
.date = cpu_to_le16(12380),
.cs = 0,
.time_offset = -60,
},
{
.name = "Leap year + timezone UTC-1 (== 2004-02-29 23:30:00 UTC)",
.ts = {.tv_sec = 1078097400LL, .tv_nsec = 0L},
.time = cpu_to_le16(960),
.date = cpu_to_le16(12385),
.cs = 0,
.time_offset = 60,
},
{
.name = "VFAT odd-second resolution (1999-12-31 23:59:59)",
.ts = {.tv_sec = 946684799LL, .tv_nsec = 0L},
.time = cpu_to_le16(49021),
.date = cpu_to_le16(10143),
.cs = 100,
.time_offset = 0,
},
{
.name = "VFAT 10ms resolution (1980-01-01 00:00:00:0010)",
.ts = {.tv_sec = 315532800LL, .tv_nsec = 10000000L},
.time = cpu_to_le16(0),
.date = cpu_to_le16(33),
.cs = 1,
.time_offset = 0,
},
};
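/*
 * Illustrative decode of the on-disk 16-bit words in the table above;
 * sketch_decode_fat_datetime() is a hypothetical helper, not part of
 * this file.  The date word packs year-since-1980/month/day as 7/4/5
 * bits and the time word packs hour/minute/2-second units as 5/6/5
 * bits, so date 33 decodes to 1980-01-01 and time 49021 to 23:59:58,
 * matching the first two cases.
 */
static void sketch_decode_fat_datetime(u16 date, u16 time)
{
	unsigned int year  = 1980 + (date >> 9);
	unsigned int month = (date >> 5) & 0xf;
	unsigned int day   = date & 0x1f;
	unsigned int hour  = time >> 11;
	unsigned int min   = (time >> 5) & 0x3f;
	unsigned int sec   = (time & 0x1f) * 2;

	pr_info("%04u-%02u-%02u %02u:%02u:%02u\n",
		year, month, day, hour, min, sec);
}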
static void time_testcase_desc(struct fat_timestamp_testcase *t,
char *desc)
{
strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
KUNIT_ARRAY_PARAM(fat_time, time_test_cases, time_testcase_desc);
static void fat_time_fat2unix_test(struct kunit *test)
{
static struct msdos_sb_info fake_sb;
struct timespec64 ts;
struct fat_timestamp_testcase *testcase =
(struct fat_timestamp_testcase *)test->param_value;
fake_sb.options.tz_set = 1;
fake_sb.options.time_offset = testcase->time_offset;
fat_time_fat2unix(&fake_sb, &ts,
testcase->time,
testcase->date,
testcase->cs);
KUNIT_EXPECT_EQ_MSG(test,
testcase->ts.tv_sec,
ts.tv_sec,
"Timestamp mismatch (seconds)\n");
KUNIT_EXPECT_EQ_MSG(test,
testcase->ts.tv_nsec,
ts.tv_nsec,
"Timestamp mismatch (nanoseconds)\n");
}
static void fat_time_unix2fat_test(struct kunit *test)
{
static struct msdos_sb_info fake_sb;
__le16 date, time;
u8 cs;
struct fat_timestamp_testcase *testcase =
(struct fat_timestamp_testcase *)test->param_value;
fake_sb.options.tz_set = 1;
fake_sb.options.time_offset = testcase->time_offset;
fat_time_unix2fat(&fake_sb, &testcase->ts,
&time, &date, &cs);
KUNIT_EXPECT_EQ_MSG(test,
le16_to_cpu(testcase->time),
le16_to_cpu(time),
"Time mismatch\n");
KUNIT_EXPECT_EQ_MSG(test,
le16_to_cpu(testcase->date),
le16_to_cpu(date),
"Date mismatch\n");
KUNIT_EXPECT_EQ_MSG(test,
testcase->cs,
cs,
"Centisecond mismatch\n");
}
static struct kunit_case fat_test_cases[] = {
KUNIT_CASE(fat_checksum_test),
KUNIT_CASE_PARAM(fat_time_fat2unix_test, fat_time_gen_params),
KUNIT_CASE_PARAM(fat_time_unix2fat_test, fat_time_gen_params),
{},
};
static struct kunit_suite fat_test_suite = {
.name = "fat_test",
.test_cases = fat_test_cases,
};
kunit_test_suites(&fat_test_suite);
MODULE_LICENSE("GPL v2");
| linux-master | fs/fat/fat_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, OGAWA Hirofumi
*/
#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev-defs.h>
#include "fat.h"
struct fatent_operations {
void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
void (*ent_set_ptr)(struct fat_entry *, int);
int (*ent_bread)(struct super_block *, struct fat_entry *,
int, sector_t);
int (*ent_get)(struct fat_entry *);
void (*ent_put)(struct fat_entry *, int);
int (*ent_next)(struct fat_entry *);
};
static DEFINE_SPINLOCK(fat12_entry_lock);
static void fat12_ent_blocknr(struct super_block *sb, int entry,
int *offset, sector_t *blocknr)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int bytes = entry + (entry >> 1);
WARN_ON(!fat_valid_entry(sbi, entry));
*offset = bytes & (sb->s_blocksize - 1);
*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
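/*
 * Worked example for the 1.5-bytes-per-entry math above: entry 3
 * starts at byte 3 + (3 >> 1) = 4 of the FAT, so with 512-byte blocks
 * it lives at fat_start + 0, offset 4.  An entry whose second byte
 * crosses a block boundary is the two-buffer_head case that
 * fat12_ent_bread() below handles.
 */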
static void fat_ent_blocknr(struct super_block *sb, int entry,
int *offset, sector_t *blocknr)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int bytes = (entry << sbi->fatent_shift);
WARN_ON(!fat_valid_entry(sbi, entry));
*offset = bytes & (sb->s_blocksize - 1);
*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
struct buffer_head **bhs = fatent->bhs;
if (fatent->nr_bhs == 1) {
WARN_ON(offset >= (bhs[0]->b_size - 1));
fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
} else {
WARN_ON(offset != (bhs[0]->b_size - 1));
fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
fatent->u.ent12_p[1] = bhs[1]->b_data;
}
}
static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
WARN_ON(offset & (2 - 1));
fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}
static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
WARN_ON(offset & (4 - 1));
fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
int offset, sector_t blocknr)
{
struct buffer_head **bhs = fatent->bhs;
WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
bhs[0] = sb_bread(sb, blocknr);
if (!bhs[0])
goto err;
if ((offset + 1) < sb->s_blocksize)
fatent->nr_bhs = 1;
else {
/* This entry is block boundary, it needs the next block */
blocknr++;
bhs[1] = sb_bread(sb, blocknr);
if (!bhs[1])
goto err_brelse;
fatent->nr_bhs = 2;
}
fat12_ent_set_ptr(fatent, offset);
return 0;
err_brelse:
brelse(bhs[0]);
err:
fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
(llu)blocknr);
return -EIO;
}
static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
int offset, sector_t blocknr)
{
const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
fatent->bhs[0] = sb_bread(sb, blocknr);
if (!fatent->bhs[0]) {
fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
(llu)blocknr);
return -EIO;
}
fatent->nr_bhs = 1;
ops->ent_set_ptr(fatent, offset);
return 0;
}
static int fat12_ent_get(struct fat_entry *fatent)
{
u8 **ent12_p = fatent->u.ent12_p;
int next;
spin_lock(&fat12_entry_lock);
if (fatent->entry & 1)
next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
else
next = (*ent12_p[1] << 8) | *ent12_p[0];
spin_unlock(&fat12_entry_lock);
next &= 0x0fff;
if (next >= BAD_FAT12)
next = FAT_ENT_EOF;
return next;
}
static int fat16_ent_get(struct fat_entry *fatent)
{
int next = le16_to_cpu(*fatent->u.ent16_p);
WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
if (next >= BAD_FAT16)
next = FAT_ENT_EOF;
return next;
}
static int fat32_ent_get(struct fat_entry *fatent)
{
int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
if (next >= BAD_FAT32)
next = FAT_ENT_EOF;
return next;
}
static void fat12_ent_put(struct fat_entry *fatent, int new)
{
u8 **ent12_p = fatent->u.ent12_p;
if (new == FAT_ENT_EOF)
new = EOF_FAT12;
spin_lock(&fat12_entry_lock);
if (fatent->entry & 1) {
*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
*ent12_p[1] = new >> 4;
} else {
*ent12_p[0] = new & 0xff;
*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
}
spin_unlock(&fat12_entry_lock);
mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
if (fatent->nr_bhs == 2)
mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}
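/*
 * Sketch of the nibble layout the two helpers above manipulate
 * (sketch_fat12_unpack() is a hypothetical stand-alone helper, not
 * part of this file): two consecutive 12-bit entries share three
 * bytes, and given a pointer p to an entry's first byte, the value is
 * recovered as below, mirroring fat12_ent_get().
 */
static unsigned int sketch_fat12_unpack(const u8 *p, int entry)
{
	if (entry & 1)
		return ((p[0] >> 4) | (p[1] << 4)) & 0x0fff;
	return (p[0] | (p[1] << 8)) & 0x0fff;
}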
static void fat16_ent_put(struct fat_entry *fatent, int new)
{
if (new == FAT_ENT_EOF)
new = EOF_FAT16;
*fatent->u.ent16_p = cpu_to_le16(new);
mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
static void fat32_ent_put(struct fat_entry *fatent, int new)
{
WARN_ON(new & 0xf0000000);
new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
*fatent->u.ent32_p = cpu_to_le32(new);
mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
static int fat12_ent_next(struct fat_entry *fatent)
{
u8 **ent12_p = fatent->u.ent12_p;
struct buffer_head **bhs = fatent->bhs;
u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);
fatent->entry++;
if (fatent->nr_bhs == 1) {
WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
(bhs[0]->b_size - 2)));
WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
(bhs[0]->b_size - 1)));
if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
ent12_p[0] = nextp - 1;
ent12_p[1] = nextp;
return 1;
}
} else {
WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
(bhs[0]->b_size - 1)));
WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
ent12_p[0] = nextp - 1;
ent12_p[1] = nextp;
brelse(bhs[0]);
bhs[0] = bhs[1];
fatent->nr_bhs = 1;
return 1;
}
ent12_p[0] = NULL;
ent12_p[1] = NULL;
return 0;
}
static int fat16_ent_next(struct fat_entry *fatent)
{
const struct buffer_head *bh = fatent->bhs[0];
fatent->entry++;
if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
fatent->u.ent16_p++;
return 1;
}
fatent->u.ent16_p = NULL;
return 0;
}
static int fat32_ent_next(struct fat_entry *fatent)
{
const struct buffer_head *bh = fatent->bhs[0];
fatent->entry++;
if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
fatent->u.ent32_p++;
return 1;
}
fatent->u.ent32_p = NULL;
return 0;
}
static const struct fatent_operations fat12_ops = {
.ent_blocknr = fat12_ent_blocknr,
.ent_set_ptr = fat12_ent_set_ptr,
.ent_bread = fat12_ent_bread,
.ent_get = fat12_ent_get,
.ent_put = fat12_ent_put,
.ent_next = fat12_ent_next,
};
static const struct fatent_operations fat16_ops = {
.ent_blocknr = fat_ent_blocknr,
.ent_set_ptr = fat16_ent_set_ptr,
.ent_bread = fat_ent_bread,
.ent_get = fat16_ent_get,
.ent_put = fat16_ent_put,
.ent_next = fat16_ent_next,
};
static const struct fatent_operations fat32_ops = {
.ent_blocknr = fat_ent_blocknr,
.ent_set_ptr = fat32_ent_set_ptr,
.ent_bread = fat_ent_bread,
.ent_get = fat32_ent_get,
.ent_put = fat32_ent_put,
.ent_next = fat32_ent_next,
};
static inline void lock_fat(struct msdos_sb_info *sbi)
{
mutex_lock(&sbi->fat_lock);
}
static inline void unlock_fat(struct msdos_sb_info *sbi)
{
mutex_unlock(&sbi->fat_lock);
}
void fat_ent_access_init(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
mutex_init(&sbi->fat_lock);
if (is_fat32(sbi)) {
sbi->fatent_shift = 2;
sbi->fatent_ops = &fat32_ops;
} else if (is_fat16(sbi)) {
sbi->fatent_shift = 1;
sbi->fatent_ops = &fat16_ops;
} else if (is_fat12(sbi)) {
sbi->fatent_shift = -1;
sbi->fatent_ops = &fat12_ops;
} else {
fat_fs_error(sb, "invalid FAT variant, %u bits", sbi->fat_bits);
}
}
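/*
 * Note: fatent_shift is log2(bytes per FAT entry) for FAT16 (1) and
 * FAT32 (2), and feeds the generic fat_ent_blocknr().  FAT12's
 * 1.5-byte stride has no power-of-two shift, so -1 is a sentinel and
 * fat12_ent_blocknr() computes the byte offset directly.
 */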
static void mark_fsinfo_dirty(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
if (sb_rdonly(sb) || !is_fat32(sbi))
return;
__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
}
static inline int fat_ent_update_ptr(struct super_block *sb,
struct fat_entry *fatent,
int offset, sector_t blocknr)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
struct buffer_head **bhs = fatent->bhs;
	/* Do this fatent's buffers include this entry? */
if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
return 0;
if (is_fat12(sbi)) {
if ((offset + 1) < sb->s_blocksize) {
/* This entry is on bhs[0]. */
if (fatent->nr_bhs == 2) {
brelse(bhs[1]);
fatent->nr_bhs = 1;
}
} else {
/* This entry needs the next block. */
if (fatent->nr_bhs != 2)
return 0;
if (bhs[1]->b_blocknr != (blocknr + 1))
return 0;
}
}
ops->ent_set_ptr(fatent, offset);
return 1;
}
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
const struct fatent_operations *ops = sbi->fatent_ops;
int err, offset;
sector_t blocknr;
if (!fat_valid_entry(sbi, entry)) {
fatent_brelse(fatent);
fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
return -EIO;
}
fatent_set_entry(fatent, entry);
ops->ent_blocknr(sb, entry, &offset, &blocknr);
if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
fatent_brelse(fatent);
err = ops->ent_bread(sb, fatent, offset, blocknr);
if (err)
return err;
}
return ops->ent_get(fatent);
}
/* FIXME: We could write the blocks as one bigger chunk. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
int nr_bhs)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *c_bh;
int err, n, copy;
err = 0;
for (copy = 1; copy < sbi->fats; copy++) {
sector_t backup_fat = sbi->fat_length * copy;
for (n = 0; n < nr_bhs; n++) {
c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
if (!c_bh) {
err = -ENOMEM;
goto error;
}
/* Avoid race with userspace read via bdev */
lock_buffer(c_bh);
memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
set_buffer_uptodate(c_bh);
unlock_buffer(c_bh);
mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
if (sb->s_flags & SB_SYNCHRONOUS)
err = sync_dirty_buffer(c_bh);
brelse(c_bh);
if (err)
goto error;
}
}
error:
return err;
}
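/*
 * Mirroring sketch: with sbi->fats == 2 and a FAT of fat_length
 * sectors, the block at LBA b in the primary FAT is copied to
 * b + fat_length in the backup, keeping both copies byte-identical
 * after every entry update.
 */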
int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
int new, int wait)
{
struct super_block *sb = inode->i_sb;
const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
int err;
ops->ent_put(fatent, new);
if (wait) {
err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
if (err)
return err;
}
return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}
static inline int fat_ent_next(struct msdos_sb_info *sbi,
struct fat_entry *fatent)
{
if (sbi->fatent_ops->ent_next(fatent)) {
if (fatent->entry < sbi->max_cluster)
return 1;
}
return 0;
}
static inline int fat_ent_read_block(struct super_block *sb,
struct fat_entry *fatent)
{
const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
sector_t blocknr;
int offset;
fatent_brelse(fatent);
ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
return ops->ent_bread(sb, fatent, offset, blocknr);
}
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
struct fat_entry *fatent)
{
int n, i;
for (n = 0; n < fatent->nr_bhs; n++) {
for (i = 0; i < *nr_bhs; i++) {
if (fatent->bhs[n] == bhs[i])
break;
}
if (i == *nr_bhs) {
get_bh(fatent->bhs[n]);
bhs[i] = fatent->bhs[n];
(*nr_bhs)++;
}
}
}
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
struct fat_entry fatent, prev_ent;
struct buffer_head *bhs[MAX_BUF_PER_PAGE];
int i, count, err, nr_bhs, idx_clus;
BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2)); /* fixed limit */
lock_fat(sbi);
if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
sbi->free_clusters < nr_cluster) {
unlock_fat(sbi);
return -ENOSPC;
}
err = nr_bhs = idx_clus = 0;
count = FAT_START_ENT;
fatent_init(&prev_ent);
fatent_init(&fatent);
fatent_set_entry(&fatent, sbi->prev_free + 1);
while (count < sbi->max_cluster) {
if (fatent.entry >= sbi->max_cluster)
fatent.entry = FAT_START_ENT;
fatent_set_entry(&fatent, fatent.entry);
err = fat_ent_read_block(sb, &fatent);
if (err)
goto out;
/* Find the free entries in a block */
do {
if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
int entry = fatent.entry;
/* make the cluster chain */
ops->ent_put(&fatent, FAT_ENT_EOF);
if (prev_ent.nr_bhs)
ops->ent_put(&prev_ent, entry);
fat_collect_bhs(bhs, &nr_bhs, &fatent);
sbi->prev_free = entry;
if (sbi->free_clusters != -1)
sbi->free_clusters--;
cluster[idx_clus] = entry;
idx_clus++;
if (idx_clus == nr_cluster)
goto out;
/*
				 * fat_collect_bhs() takes a reference on
				 * the bhs, so prev_ent stays usable.
*/
prev_ent = fatent;
}
count++;
if (count == sbi->max_cluster)
break;
} while (fat_ent_next(sbi, &fatent));
}
/* Couldn't allocate the free entries */
sbi->free_clusters = 0;
sbi->free_clus_valid = 1;
err = -ENOSPC;
out:
unlock_fat(sbi);
mark_fsinfo_dirty(sb);
fatent_brelse(&fatent);
if (!err) {
if (inode_needs_sync(inode))
err = fat_sync_bhs(bhs, nr_bhs);
if (!err)
err = fat_mirror_bhs(sb, bhs, nr_bhs);
}
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
if (err && idx_clus)
fat_free_clusters(inode, cluster[0]);
return err;
}
int fat_free_clusters(struct inode *inode, int cluster)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
struct fat_entry fatent;
struct buffer_head *bhs[MAX_BUF_PER_PAGE];
int i, err, nr_bhs;
int first_cl = cluster, dirty_fsinfo = 0;
nr_bhs = 0;
fatent_init(&fatent);
lock_fat(sbi);
do {
cluster = fat_ent_read(inode, &fatent, cluster);
if (cluster < 0) {
err = cluster;
goto error;
} else if (cluster == FAT_ENT_FREE) {
fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
__func__);
err = -EIO;
goto error;
}
if (sbi->options.discard) {
/*
* Issue discard for the sectors we no longer
* care about, batching contiguous clusters
* into one request
*/
if (cluster != fatent.entry + 1) {
int nr_clus = fatent.entry - first_cl + 1;
sb_issue_discard(sb,
fat_clus_to_blknr(sbi, first_cl),
nr_clus * sbi->sec_per_clus,
GFP_NOFS, 0);
first_cl = cluster;
}
}
ops->ent_put(&fatent, FAT_ENT_FREE);
if (sbi->free_clusters != -1) {
sbi->free_clusters++;
dirty_fsinfo = 1;
}
if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
if (sb->s_flags & SB_SYNCHRONOUS) {
err = fat_sync_bhs(bhs, nr_bhs);
if (err)
goto error;
}
err = fat_mirror_bhs(sb, bhs, nr_bhs);
if (err)
goto error;
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
nr_bhs = 0;
}
fat_collect_bhs(bhs, &nr_bhs, &fatent);
} while (cluster != FAT_ENT_EOF);
if (sb->s_flags & SB_SYNCHRONOUS) {
err = fat_sync_bhs(bhs, nr_bhs);
if (err)
goto error;
}
err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
fatent_brelse(&fatent);
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
unlock_fat(sbi);
if (dirty_fsinfo)
mark_fsinfo_dirty(sb);
return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);
struct fatent_ra {
sector_t cur;
sector_t limit;
unsigned int ra_blocks;
sector_t ra_advance;
sector_t ra_next;
sector_t ra_limit;
};
static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
struct fat_entry *fatent, int ent_limit)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
sector_t blocknr, block_end;
int offset;
/*
	 * This is a sequential read, so read ahead ra_pages * 2 (but try
	 * to align to the optimal hardware IO size).
	 * [BTW, 128kb covers all of the FAT sectors for FAT12 and FAT16]
*/
unsigned long ra_pages = sb->s_bdi->ra_pages;
unsigned int reada_blocks;
if (fatent->entry >= ent_limit)
return;
if (ra_pages > sb->s_bdi->io_pages)
ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);
/* Initialize the range for sequential read */
ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end);
ra->cur = 0;
ra->limit = (block_end + 1) - blocknr;
	/* The window advances in steps of half the readahead size */
ra->ra_blocks = reada_blocks >> 1;
ra->ra_advance = ra->cur;
ra->ra_next = ra->cur;
ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
}
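/*
 * Worked example for the window math above: with 4KB pages, 512-byte
 * blocks and ra_pages == 32, reada_blocks = 32 << (12 - 9 + 1) = 512,
 * so up to 512 FAT blocks are read ahead initially and the window then
 * advances in 256-block steps, capped at ra->limit.
 */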
/* Assumed to be called before reading a new block (increments ->cur). */
static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra,
struct fat_entry *fatent)
{
if (ra->ra_next >= ra->ra_limit)
return;
if (ra->cur >= ra->ra_advance) {
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
struct blk_plug plug;
sector_t blocknr, diff;
int offset;
ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
diff = blocknr - ra->cur;
blk_start_plug(&plug);
/*
* FIXME: we would want to directly use the bio with
* pages to reduce the number of segments.
*/
for (; ra->ra_next < ra->ra_limit; ra->ra_next++)
sb_breadahead(sb, ra->ra_next + diff);
blk_finish_plug(&plug);
/* Advance the readahead window */
ra->ra_advance += ra->ra_blocks;
ra->ra_limit += min_t(sector_t,
ra->ra_blocks, ra->limit - ra->ra_limit);
}
ra->cur++;
}
int fat_count_free_clusters(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
struct fat_entry fatent;
struct fatent_ra fatent_ra;
int err = 0, free;
lock_fat(sbi);
if (sbi->free_clusters != -1 && sbi->free_clus_valid)
goto out;
free = 0;
fatent_init(&fatent);
fatent_set_entry(&fatent, FAT_START_ENT);
fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster);
while (fatent.entry < sbi->max_cluster) {
/* readahead of fat blocks */
fat_ent_reada(sb, &fatent_ra, &fatent);
err = fat_ent_read_block(sb, &fatent);
if (err)
goto out;
do {
if (ops->ent_get(&fatent) == FAT_ENT_FREE)
free++;
} while (fat_ent_next(sbi, &fatent));
cond_resched();
}
sbi->free_clusters = free;
sbi->free_clus_valid = 1;
mark_fsinfo_dirty(sb);
fatent_brelse(&fatent);
out:
unlock_fat(sbi);
return err;
}
static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
}
int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
struct fat_entry fatent;
struct fatent_ra fatent_ra;
u64 ent_start, ent_end, minlen, trimmed = 0;
u32 free = 0;
int err = 0;
/*
	 * FAT data is organized as clusters, so trim at cluster granularity.
	 *
	 * fstrim_range is in bytes; convert the values to cluster indexes.
	 * Treat sectors before the data region as all used, so they are
	 * never trimmed.
*/
ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
minlen = range->minlen >> sbi->cluster_bits;
if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
return -EINVAL;
if (ent_end >= sbi->max_cluster)
ent_end = sbi->max_cluster - 1;
fatent_init(&fatent);
lock_fat(sbi);
fatent_set_entry(&fatent, ent_start);
fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1);
while (fatent.entry <= ent_end) {
/* readahead of fat blocks */
fat_ent_reada(sb, &fatent_ra, &fatent);
err = fat_ent_read_block(sb, &fatent);
if (err)
goto error;
do {
if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
free++;
} else if (free) {
if (free >= minlen) {
u32 clus = fatent.entry - free;
err = fat_trim_clusters(sb, clus, free);
if (err && err != -EOPNOTSUPP)
goto error;
if (!err)
trimmed += free;
err = 0;
}
free = 0;
}
} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);
if (fatal_signal_pending(current)) {
err = -ERESTARTSYS;
goto error;
}
if (need_resched()) {
fatent_brelse(&fatent);
unlock_fat(sbi);
cond_resched();
lock_fat(sbi);
}
}
/* handle scenario when tail entries are all free */
if (free && free >= minlen) {
u32 clus = fatent.entry - free;
err = fat_trim_clusters(sb, clus, free);
if (err && err != -EOPNOTSUPP)
goto error;
if (!err)
trimmed += free;
err = 0;
}
error:
fatent_brelse(&fatent);
unlock_fat(sbi);
range->len = trimmed << sbi->cluster_bits;
return err;
}
| linux-master | fs/fat/fatent.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/fat/dir.c
*
* directory handling functions for fat-based filesystems
*
* Written 1992,1993 by Werner Almesberger
*
* Hidden files 1995 by Albert Cahalan <[email protected]> <[email protected]>
*
* VFAT extensions by Gordon Chaffee <[email protected]>
* Merged with msdos fs by Henrik Storner <[email protected]>
* Rewritten for constant inumbers. Plugged buffer overrun in readdir(). AV
* Short name translation 1999, 2001 by Wolfram Pienkoss <[email protected]>
*/
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include "fat.h"
/*
* Maximum buffer size of short name.
* [(MSDOS_NAME + '.') * max one char + nul]
* For msdos style, ['.' (hidden) + MSDOS_NAME + '.' + nul]
*/
#define FAT_MAX_SHORT_SIZE ((MSDOS_NAME + 1) * NLS_MAX_CHARSET_SIZE + 1)
/*
* Maximum buffer size of unicode chars from slots.
* [(max longname slots * 13 (size in a slot) + nul) * sizeof(wchar_t)]
*/
#define FAT_MAX_UNI_CHARS ((MSDOS_SLOTS - 1) * 13 + 1)
#define FAT_MAX_UNI_SIZE (FAT_MAX_UNI_CHARS * sizeof(wchar_t))
static inline unsigned char fat_tolower(unsigned char c)
{
return ((c >= 'A') && (c <= 'Z')) ? c+32 : c;
}
static inline loff_t fat_make_i_pos(struct super_block *sb,
struct buffer_head *bh,
struct msdos_dir_entry *de)
{
return ((loff_t)bh->b_blocknr << MSDOS_SB(sb)->dir_per_block_bits)
| (de - (struct msdos_dir_entry *)bh->b_data);
}
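/*
 * i_pos therefore uniquely names a directory entry on disk: the block
 * number in the high bits and the entry index within that block in
 * the low dir_per_block_bits.  A sketch of the inverse mapping
 * (sketch_i_pos_split() is an assumed helper for illustration, not in
 * this file):
 */
static inline void sketch_i_pos_split(struct super_block *sb, loff_t i_pos,
				      sector_t *blocknr, int *index)
{
	*blocknr = i_pos >> MSDOS_SB(sb)->dir_per_block_bits;
	*index = i_pos & (MSDOS_SB(sb)->dir_per_block - 1);
}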
static inline void fat_dir_readahead(struct inode *dir, sector_t iblock,
sector_t phys)
{
struct super_block *sb = dir->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh;
int sec;
	/* Not the first sector of a cluster, or sec_per_clus == 1 */
if ((iblock & (sbi->sec_per_clus - 1)) || sbi->sec_per_clus == 1)
return;
/* root dir of FAT12/FAT16 */
if (!is_fat32(sbi) && (dir->i_ino == MSDOS_ROOT_INO))
return;
bh = sb_find_get_block(sb, phys);
if (bh == NULL || !buffer_uptodate(bh)) {
for (sec = 0; sec < sbi->sec_per_clus; sec++)
sb_breadahead(sb, phys + sec);
}
brelse(bh);
}
/* Returns the directory entry at offset pos (0 on success, -1 past EOF or
   on error). If bh is non-NULL, it is brelse'd first. Pos is incremented.
   The buffer header is returned in bh.
AV. Most often we do it item-by-item. Makes sense to optimize.
AV. OK, there we go: if both bh and de are non-NULL we assume that we just
AV. want the next entry (took one explicit de=NULL in vfat/namei.c).
AV. It's done in fat_get_entry() (inlined), here the slow case lives.
AV. Additionally, when we return -1 (i.e. reached the end of directory)
AV. we make bh NULL.
*/
static int fat__get_entry(struct inode *dir, loff_t *pos,
struct buffer_head **bh, struct msdos_dir_entry **de)
{
struct super_block *sb = dir->i_sb;
sector_t phys, iblock;
unsigned long mapped_blocks;
int err, offset;
next:
brelse(*bh);
*bh = NULL;
iblock = *pos >> sb->s_blocksize_bits;
err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0, false);
if (err || !phys)
return -1; /* beyond EOF or error */
fat_dir_readahead(dir, iblock, phys);
*bh = sb_bread(sb, phys);
if (*bh == NULL) {
fat_msg_ratelimit(sb, KERN_ERR,
"Directory bread(block %llu) failed", (llu)phys);
/* skip this block */
*pos = (iblock + 1) << sb->s_blocksize_bits;
goto next;
}
offset = *pos & (sb->s_blocksize - 1);
*pos += sizeof(struct msdos_dir_entry);
*de = (struct msdos_dir_entry *)((*bh)->b_data + offset);
return 0;
}
static inline int fat_get_entry(struct inode *dir, loff_t *pos,
struct buffer_head **bh,
struct msdos_dir_entry **de)
{
/* Fast stuff first */
if (*bh && *de &&
(*de - (struct msdos_dir_entry *)(*bh)->b_data) <
MSDOS_SB(dir->i_sb)->dir_per_block - 1) {
*pos += sizeof(struct msdos_dir_entry);
(*de)++;
return 0;
}
return fat__get_entry(dir, pos, bh, de);
}
/*
* Convert Unicode 16 to UTF-8, translated Unicode, or ASCII.
* If uni_xlate is enabled and we can't get a 1:1 conversion, use a
* colon as an escape character since it is normally invalid on the vfat
* filesystem. The following four characters are the hexadecimal digits
 * of the Unicode value. This lets us do a full dump and restore of Unicode
* filenames. We could get into some trouble with long Unicode names,
* but ignore that right now.
* Ahem... Stack smashing in ring 0 isn't fun. Fixed.
*/
static int uni16_to_x8(struct super_block *sb, unsigned char *ascii,
const wchar_t *uni, int len, struct nls_table *nls)
{
int uni_xlate = MSDOS_SB(sb)->options.unicode_xlate;
const wchar_t *ip;
wchar_t ec;
unsigned char *op;
int charlen;
ip = uni;
op = ascii;
while (*ip && ((len - NLS_MAX_CHARSET_SIZE) > 0)) {
ec = *ip++;
charlen = nls->uni2char(ec, op, NLS_MAX_CHARSET_SIZE);
if (charlen > 0) {
op += charlen;
len -= charlen;
} else {
if (uni_xlate == 1) {
*op++ = ':';
op = hex_byte_pack(op, ec >> 8);
op = hex_byte_pack(op, ec);
len -= 5;
} else {
*op++ = '?';
len--;
}
}
}
if (unlikely(*ip)) {
fat_msg(sb, KERN_WARNING,
"filename was truncated while converting.");
}
*op = 0;
return op - ascii;
}
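/*
 * Example of the escape above (assuming uni_xlate is enabled): a
 * U+266B code unit the NLS table cannot map is emitted as the five
 * bytes ":266b" (hex_byte_pack() writes lowercase hex), so the
 * original UTF-16 value can be reconstructed on lookup.
 */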
static inline int fat_uni_to_x8(struct super_block *sb, const wchar_t *uni,
unsigned char *buf, int size)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
if (sbi->options.utf8)
return utf16s_to_utf8s(uni, FAT_MAX_UNI_CHARS,
UTF16_HOST_ENDIAN, buf, size);
else
return uni16_to_x8(sb, buf, uni, size, sbi->nls_io);
}
static inline int
fat_short2uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni)
{
int charlen;
charlen = t->char2uni(c, clen, uni);
if (charlen < 0) {
*uni = 0x003f; /* a question mark */
charlen = 1;
}
return charlen;
}
static inline int
fat_short2lower_uni(struct nls_table *t, unsigned char *c,
int clen, wchar_t *uni)
{
int charlen;
wchar_t wc;
charlen = t->char2uni(c, clen, &wc);
if (charlen < 0) {
*uni = 0x003f; /* a question mark */
charlen = 1;
} else if (charlen <= 1) {
unsigned char nc = t->charset2lower[*c];
if (!nc)
nc = *c;
charlen = t->char2uni(&nc, 1, uni);
if (charlen < 0) {
*uni = 0x003f; /* a question mark */
charlen = 1;
}
} else
*uni = wc;
return charlen;
}
static inline int
fat_shortname2uni(struct nls_table *nls, unsigned char *buf, int buf_size,
wchar_t *uni_buf, unsigned short opt, int lower)
{
int len = 0;
if (opt & VFAT_SFN_DISPLAY_LOWER)
len = fat_short2lower_uni(nls, buf, buf_size, uni_buf);
else if (opt & VFAT_SFN_DISPLAY_WIN95)
len = fat_short2uni(nls, buf, buf_size, uni_buf);
else if (opt & VFAT_SFN_DISPLAY_WINNT) {
if (lower)
len = fat_short2lower_uni(nls, buf, buf_size, uni_buf);
else
len = fat_short2uni(nls, buf, buf_size, uni_buf);
} else
len = fat_short2uni(nls, buf, buf_size, uni_buf);
return len;
}
static inline int fat_name_match(struct msdos_sb_info *sbi,
const unsigned char *a, int a_len,
const unsigned char *b, int b_len)
{
if (a_len != b_len)
return 0;
if (sbi->options.name_check != 's')
return !nls_strnicmp(sbi->nls_io, a, b, a_len);
else
return !memcmp(a, b, a_len);
}
enum { PARSE_INVALID = 1, PARSE_NOT_LONGNAME, PARSE_EOF, };
/**
* fat_parse_long - Parse extended directory entry.
*
* This function returns zero on success, negative value on error, or one of
* the following:
*
* %PARSE_INVALID - Directory entry is invalid.
* %PARSE_NOT_LONGNAME - Directory entry does not contain longname.
* %PARSE_EOF - Directory has no more entries.
*/
static int fat_parse_long(struct inode *dir, loff_t *pos,
struct buffer_head **bh, struct msdos_dir_entry **de,
wchar_t **unicode, unsigned char *nr_slots)
{
struct msdos_dir_slot *ds;
unsigned char id, slot, slots, alias_checksum;
if (!*unicode) {
*unicode = __getname();
if (!*unicode) {
brelse(*bh);
return -ENOMEM;
}
}
parse_long:
ds = (struct msdos_dir_slot *)*de;
id = ds->id;
if (!(id & 0x40))
return PARSE_INVALID;
slots = id & ~0x40;
if (slots > 20 || !slots) /* ceil(256 * 2 / 26) */
return PARSE_INVALID;
*nr_slots = slots;
alias_checksum = ds->alias_checksum;
slot = slots;
while (1) {
int offset;
slot--;
offset = slot * 13;
fat16_towchar(*unicode + offset, ds->name0_4, 5);
fat16_towchar(*unicode + offset + 5, ds->name5_10, 6);
fat16_towchar(*unicode + offset + 11, ds->name11_12, 2);
if (ds->id & 0x40)
(*unicode)[offset + 13] = 0;
if (fat_get_entry(dir, pos, bh, de) < 0)
return PARSE_EOF;
if (slot == 0)
break;
ds = (struct msdos_dir_slot *)*de;
if (ds->attr != ATTR_EXT)
return PARSE_NOT_LONGNAME;
if ((ds->id & ~0x40) != slot)
goto parse_long;
if (ds->alias_checksum != alias_checksum)
goto parse_long;
}
if ((*de)->name[0] == DELETED_FLAG)
return PARSE_INVALID;
if ((*de)->attr == ATTR_EXT)
goto parse_long;
if (IS_FREE((*de)->name) || ((*de)->attr & ATTR_VOLUME))
return PARSE_INVALID;
if (fat_checksum((*de)->name) != alias_checksum)
*nr_slots = 0;
return 0;
}
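/*
 * Slot layout recap for the parser above: long-name slots are stored
 * in reverse order (highest-numbered first), each carrying 13 UTF-16
 * code units split 5/6/2 across name0_4/name5_10/name11_12; the first
 * slot on disk has bit 0x40 set in its id, and every slot carries the
 * checksum of the short directory entry that follows the sequence.
 */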
/**
* fat_parse_short - Parse MS-DOS (short) directory entry.
* @sb: superblock
* @de: directory entry to parse
* @name: FAT_MAX_SHORT_SIZE array in which to place extracted name
* @dot_hidden: Nonzero == prepend '.' to names with ATTR_HIDDEN
*
* Returns the number of characters extracted into 'name'.
*/
static int fat_parse_short(struct super_block *sb,
const struct msdos_dir_entry *de,
unsigned char *name, int dot_hidden)
{
const struct msdos_sb_info *sbi = MSDOS_SB(sb);
int isvfat = sbi->options.isvfat;
int nocase = sbi->options.nocase;
unsigned short opt_shortname = sbi->options.shortname;
struct nls_table *nls_disk = sbi->nls_disk;
wchar_t uni_name[14];
unsigned char c, work[MSDOS_NAME];
unsigned char *ptname = name;
int chi, chl, i, j, k;
int dotoffset = 0;
int name_len = 0, uni_len = 0;
if (!isvfat && dot_hidden && (de->attr & ATTR_HIDDEN)) {
*ptname++ = '.';
dotoffset = 1;
}
memcpy(work, de->name, sizeof(work));
/* For an explanation of the special treatment of 0x05 in
* filenames, see msdos_format_name in namei_msdos.c
*/
if (work[0] == 0x05)
work[0] = 0xE5;
/* Filename */
for (i = 0, j = 0; i < 8;) {
c = work[i];
if (!c)
break;
chl = fat_shortname2uni(nls_disk, &work[i], 8 - i,
&uni_name[j++], opt_shortname,
de->lcase & CASE_LOWER_BASE);
if (chl <= 1) {
if (!isvfat)
ptname[i] = nocase ? c : fat_tolower(c);
i++;
if (c != ' ') {
name_len = i;
uni_len = j;
}
} else {
uni_len = j;
if (isvfat)
i += min(chl, 8-i);
else {
for (chi = 0; chi < chl && i < 8; chi++, i++)
ptname[i] = work[i];
}
if (chl)
name_len = i;
}
}
i = name_len;
j = uni_len;
fat_short2uni(nls_disk, ".", 1, &uni_name[j++]);
if (!isvfat)
ptname[i] = '.';
i++;
/* Extension */
for (k = 8; k < MSDOS_NAME;) {
c = work[k];
if (!c)
break;
chl = fat_shortname2uni(nls_disk, &work[k], MSDOS_NAME - k,
&uni_name[j++], opt_shortname,
de->lcase & CASE_LOWER_EXT);
if (chl <= 1) {
k++;
if (!isvfat)
ptname[i] = nocase ? c : fat_tolower(c);
i++;
if (c != ' ') {
name_len = i;
uni_len = j;
}
} else {
uni_len = j;
if (isvfat) {
int offset = min(chl, MSDOS_NAME-k);
k += offset;
i += offset;
} else {
for (chi = 0; chi < chl && k < MSDOS_NAME;
chi++, i++, k++) {
ptname[i] = work[k];
}
}
if (chl)
name_len = i;
}
}
if (name_len > 0) {
name_len += dotoffset;
if (sbi->options.isvfat) {
uni_name[uni_len] = 0x0000;
name_len = fat_uni_to_x8(sb, uni_name, name,
FAT_MAX_SHORT_SIZE);
}
}
return name_len;
}
/*
* Return values: negative -> error/not found, 0 -> found.
*/
int fat_search_long(struct inode *inode, const unsigned char *name,
int name_len, struct fat_slot_info *sinfo)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh = NULL;
struct msdos_dir_entry *de;
unsigned char nr_slots;
wchar_t *unicode = NULL;
unsigned char bufname[FAT_MAX_SHORT_SIZE];
loff_t cpos = 0;
int err, len;
err = -ENOENT;
while (1) {
if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
goto end_of_dir;
parse_record:
nr_slots = 0;
if (de->name[0] == DELETED_FLAG)
continue;
if (de->attr != ATTR_EXT && (de->attr & ATTR_VOLUME))
continue;
if (de->attr != ATTR_EXT && IS_FREE(de->name))
continue;
if (de->attr == ATTR_EXT) {
int status = fat_parse_long(inode, &cpos, &bh, &de,
&unicode, &nr_slots);
if (status < 0) {
err = status;
goto end_of_dir;
} else if (status == PARSE_INVALID)
continue;
else if (status == PARSE_NOT_LONGNAME)
goto parse_record;
else if (status == PARSE_EOF)
goto end_of_dir;
}
/* Never prepend '.' to hidden files here.
* That is done only for msdos mounts (and only when
* 'dotsOK=yes'); if we are executing here, it is in the
* context of a vfat mount.
*/
len = fat_parse_short(sb, de, bufname, 0);
if (len == 0)
continue;
/* Compare shortname */
if (fat_name_match(sbi, name, name_len, bufname, len))
goto found;
if (nr_slots) {
void *longname = unicode + FAT_MAX_UNI_CHARS;
int size = PATH_MAX - FAT_MAX_UNI_SIZE;
/* Compare longname */
len = fat_uni_to_x8(sb, unicode, longname, size);
if (fat_name_match(sbi, name, name_len, longname, len))
goto found;
}
}
found:
nr_slots++; /* include the de */
sinfo->slot_off = cpos - nr_slots * sizeof(*de);
sinfo->nr_slots = nr_slots;
sinfo->de = de;
sinfo->bh = bh;
sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de);
err = 0;
end_of_dir:
if (unicode)
__putname(unicode);
return err;
}
EXPORT_SYMBOL_GPL(fat_search_long);
struct fat_ioctl_filldir_callback {
struct dir_context ctx;
void __user *dirent;
int result;
/* for dir ioctl */
const char *longname;
int long_len;
const char *shortname;
int short_len;
};
static int __fat_readdir(struct inode *inode, struct file *file,
struct dir_context *ctx, int short_only,
struct fat_ioctl_filldir_callback *both)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh;
struct msdos_dir_entry *de;
unsigned char nr_slots;
wchar_t *unicode = NULL;
unsigned char bufname[FAT_MAX_SHORT_SIZE];
int isvfat = sbi->options.isvfat;
const char *fill_name = NULL;
int fake_offset = 0;
loff_t cpos;
int short_len = 0, fill_len = 0;
int ret = 0;
mutex_lock(&sbi->s_lock);
cpos = ctx->pos;
/* Fake . and .. for the root directory. */
if (inode->i_ino == MSDOS_ROOT_INO) {
if (!dir_emit_dots(file, ctx))
goto out;
if (ctx->pos == 2) {
fake_offset = 1;
cpos = 0;
}
}
if (cpos & (sizeof(struct msdos_dir_entry) - 1)) {
ret = -ENOENT;
goto out;
}
bh = NULL;
get_new:
if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
goto end_of_dir;
parse_record:
nr_slots = 0;
/*
	 * Check for a long filename entry, but if short_only, we don't
	 * need to parse the long filename.
*/
if (isvfat && !short_only) {
if (de->name[0] == DELETED_FLAG)
goto record_end;
if (de->attr != ATTR_EXT && (de->attr & ATTR_VOLUME))
goto record_end;
if (de->attr != ATTR_EXT && IS_FREE(de->name))
goto record_end;
} else {
if ((de->attr & ATTR_VOLUME) || IS_FREE(de->name))
goto record_end;
}
if (isvfat && de->attr == ATTR_EXT) {
int status = fat_parse_long(inode, &cpos, &bh, &de,
&unicode, &nr_slots);
if (status < 0) {
bh = NULL;
ret = status;
goto end_of_dir;
} else if (status == PARSE_INVALID)
goto record_end;
else if (status == PARSE_NOT_LONGNAME)
goto parse_record;
else if (status == PARSE_EOF)
goto end_of_dir;
if (nr_slots) {
void *longname = unicode + FAT_MAX_UNI_CHARS;
int size = PATH_MAX - FAT_MAX_UNI_SIZE;
int len = fat_uni_to_x8(sb, unicode, longname, size);
fill_name = longname;
fill_len = len;
/* !both && !short_only, so we don't need shortname. */
if (!both)
goto start_filldir;
short_len = fat_parse_short(sb, de, bufname,
sbi->options.dotsOK);
if (short_len == 0)
goto record_end;
/* hack for fat_ioctl_filldir() */
both->longname = fill_name;
both->long_len = fill_len;
both->shortname = bufname;
both->short_len = short_len;
fill_name = NULL;
fill_len = 0;
goto start_filldir;
}
}
short_len = fat_parse_short(sb, de, bufname, sbi->options.dotsOK);
if (short_len == 0)
goto record_end;
fill_name = bufname;
fill_len = short_len;
start_filldir:
ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
if (fake_offset && ctx->pos < 2)
ctx->pos = 2;
if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) {
if (!dir_emit_dot(file, ctx))
goto fill_failed;
} else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) {
if (!dir_emit_dotdot(file, ctx))
goto fill_failed;
} else {
unsigned long inum;
loff_t i_pos = fat_make_i_pos(sb, bh, de);
struct inode *tmp = fat_iget(sb, i_pos);
if (tmp) {
inum = tmp->i_ino;
iput(tmp);
} else
inum = iunique(sb, MSDOS_ROOT_INO);
if (!dir_emit(ctx, fill_name, fill_len, inum,
(de->attr & ATTR_DIR) ? DT_DIR : DT_REG))
goto fill_failed;
}
record_end:
fake_offset = 0;
ctx->pos = cpos;
goto get_new;
end_of_dir:
if (fake_offset && cpos < 2)
ctx->pos = 2;
else
ctx->pos = cpos;
fill_failed:
brelse(bh);
if (unicode)
__putname(unicode);
out:
mutex_unlock(&sbi->s_lock);
return ret;
}
static int fat_readdir(struct file *file, struct dir_context *ctx)
{
return __fat_readdir(file_inode(file), file, ctx, 0, NULL);
}
#define FAT_IOCTL_FILLDIR_FUNC(func, dirent_type) \
static bool func(struct dir_context *ctx, const char *name, int name_len, \
loff_t offset, u64 ino, unsigned int d_type) \
{ \
struct fat_ioctl_filldir_callback *buf = \
container_of(ctx, struct fat_ioctl_filldir_callback, ctx); \
struct dirent_type __user *d1 = buf->dirent; \
struct dirent_type __user *d2 = d1 + 1; \
\
if (buf->result) \
return false; \
buf->result++; \
\
if (name != NULL) { \
/* dirent has only short name */ \
if (name_len >= sizeof(d1->d_name)) \
name_len = sizeof(d1->d_name) - 1; \
\
if (put_user(0, &d2->d_name[0]) || \
put_user(0, &d2->d_reclen) || \
copy_to_user(d1->d_name, name, name_len) || \
put_user(0, d1->d_name + name_len) || \
put_user(name_len, &d1->d_reclen)) \
goto efault; \
} else { \
/* dirent has short and long name */ \
const char *longname = buf->longname; \
int long_len = buf->long_len; \
const char *shortname = buf->shortname; \
int short_len = buf->short_len; \
\
if (long_len >= sizeof(d1->d_name)) \
long_len = sizeof(d1->d_name) - 1; \
if (short_len >= sizeof(d1->d_name)) \
short_len = sizeof(d1->d_name) - 1; \
\
if (copy_to_user(d2->d_name, longname, long_len) || \
put_user(0, d2->d_name + long_len) || \
put_user(long_len, &d2->d_reclen) || \
put_user(ino, &d2->d_ino) || \
put_user(offset, &d2->d_off) || \
copy_to_user(d1->d_name, shortname, short_len) || \
put_user(0, d1->d_name + short_len) || \
put_user(short_len, &d1->d_reclen)) \
goto efault; \
} \
return true; \
efault: \
buf->result = -EFAULT; \
return false; \
}
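/*
 * Layout note for the filldir above: the ioctl fills a two-element
 * dirent array, d1 with the short (8.3) name and d2 with the long
 * name; when only a short name exists, d2's name and reclen are
 * zeroed so applications can tell the two cases apart.
 */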
FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, __fat_dirent)
static int fat_ioctl_readdir(struct inode *inode, struct file *file,
void __user *dirent, filldir_t filldir,
int short_only, int both)
{
struct fat_ioctl_filldir_callback buf = {
.ctx.actor = filldir,
.dirent = dirent
};
int ret;
buf.dirent = dirent;
buf.result = 0;
inode_lock_shared(inode);
buf.ctx.pos = file->f_pos;
ret = -ENOENT;
if (!IS_DEADDIR(inode)) {
ret = __fat_readdir(inode, file, &buf.ctx,
short_only, both ? &buf : NULL);
file->f_pos = buf.ctx.pos;
}
inode_unlock_shared(inode);
if (ret >= 0)
ret = buf.result;
return ret;
}
static long fat_dir_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct __fat_dirent __user *d1 = (struct __fat_dirent __user *)arg;
int short_only, both;
switch (cmd) {
case VFAT_IOCTL_READDIR_SHORT:
short_only = 1;
both = 0;
break;
case VFAT_IOCTL_READDIR_BOTH:
short_only = 0;
both = 1;
break;
default:
return fat_generic_ioctl(filp, cmd, arg);
}
/*
	 * Strictly speaking, this put_user() isn't needed. However, old
	 * code didn't return the right value, so applications use this
	 * value to check whether it is EOF.
*/
if (put_user(0, &d1->d_reclen))
return -EFAULT;
return fat_ioctl_readdir(inode, filp, d1, fat_ioctl_filldir,
short_only, both);
}
#ifdef CONFIG_COMPAT
#define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct compat_dirent[2])
#define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct compat_dirent[2])
FAT_IOCTL_FILLDIR_FUNC(fat_compat_ioctl_filldir, compat_dirent)
static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd,
unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct compat_dirent __user *d1 = compat_ptr(arg);
int short_only, both;
switch (cmd) {
case VFAT_IOCTL_READDIR_SHORT32:
short_only = 1;
both = 0;
break;
case VFAT_IOCTL_READDIR_BOTH32:
short_only = 0;
both = 1;
break;
default:
return fat_generic_ioctl(filp, cmd, (unsigned long)arg);
}
/*
	 * Strictly speaking, this put_user() isn't needed. However, old
	 * code didn't return the right value, so applications use this
	 * value to check whether it is EOF.
*/
if (put_user(0, &d1->d_reclen))
return -EFAULT;
return fat_ioctl_readdir(inode, filp, d1, fat_compat_ioctl_filldir,
short_only, both);
}
#endif /* CONFIG_COMPAT */
const struct file_operations fat_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = fat_readdir,
.unlocked_ioctl = fat_dir_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = fat_compat_dir_ioctl,
#endif
.fsync = fat_file_fsync,
};
static int fat_get_short_entry(struct inode *dir, loff_t *pos,
struct buffer_head **bh,
struct msdos_dir_entry **de)
{
while (fat_get_entry(dir, pos, bh, de) >= 0) {
/* free entry or long name entry or volume label */
if (!IS_FREE((*de)->name) && !((*de)->attr & ATTR_VOLUME))
return 0;
}
return -ENOENT;
}
/*
* The ".." entry can not provide the "struct fat_slot_info" information
* for inode, nor a usable i_pos. So, this function provides some information
* only.
*
* Since this function walks through the on-disk inodes within a directory,
* callers are responsible for taking any locks necessary to prevent the
* directory from changing.
*/
int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh,
struct msdos_dir_entry **de)
{
loff_t offset = 0;
*de = NULL;
while (fat_get_short_entry(dir, &offset, bh, de) >= 0) {
if (!strncmp((*de)->name, MSDOS_DOTDOT, MSDOS_NAME))
return 0;
}
return -ENOENT;
}
EXPORT_SYMBOL_GPL(fat_get_dotdot_entry);
/* See if directory is empty */
int fat_dir_empty(struct inode *dir)
{
struct buffer_head *bh;
struct msdos_dir_entry *de;
loff_t cpos;
int result = 0;
bh = NULL;
cpos = 0;
while (fat_get_short_entry(dir, &cpos, &bh, &de) >= 0) {
if (strncmp(de->name, MSDOS_DOT , MSDOS_NAME) &&
strncmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) {
result = -ENOTEMPTY;
break;
}
}
brelse(bh);
return result;
}
EXPORT_SYMBOL_GPL(fat_dir_empty);
/*
* fat_subdirs counts the number of sub-directories of dir. It can be run
* on directories being created.
*/
int fat_subdirs(struct inode *dir)
{
struct buffer_head *bh;
struct msdos_dir_entry *de;
loff_t cpos;
int count = 0;
bh = NULL;
cpos = 0;
while (fat_get_short_entry(dir, &cpos, &bh, &de) >= 0) {
if (de->attr & ATTR_DIR)
count++;
}
brelse(bh);
return count;
}
/*
* Scans a directory for a given file (name points to its formatted name).
* Returns an error code or zero.
*/
int fat_scan(struct inode *dir, const unsigned char *name,
struct fat_slot_info *sinfo)
{
struct super_block *sb = dir->i_sb;
sinfo->slot_off = 0;
sinfo->bh = NULL;
while (fat_get_short_entry(dir, &sinfo->slot_off, &sinfo->bh,
&sinfo->de) >= 0) {
if (!strncmp(sinfo->de->name, name, MSDOS_NAME)) {
sinfo->slot_off -= sizeof(*sinfo->de);
sinfo->nr_slots = 1;
sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de);
return 0;
}
}
return -ENOENT;
}
EXPORT_SYMBOL_GPL(fat_scan);
/*
* Scans a directory for a given logstart.
* Returns an error code or zero.
*/
int fat_scan_logstart(struct inode *dir, int i_logstart,
struct fat_slot_info *sinfo)
{
struct super_block *sb = dir->i_sb;
sinfo->slot_off = 0;
sinfo->bh = NULL;
while (fat_get_short_entry(dir, &sinfo->slot_off, &sinfo->bh,
&sinfo->de) >= 0) {
if (fat_get_start(MSDOS_SB(sb), sinfo->de) == i_logstart) {
sinfo->slot_off -= sizeof(*sinfo->de);
sinfo->nr_slots = 1;
sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de);
return 0;
}
}
return -ENOENT;
}
static int __fat_remove_entries(struct inode *dir, loff_t pos, int nr_slots)
{
struct super_block *sb = dir->i_sb;
struct buffer_head *bh;
struct msdos_dir_entry *de, *endp;
int err = 0, orig_slots;
while (nr_slots) {
bh = NULL;
if (fat_get_entry(dir, &pos, &bh, &de) < 0) {
err = -EIO;
break;
}
orig_slots = nr_slots;
endp = (struct msdos_dir_entry *)(bh->b_data + sb->s_blocksize);
while (nr_slots && de < endp) {
de->name[0] = DELETED_FLAG;
de++;
nr_slots--;
}
mark_buffer_dirty_inode(bh, dir);
if (IS_DIRSYNC(dir))
err = sync_dirty_buffer(bh);
brelse(bh);
if (err)
break;
/* pos is *next* de's position, so this does `- sizeof(de)' */
pos += ((orig_slots - nr_slots) * sizeof(*de)) - sizeof(*de);
}
return err;
}
int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo)
{
struct super_block *sb = dir->i_sb;
struct msdos_dir_entry *de;
struct buffer_head *bh;
int err = 0, nr_slots;
/*
	 * First stage: Remove the shortname. By doing this, the
	 * directory entry is removed.
*/
nr_slots = sinfo->nr_slots;
de = sinfo->de;
sinfo->de = NULL;
bh = sinfo->bh;
sinfo->bh = NULL;
while (nr_slots && de >= (struct msdos_dir_entry *)bh->b_data) {
de->name[0] = DELETED_FLAG;
de--;
nr_slots--;
}
mark_buffer_dirty_inode(bh, dir);
if (IS_DIRSYNC(dir))
err = sync_dirty_buffer(bh);
brelse(bh);
if (err)
return err;
inode_inc_iversion(dir);
if (nr_slots) {
/*
* Second stage: remove the remaining longname slots.
		 * (The directory entry itself is already removed, so
		 * return success regardless.)
*/
err = __fat_remove_entries(dir, sinfo->slot_off, nr_slots);
if (err) {
fat_msg(sb, KERN_WARNING,
"Couldn't remove the long name slots");
}
}
fat_truncate_time(dir, NULL, S_ATIME|S_MTIME);
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
mark_inode_dirty(dir);
return 0;
}
EXPORT_SYMBOL_GPL(fat_remove_entries);
static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
struct buffer_head **bhs, int nr_bhs)
{
struct super_block *sb = dir->i_sb;
sector_t last_blknr = blknr + MSDOS_SB(sb)->sec_per_clus;
int err, i, n;
/* Zeroing the unused blocks on this cluster */
blknr += nr_used;
n = nr_used;
while (blknr < last_blknr) {
bhs[n] = sb_getblk(sb, blknr);
if (!bhs[n]) {
err = -ENOMEM;
goto error;
}
/* Avoid race with userspace read via bdev */
lock_buffer(bhs[n]);
memset(bhs[n]->b_data, 0, sb->s_blocksize);
set_buffer_uptodate(bhs[n]);
unlock_buffer(bhs[n]);
mark_buffer_dirty_inode(bhs[n], dir);
n++;
blknr++;
if (n == nr_bhs) {
if (IS_DIRSYNC(dir)) {
err = fat_sync_bhs(bhs, n);
if (err)
goto error;
}
for (i = 0; i < n; i++)
brelse(bhs[i]);
n = 0;
}
}
if (IS_DIRSYNC(dir)) {
err = fat_sync_bhs(bhs, n);
if (err)
goto error;
}
for (i = 0; i < n; i++)
brelse(bhs[i]);
return 0;
error:
for (i = 0; i < n; i++)
bforget(bhs[i]);
return err;
}
int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
{
struct super_block *sb = dir->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bhs[MAX_BUF_PER_PAGE];
struct msdos_dir_entry *de;
sector_t blknr;
__le16 date, time;
u8 time_cs;
int err, cluster;
err = fat_alloc_clusters(dir, &cluster, 1);
if (err)
goto error;
blknr = fat_clus_to_blknr(sbi, cluster);
bhs[0] = sb_getblk(sb, blknr);
if (!bhs[0]) {
err = -ENOMEM;
goto error_free;
}
fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
de = (struct msdos_dir_entry *)bhs[0]->b_data;
/* Avoid race with userspace read via bdev */
lock_buffer(bhs[0]);
/* filling the new directory slots ("." and ".." entries) */
memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
de->attr = de[1].attr = ATTR_DIR;
de[0].lcase = de[1].lcase = 0;
de[0].time = de[1].time = time;
de[0].date = de[1].date = date;
if (sbi->options.isvfat) {
/* extra timestamps */
de[0].ctime = de[1].ctime = time;
de[0].ctime_cs = de[1].ctime_cs = time_cs;
de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = date;
} else {
de[0].ctime = de[1].ctime = 0;
de[0].ctime_cs = de[1].ctime_cs = 0;
de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = 0;
}
fat_set_start(&de[0], cluster);
fat_set_start(&de[1], MSDOS_I(dir)->i_logstart);
de[0].size = de[1].size = 0;
memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
set_buffer_uptodate(bhs[0]);
unlock_buffer(bhs[0]);
mark_buffer_dirty_inode(bhs[0], dir);
err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
if (err)
goto error_free;
return cluster;
error_free:
fat_free_clusters(dir, cluster);
error:
return err;
}
EXPORT_SYMBOL_GPL(fat_alloc_new_dir);
static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
int *nr_cluster, struct msdos_dir_entry **de,
struct buffer_head **bh, loff_t *i_pos)
{
struct super_block *sb = dir->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bhs[MAX_BUF_PER_PAGE];
sector_t blknr, start_blknr, last_blknr;
unsigned long size, copy;
int err, i, n, offset, cluster[2];
/*
* The minimum cluster size is 512 bytes, and the maximum entry
* size is 32 * slots (672 bytes). So only if the cluster size is
* 512 bytes may we need two clusters.
*/
size = nr_slots * sizeof(struct msdos_dir_entry);
*nr_cluster = (size + (sbi->cluster_size - 1)) >> sbi->cluster_bits;
BUG_ON(*nr_cluster > 2);
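/*
 * Worked example: with the maximum of 21 slots, size is 21 * 32 =
 * 672 bytes; a 512-byte cluster rounds that up to *nr_cluster = 2,
 * any larger cluster size gives 1.
 */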
err = fat_alloc_clusters(dir, cluster, *nr_cluster);
if (err)
goto error;
/*
* First stage: fill in the directory entries. NOTE: This cluster
* is not referenced from any inode yet, so the update order is
* not important.
*/
i = n = copy = 0;
do {
start_blknr = blknr = fat_clus_to_blknr(sbi, cluster[i]);
last_blknr = start_blknr + sbi->sec_per_clus;
while (blknr < last_blknr) {
bhs[n] = sb_getblk(sb, blknr);
if (!bhs[n]) {
err = -ENOMEM;
goto error_nomem;
}
/* fill the directory entry */
copy = min(size, sb->s_blocksize);
/* Avoid race with userspace read via bdev */
lock_buffer(bhs[n]);
memcpy(bhs[n]->b_data, slots, copy);
set_buffer_uptodate(bhs[n]);
unlock_buffer(bhs[n]);
mark_buffer_dirty_inode(bhs[n], dir);
slots += copy;
size -= copy;
if (!size)
break;
n++;
blknr++;
}
} while (++i < *nr_cluster);
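/*
 * bhs[n] now holds the tail of the copied slots; the short name entry
 * is the last 32 bytes written, so it sits at offset
 * copy - sizeof(struct msdos_dir_entry) within that block. Zero the
 * unused remainder of the block before publishing it.
 */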
memset(bhs[n]->b_data + copy, 0, sb->s_blocksize - copy);
offset = copy - sizeof(struct msdos_dir_entry);
get_bh(bhs[n]);
*bh = bhs[n];
*de = (struct msdos_dir_entry *)((*bh)->b_data + offset);
*i_pos = fat_make_i_pos(sb, *bh, *de);
/* Second stage: clear the rest of the cluster, and write it out */
err = fat_zeroed_cluster(dir, start_blknr, ++n, bhs, MAX_BUF_PER_PAGE);
if (err)
goto error_free;
return cluster[0];
error_free:
brelse(*bh);
*bh = NULL;
n = 0;
error_nomem:
for (i = 0; i < n; i++)
bforget(bhs[i]);
fat_free_clusters(dir, cluster[0]);
error:
return err;
}
int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
struct fat_slot_info *sinfo)
{
struct super_block *sb = dir->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */
struct msdos_dir_entry *de;
int err, free_slots, i, nr_bhs;
loff_t pos, i_pos;
sinfo->nr_slots = nr_slots;
/* First stage: search free directory entries */
free_slots = nr_bhs = 0;
bh = prev = NULL;
pos = 0;
err = -ENOSPC;
while (fat_get_entry(dir, &pos, &bh, &de) > -1) {
/* check the maximum size of directory */
if (pos >= FAT_MAX_DIR_SIZE)
goto error;
if (IS_FREE(de->name)) {
if (prev != bh) {
get_bh(bh);
bhs[nr_bhs] = prev = bh;
nr_bhs++;
}
free_slots++;
if (free_slots == nr_slots)
goto found;
} else {
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
prev = NULL;
free_slots = nr_bhs = 0;
}
}
if (dir->i_ino == MSDOS_ROOT_INO) {
if (!is_fat32(sbi))
goto error;
} else if (MSDOS_I(dir)->i_start == 0) {
fat_msg(sb, KERN_ERR, "Corrupted directory (i_pos %lld)",
MSDOS_I(dir)->i_pos);
err = -EIO;
goto error;
}
found:
err = 0;
pos -= free_slots * sizeof(*de);
nr_slots -= free_slots;
if (free_slots) {
/*
* Second stage: fill the free entries with the new entries.
* NOTE: If these slots include a short name entry, we write
* the long name slots first, then the short name.
*/
int size = free_slots * sizeof(*de);
int offset = pos & (sb->s_blocksize - 1);
int long_bhs = nr_bhs - (nr_slots == 0);
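/*
 * If every requested slot fit into the free entries (nr_slots == 0),
 * the last buffer contains the short name entry and is written
 * separately below; otherwise all buffers here hold long name slots.
 */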
/* Fill the long name slots. */
for (i = 0; i < long_bhs; i++) {
int copy = min_t(int, sb->s_blocksize - offset, size);
memcpy(bhs[i]->b_data + offset, slots, copy);
mark_buffer_dirty_inode(bhs[i], dir);
offset = 0;
slots += copy;
size -= copy;
}
if (long_bhs && IS_DIRSYNC(dir))
err = fat_sync_bhs(bhs, long_bhs);
if (!err && i < nr_bhs) {
/* Fill the short name slot. */
int copy = min_t(int, sb->s_blocksize - offset, size);
memcpy(bhs[i]->b_data + offset, slots, copy);
mark_buffer_dirty_inode(bhs[i], dir);
if (IS_DIRSYNC(dir))
err = sync_dirty_buffer(bhs[i]);
}
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
if (err)
goto error_remove;
}
if (nr_slots) {
int cluster, nr_cluster;
/*
* Third stage: allocate a cluster for the new entries,
* initialize it with them, then add the cluster to dir.
*/
cluster = fat_add_new_entries(dir, slots, nr_slots, &nr_cluster,
&de, &bh, &i_pos);
if (cluster < 0) {
err = cluster;
goto error_remove;
}
err = fat_chain_add(dir, cluster, nr_cluster);
if (err) {
fat_free_clusters(dir, cluster);
goto error_remove;
}
if (dir->i_size & (sbi->cluster_size - 1)) {
fat_fs_error(sb, "Odd directory size");
dir->i_size = (dir->i_size + sbi->cluster_size - 1)
& ~((loff_t)sbi->cluster_size - 1);
}
dir->i_size += nr_cluster << sbi->cluster_bits;
MSDOS_I(dir)->mmu_private += nr_cluster << sbi->cluster_bits;
}
sinfo->slot_off = pos;
sinfo->de = de;
sinfo->bh = bh;
sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de);
return 0;
error:
brelse(bh);
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
return err;
error_remove:
brelse(bh);
if (free_slots)
__fat_remove_entries(dir, pos, free_slots);
return err;
}
EXPORT_SYMBOL_GPL(fat_add_entries);
| linux-master | fs/fat/dir.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/fat/inode.c
*
* Written 1992,1993 by Werner Almesberger
* VFAT extensions by Gordon Chaffee, merged with msdos fs by Henrik Storner
* Rewritten for the constant inumbers support by Al Viro
*
* Fixes:
*
* Max Cohan: Fixed invalid FSINFO offset when info_sector is 0
*/
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/vfs.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <asm/unaligned.h>
#include <linux/random.h>
#include <linux/iversion.h>
#include "fat.h"
#ifndef CONFIG_FAT_DEFAULT_IOCHARSET
/* if the user doesn't select VFAT, this is undefined. */
#define CONFIG_FAT_DEFAULT_IOCHARSET ""
#endif
#define KB_IN_SECTORS 2
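/* 1 KiB corresponds to two 512-byte sectors */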
/* DOS dates from 1980/1/1 through 2107/12/31 */
#define FAT_DATE_MIN (0<<9 | 1<<5 | 1)
#define FAT_DATE_MAX (127<<9 | 12<<5 | 31)
#define FAT_TIME_MAX (23<<11 | 59<<5 | 29)
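/* FAT times pack hours<<11 | minutes<<5 | (seconds / 2), so the 29
 * above encodes 58-59 seconds */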
/*
* A deserialized copy of the on-disk structure laid out in struct
* fat_boot_sector.
*/
struct fat_bios_param_block {
u16 fat_sector_size;
u8 fat_sec_per_clus;
u16 fat_reserved;
u8 fat_fats;
u16 fat_dir_entries;
u16 fat_sectors;
u16 fat_fat_length;
u32 fat_total_sect;
u8 fat16_state;
u32 fat16_vol_id;
u32 fat32_length;
u32 fat32_root_cluster;
u16 fat32_info_sector;
u8 fat32_state;
u32 fat32_vol_id;
};
static int fat_default_codepage = CONFIG_FAT_DEFAULT_CODEPAGE;
static char fat_default_iocharset[] = CONFIG_FAT_DEFAULT_IOCHARSET;
static struct fat_floppy_defaults {
unsigned nr_sectors;
unsigned sec_per_clus;
unsigned dir_entries;
unsigned media;
unsigned fat_length;
} floppy_defaults[] = {
{
.nr_sectors = 160 * KB_IN_SECTORS,
.sec_per_clus = 1,
.dir_entries = 64,
.media = 0xFE,
.fat_length = 1,
},
{
.nr_sectors = 180 * KB_IN_SECTORS,
.sec_per_clus = 1,
.dir_entries = 64,
.media = 0xFC,
.fat_length = 2,
},
{
.nr_sectors = 320 * KB_IN_SECTORS,
.sec_per_clus = 2,
.dir_entries = 112,
.media = 0xFF,
.fat_length = 1,
},
{
.nr_sectors = 360 * KB_IN_SECTORS,
.sec_per_clus = 2,
.dir_entries = 112,
.media = 0xFD,
.fat_length = 2,
},
};
int fat_add_cluster(struct inode *inode)
{
int err, cluster;
err = fat_alloc_clusters(inode, &cluster, 1);
if (err)
return err;
/* FIXME: this cluster should be added after the data of this
 * cluster is written */
err = fat_chain_add(inode, cluster, 1);
if (err)
fat_free_clusters(inode, cluster);
return err;
}
static inline int __fat_get_block(struct inode *inode, sector_t iblock,
unsigned long *max_blocks,
struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
unsigned long mapped_blocks;
sector_t phys, last_block;
int err, offset;
err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create, false);
if (err)
return err;
if (phys) {
map_bh(bh_result, sb, phys);
*max_blocks = min(mapped_blocks, *max_blocks);
return 0;
}
if (!create)
return 0;
if (iblock != MSDOS_I(inode)->mmu_private >> sb->s_blocksize_bits) {
fat_fs_error(sb, "corrupted file size (i_pos %lld, %lld)",
MSDOS_I(inode)->i_pos, MSDOS_I(inode)->mmu_private);
return -EIO;
}
last_block = inode->i_blocks >> (sb->s_blocksize_bits - 9);
offset = (unsigned long)iblock & (sbi->sec_per_clus - 1);
/*
* Allocate a new cluster only when both of the following hold:
* 1) no more blocks are available (iblock is past the last block)
* 2) the block is not part of a fallocated region
*/
if (!offset && !(iblock < last_block)) {
/* TODO: multiple cluster allocation would be desirable. */
err = fat_add_cluster(inode);
if (err)
return err;
}
/* available blocks on this cluster */
mapped_blocks = sbi->sec_per_clus - offset;
*max_blocks = min(mapped_blocks, *max_blocks);
MSDOS_I(inode)->mmu_private += *max_blocks << sb->s_blocksize_bits;
err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create, false);
if (err)
return err;
if (!phys) {
fat_fs_error(sb,
"invalid FAT chain (i_pos %lld, last_block %llu)",
MSDOS_I(inode)->i_pos,
(unsigned long long)last_block);
return -EIO;
}
BUG_ON(*max_blocks != mapped_blocks);
set_buffer_new(bh_result);
map_bh(bh_result, sb, phys);
return 0;
}
static int fat_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
int err;
err = __fat_get_block(inode, iblock, &max_blocks, bh_result, create);
if (err)
return err;
bh_result->b_size = max_blocks << sb->s_blocksize_bits;
return 0;
}
static int fat_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
return mpage_writepages(mapping, wbc, fat_get_block);
}
static int fat_read_folio(struct file *file, struct folio *folio)
{
return mpage_read_folio(folio, fat_get_block);
}
static void fat_readahead(struct readahead_control *rac)
{
mpage_readahead(rac, fat_get_block);
}
static void fat_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
if (to > inode->i_size) {
truncate_pagecache(inode, inode->i_size);
fat_truncate_blocks(inode, inode->i_size);
}
}
static int fat_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
int err;
*pagep = NULL;
err = cont_write_begin(file, mapping, pos, len,
pagep, fsdata, fat_get_block,
&MSDOS_I(mapping->host)->mmu_private);
if (err < 0)
fat_write_failed(mapping, pos + len);
return err;
}
static int fat_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *pagep, void *fsdata)
{
struct inode *inode = mapping->host;
int err;
err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
if (err < len)
fat_write_failed(mapping, pos + len);
if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
mark_inode_dirty(inode);
}
return err;
}
static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
loff_t offset = iocb->ki_pos;
ssize_t ret;
if (iov_iter_rw(iter) == WRITE) {
/*
* FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
* so we need to update ->mmu_private to the block boundary.
*
* But we would have to fill the remaining area or hole
* with zeros to update ->mmu_private.
*
* So return 0, and fall back to the normal buffered write.
*/
loff_t size = offset + count;
if (MSDOS_I(inode)->mmu_private < size)
return 0;
}
/*
* FAT needs to use DIO_LOCKING to avoid the race
* condition between fat_get_block() and ->truncate().
*/
ret = blockdev_direct_IO(iocb, inode, iter, fat_get_block);
if (ret < 0 && iov_iter_rw(iter) == WRITE)
fat_write_failed(mapping, offset + count);
return ret;
}
static int fat_get_block_bmap(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
int err;
sector_t bmap;
unsigned long mapped_blocks;
BUG_ON(create != 0);
err = fat_bmap(inode, iblock, &bmap, &mapped_blocks, create, true);
if (err)
return err;
if (bmap) {
map_bh(bh_result, sb, bmap);
max_blocks = min(mapped_blocks, max_blocks);
}
bh_result->b_size = max_blocks << sb->s_blocksize_bits;
return 0;
}
static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
{
sector_t blocknr;
/* fat_get_cluster() assumes the requested blocknr isn't truncated. */
down_read(&MSDOS_I(mapping->host)->truncate_lock);
blocknr = generic_block_bmap(mapping, block, fat_get_block_bmap);
up_read(&MSDOS_I(mapping->host)->truncate_lock);
return blocknr;
}
/*
* fat_block_truncate_page() zeroes out a mapping from file offset `from'
* up to the end of the block which corresponds to `from'.
* This is required during truncate to physically zero out the tail end
* of that block so it doesn't yield old data if the file is later grown.
* It also avoids failures from fsx for the "data past EOF" cases.
*/
int fat_block_truncate_page(struct inode *inode, loff_t from)
{
return block_truncate_page(inode->i_mapping, from, fat_get_block);
}
static const struct address_space_operations fat_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = fat_read_folio,
.readahead = fat_readahead,
.writepages = fat_writepages,
.write_begin = fat_write_begin,
.write_end = fat_write_end,
.direct_IO = fat_direct_IO,
.bmap = _fat_bmap,
.migrate_folio = buffer_migrate_folio,
};
/*
* New FAT inode stuff. We do the following:
* a) i_ino is constant and has nothing to do with the on-disk location.
* b) FAT manages its own cache of directory entries.
* c) *This* cache is indexed by on-disk location.
* d) inode has an associated directory entry, all right, but
* it may be unhashed.
* e) currently entries are stored within struct inode. That should
* change.
* f) we deal with races in the following way:
* 1. readdir() and lookup() do FAT-dir-cache lookup.
* 2. rename() unhashes the F-d-c entry and rehashes it in
* a new place.
* 3. unlink() and rmdir() unhash F-d-c entry.
* 4. fat_write_inode() checks whether the thing is unhashed.
* If it is we silently return. If it isn't we do bread(),
* check if the location is still valid and retry if it
* isn't. Otherwise we do changes.
* 5. Spinlock is used to protect hash/unhash/location check/lookup
* 6. fat_evict_inode() unhashes the F-d-c entry.
* 7. lookup() and readdir() do igrab() if they find a F-d-c entry
* and consider negative result as cache miss.
*/
static void fat_hash_init(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int i;
spin_lock_init(&sbi->inode_hash_lock);
for (i = 0; i < FAT_HASH_SIZE; i++)
INIT_HLIST_HEAD(&sbi->inode_hashtable[i]);
}
static inline unsigned long fat_hash(loff_t i_pos)
{
return hash_32(i_pos, FAT_HASH_BITS);
}
static void dir_hash_init(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int i;
spin_lock_init(&sbi->dir_hash_lock);
for (i = 0; i < FAT_HASH_SIZE; i++)
INIT_HLIST_HEAD(&sbi->dir_hashtable[i]);
}
void fat_attach(struct inode *inode, loff_t i_pos)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
if (inode->i_ino != MSDOS_ROOT_INO) {
struct hlist_head *head = sbi->inode_hashtable
+ fat_hash(i_pos);
spin_lock(&sbi->inode_hash_lock);
MSDOS_I(inode)->i_pos = i_pos;
hlist_add_head(&MSDOS_I(inode)->i_fat_hash, head);
spin_unlock(&sbi->inode_hash_lock);
}
/* If NFS support is enabled, cache the mapping of start cluster
* to directory inode. This is used during reconnection of
* dentries to the filesystem root.
*/
if (S_ISDIR(inode->i_mode) && sbi->options.nfs) {
struct hlist_head *d_head = sbi->dir_hashtable;
d_head += fat_dir_hash(MSDOS_I(inode)->i_logstart);
spin_lock(&sbi->dir_hash_lock);
hlist_add_head(&MSDOS_I(inode)->i_dir_hash, d_head);
spin_unlock(&sbi->dir_hash_lock);
}
}
EXPORT_SYMBOL_GPL(fat_attach);
void fat_detach(struct inode *inode)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
spin_lock(&sbi->inode_hash_lock);
MSDOS_I(inode)->i_pos = 0;
hlist_del_init(&MSDOS_I(inode)->i_fat_hash);
spin_unlock(&sbi->inode_hash_lock);
if (S_ISDIR(inode->i_mode) && sbi->options.nfs) {
spin_lock(&sbi->dir_hash_lock);
hlist_del_init(&MSDOS_I(inode)->i_dir_hash);
spin_unlock(&sbi->dir_hash_lock);
}
}
EXPORT_SYMBOL_GPL(fat_detach);
struct inode *fat_iget(struct super_block *sb, loff_t i_pos)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos);
struct msdos_inode_info *i;
struct inode *inode = NULL;
spin_lock(&sbi->inode_hash_lock);
hlist_for_each_entry(i, head, i_fat_hash) {
BUG_ON(i->vfs_inode.i_sb != sb);
if (i->i_pos != i_pos)
continue;
inode = igrab(&i->vfs_inode);
if (inode)
break;
}
spin_unlock(&sbi->inode_hash_lock);
return inode;
}
static int is_exec(unsigned char *extension)
{
unsigned char exe_extensions[] = "EXECOMBAT", *walk;
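/* "EXECOMBAT" packs the extensions "EXE", "COM" and "BAT" back to
 * back, hence the 3-byte stride below */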
for (walk = exe_extensions; *walk; walk += 3)
if (!strncmp(extension, walk, 3))
return 1;
return 0;
}
static int fat_calc_dir_size(struct inode *inode)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
int ret, fclus, dclus;
inode->i_size = 0;
if (MSDOS_I(inode)->i_start == 0)
return 0;
ret = fat_get_cluster(inode, FAT_ENT_EOF, &fclus, &dclus);
if (ret < 0)
return ret;
inode->i_size = (fclus + 1) << sbi->cluster_bits;
return 0;
}
static int fat_validate_dir(struct inode *dir)
{
struct super_block *sb = dir->i_sb;
if (dir->i_nlink < 2) {
/* Directory should have at least "." and ".." entries. */
fat_fs_error(sb, "corrupted directory (invalid entries)");
return -EIO;
}
if (MSDOS_I(dir)->i_start == 0 ||
MSDOS_I(dir)->i_start == MSDOS_SB(sb)->root_cluster) {
/* Directory should point to a valid cluster. */
fat_fs_error(sb, "corrupted directory (invalid i_start)");
return -EIO;
}
return 0;
}
/* doesn't deal with root inode */
int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
int error;
MSDOS_I(inode)->i_pos = 0;
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
inode_inc_iversion(inode);
inode->i_generation = get_random_u32();
if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
inode->i_generation &= ~1;
inode->i_mode = fat_make_mode(sbi, de->attr, S_IRWXUGO);
inode->i_op = sbi->dir_ops;
inode->i_fop = &fat_dir_operations;
MSDOS_I(inode)->i_start = fat_get_start(sbi, de);
MSDOS_I(inode)->i_logstart = MSDOS_I(inode)->i_start;
error = fat_calc_dir_size(inode);
if (error < 0)
return error;
MSDOS_I(inode)->mmu_private = inode->i_size;
set_nlink(inode, fat_subdirs(inode));
error = fat_validate_dir(inode);
if (error < 0)
return error;
} else { /* not a directory */
inode->i_generation |= 1;
inode->i_mode = fat_make_mode(sbi, de->attr,
((sbi->options.showexec && !is_exec(de->name + 8))
? S_IRUGO|S_IWUGO : S_IRWXUGO));
MSDOS_I(inode)->i_start = fat_get_start(sbi, de);
MSDOS_I(inode)->i_logstart = MSDOS_I(inode)->i_start;
inode->i_size = le32_to_cpu(de->size);
inode->i_op = &fat_file_inode_operations;
inode->i_fop = &fat_file_operations;
inode->i_mapping->a_ops = &fat_aops;
MSDOS_I(inode)->mmu_private = inode->i_size;
}
if (de->attr & ATTR_SYS) {
if (sbi->options.sys_immutable)
inode->i_flags |= S_IMMUTABLE;
}
fat_save_attrs(inode, de->attr);
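/* i_blocks counts 512-byte sectors of the cluster-aligned size, e.g. a
 * 1-byte file on 4 KiB clusters accounts for 8 blocks */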
inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
fat_time_fat2unix(sbi, &inode->i_mtime, de->time, de->date, 0);
inode_set_ctime_to_ts(inode, inode->i_mtime);
if (sbi->options.isvfat) {
fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
fat_time_fat2unix(sbi, &MSDOS_I(inode)->i_crtime, de->ctime,
de->cdate, de->ctime_cs);
} else
inode->i_atime = fat_truncate_atime(sbi, &inode->i_mtime);
return 0;
}
static inline void fat_lock_build_inode(struct msdos_sb_info *sbi)
{
if (sbi->options.nfs == FAT_NFS_NOSTALE_RO)
mutex_lock(&sbi->nfs_build_inode_lock);
}
static inline void fat_unlock_build_inode(struct msdos_sb_info *sbi)
{
if (sbi->options.nfs == FAT_NFS_NOSTALE_RO)
mutex_unlock(&sbi->nfs_build_inode_lock);
}
struct inode *fat_build_inode(struct super_block *sb,
struct msdos_dir_entry *de, loff_t i_pos)
{
struct inode *inode;
int err;
fat_lock_build_inode(MSDOS_SB(sb));
inode = fat_iget(sb, i_pos);
if (inode)
goto out;
inode = new_inode(sb);
if (!inode) {
inode = ERR_PTR(-ENOMEM);
goto out;
}
inode->i_ino = iunique(sb, MSDOS_ROOT_INO);
inode_set_iversion(inode, 1);
err = fat_fill_inode(inode, de);
if (err) {
iput(inode);
inode = ERR_PTR(err);
goto out;
}
fat_attach(inode, i_pos);
insert_inode_hash(inode);
out:
fat_unlock_build_inode(MSDOS_SB(sb));
return inode;
}
EXPORT_SYMBOL_GPL(fat_build_inode);
static int __fat_write_inode(struct inode *inode, int wait);
static void fat_free_eofblocks(struct inode *inode)
{
/* Release unwritten fallocated blocks on inode eviction. */
if ((inode->i_blocks << 9) >
round_up(MSDOS_I(inode)->mmu_private,
MSDOS_SB(inode->i_sb)->cluster_size)) {
int err;
fat_truncate_blocks(inode, MSDOS_I(inode)->mmu_private);
/* Fallocate results in updating i_start/i_logstart
 * for a zero byte file. So, restore the original state
 * during evict and commit it, to avoid any corruption on
 * the next access to the cluster chain for the file.
 */
err = __fat_write_inode(inode, inode_needs_sync(inode));
if (err) {
fat_msg(inode->i_sb, KERN_WARNING, "Failed to "
"update on disk inode for unused "
"fallocated blocks, inode could be "
"corrupted. Please run fsck");
}
}
}
static void fat_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
if (!inode->i_nlink) {
inode->i_size = 0;
fat_truncate_blocks(inode, 0);
} else
fat_free_eofblocks(inode);
invalidate_inode_buffers(inode);
clear_inode(inode);
fat_cache_inval_inode(inode);
fat_detach(inode);
}
static void fat_set_state(struct super_block *sb,
unsigned int set, unsigned int force)
{
struct buffer_head *bh;
struct fat_boot_sector *b;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
/* do not change anything if mounted read only */
if (sb_rdonly(sb) && !force)
return;
/* do not change state if fs was dirty */
if (sbi->dirty) {
/* warn only on set (mount). */
if (set)
fat_msg(sb, KERN_WARNING, "Volume was not properly "
"unmounted. Some data may be corrupt. "
"Please run fsck.");
return;
}
bh = sb_bread(sb, 0);
if (bh == NULL) {
fat_msg(sb, KERN_ERR, "unable to read boot sector "
"to mark fs as dirty");
return;
}
b = (struct fat_boot_sector *) bh->b_data;
if (is_fat32(sbi)) {
if (set)
b->fat32.state |= FAT_STATE_DIRTY;
else
b->fat32.state &= ~FAT_STATE_DIRTY;
} else /* fat 16 and 12 */ {
if (set)
b->fat16.state |= FAT_STATE_DIRTY;
else
b->fat16.state &= ~FAT_STATE_DIRTY;
}
mark_buffer_dirty(bh);
sync_dirty_buffer(bh);
brelse(bh);
}
static void fat_reset_iocharset(struct fat_mount_options *opts)
{
if (opts->iocharset != fat_default_iocharset) {
/* Note: opts->iocharset can be NULL here */
kfree(opts->iocharset);
opts->iocharset = fat_default_iocharset;
}
}
static void delayed_free(struct rcu_head *p)
{
struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
unload_nls(sbi->nls_disk);
unload_nls(sbi->nls_io);
fat_reset_iocharset(&sbi->options);
kfree(sbi);
}
static void fat_put_super(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
fat_set_state(sb, 0, 0);
iput(sbi->fsinfo_inode);
iput(sbi->fat_inode);
call_rcu(&sbi->rcu, delayed_free);
}
static struct kmem_cache *fat_inode_cachep;
static struct inode *fat_alloc_inode(struct super_block *sb)
{
struct msdos_inode_info *ei;
ei = alloc_inode_sb(sb, fat_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
init_rwsem(&ei->truncate_lock);
/* Zero these to allow iput() even on a partially initialized inode. */
ei->mmu_private = 0;
ei->i_start = 0;
ei->i_logstart = 0;
ei->i_attrs = 0;
ei->i_pos = 0;
ei->i_crtime.tv_sec = 0;
ei->i_crtime.tv_nsec = 0;
return &ei->vfs_inode;
}
static void fat_free_inode(struct inode *inode)
{
kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
}
static void init_once(void *foo)
{
struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
spin_lock_init(&ei->cache_lru_lock);
ei->nr_caches = 0;
ei->cache_valid_id = FAT_CACHE_VALID + 1;
INIT_LIST_HEAD(&ei->cache_lru);
INIT_HLIST_NODE(&ei->i_fat_hash);
INIT_HLIST_NODE(&ei->i_dir_hash);
inode_init_once(&ei->vfs_inode);
}
static int __init fat_init_inodecache(void)
{
fat_inode_cachep = kmem_cache_create("fat_inode_cache",
sizeof(struct msdos_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
init_once);
if (fat_inode_cachep == NULL)
return -ENOMEM;
return 0;
}
static void __exit fat_destroy_inodecache(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(fat_inode_cachep);
}
static int fat_remount(struct super_block *sb, int *flags, char *data)
{
bool new_rdonly;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
*flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
sync_filesystem(sb);
/* make sure we update state on remount. */
new_rdonly = *flags & SB_RDONLY;
if (new_rdonly != sb_rdonly(sb)) {
if (new_rdonly)
fat_set_state(sb, 0, 0);
else
fat_set_state(sb, 1, 1);
}
return 0;
}
static int fat_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
/* If the count of free clusters is still unknown, count it here. */
if (sbi->free_clusters == -1 || !sbi->free_clus_valid) {
int err = fat_count_free_clusters(dentry->d_sb);
if (err)
return err;
}
buf->f_type = dentry->d_sb->s_magic;
buf->f_bsize = sbi->cluster_size;
buf->f_blocks = sbi->max_cluster - FAT_START_ENT;
buf->f_bfree = sbi->free_clusters;
buf->f_bavail = sbi->free_clusters;
buf->f_fsid = u64_to_fsid(id);
buf->f_namelen =
(sbi->options.isvfat ? FAT_LFN_LEN : 12) * NLS_MAX_CHARSET_SIZE;
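/* 12 covers an 8.3 short name plus the dot; FAT_LFN_LEN (255) is the
 * VFAT long name limit. Both are scaled by the worst-case NLS
 * character size. */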
return 0;
}
static int __fat_write_inode(struct inode *inode, int wait)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh;
struct msdos_dir_entry *raw_entry;
loff_t i_pos;
sector_t blocknr;
int err, offset;
if (inode->i_ino == MSDOS_ROOT_INO)
return 0;
retry:
i_pos = fat_i_pos_read(sbi, inode);
if (!i_pos)
return 0;
fat_get_blknr_offset(sbi, i_pos, &blocknr, &offset);
bh = sb_bread(sb, blocknr);
if (!bh) {
fat_msg(sb, KERN_ERR, "unable to read inode block "
"for updating (i_pos %lld)", i_pos);
return -EIO;
}
spin_lock(&sbi->inode_hash_lock);
if (i_pos != MSDOS_I(inode)->i_pos) {
spin_unlock(&sbi->inode_hash_lock);
brelse(bh);
goto retry;
}
raw_entry = &((struct msdos_dir_entry *) (bh->b_data))[offset];
if (S_ISDIR(inode->i_mode))
raw_entry->size = 0;
else
raw_entry->size = cpu_to_le32(inode->i_size);
raw_entry->attr = fat_make_attrs(inode);
fat_set_start(raw_entry, MSDOS_I(inode)->i_logstart);
fat_time_unix2fat(sbi, &inode->i_mtime, &raw_entry->time,
&raw_entry->date, NULL);
if (sbi->options.isvfat) {
__le16 atime;
fat_time_unix2fat(sbi, &inode->i_atime, &atime,
&raw_entry->adate, NULL);
fat_time_unix2fat(sbi, &MSDOS_I(inode)->i_crtime, &raw_entry->ctime,
&raw_entry->cdate, &raw_entry->ctime_cs);
}
spin_unlock(&sbi->inode_hash_lock);
mark_buffer_dirty(bh);
err = 0;
if (wait)
err = sync_dirty_buffer(bh);
brelse(bh);
return err;
}
static int fat_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err;
if (inode->i_ino == MSDOS_FSINFO_INO) {
struct super_block *sb = inode->i_sb;
mutex_lock(&MSDOS_SB(sb)->s_lock);
err = fat_clusters_flush(sb);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
} else
err = __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
return err;
}
int fat_sync_inode(struct inode *inode)
{
return __fat_write_inode(inode, 1);
}
EXPORT_SYMBOL_GPL(fat_sync_inode);
static int fat_show_options(struct seq_file *m, struct dentry *root);
static const struct super_operations fat_sops = {
.alloc_inode = fat_alloc_inode,
.free_inode = fat_free_inode,
.write_inode = fat_write_inode,
.evict_inode = fat_evict_inode,
.put_super = fat_put_super,
.statfs = fat_statfs,
.remount_fs = fat_remount,
.show_options = fat_show_options,
};
static int fat_show_options(struct seq_file *m, struct dentry *root)
{
struct msdos_sb_info *sbi = MSDOS_SB(root->d_sb);
struct fat_mount_options *opts = &sbi->options;
int isvfat = opts->isvfat;
if (!uid_eq(opts->fs_uid, GLOBAL_ROOT_UID))
seq_printf(m, ",uid=%u",
from_kuid_munged(&init_user_ns, opts->fs_uid));
if (!gid_eq(opts->fs_gid, GLOBAL_ROOT_GID))
seq_printf(m, ",gid=%u",
from_kgid_munged(&init_user_ns, opts->fs_gid));
seq_printf(m, ",fmask=%04o", opts->fs_fmask);
seq_printf(m, ",dmask=%04o", opts->fs_dmask);
if (opts->allow_utime)
seq_printf(m, ",allow_utime=%04o", opts->allow_utime);
if (sbi->nls_disk)
/* strip "cp" prefix from displayed option */
seq_printf(m, ",codepage=%s", &sbi->nls_disk->charset[2]);
if (isvfat) {
if (sbi->nls_io)
seq_printf(m, ",iocharset=%s", sbi->nls_io->charset);
switch (opts->shortname) {
case VFAT_SFN_DISPLAY_WIN95 | VFAT_SFN_CREATE_WIN95:
seq_puts(m, ",shortname=win95");
break;
case VFAT_SFN_DISPLAY_WINNT | VFAT_SFN_CREATE_WINNT:
seq_puts(m, ",shortname=winnt");
break;
case VFAT_SFN_DISPLAY_WINNT | VFAT_SFN_CREATE_WIN95:
seq_puts(m, ",shortname=mixed");
break;
case VFAT_SFN_DISPLAY_LOWER | VFAT_SFN_CREATE_WIN95:
seq_puts(m, ",shortname=lower");
break;
default:
seq_puts(m, ",shortname=unknown");
break;
}
}
if (opts->name_check != 'n')
seq_printf(m, ",check=%c", opts->name_check);
if (opts->usefree)
seq_puts(m, ",usefree");
if (opts->quiet)
seq_puts(m, ",quiet");
if (opts->showexec)
seq_puts(m, ",showexec");
if (opts->sys_immutable)
seq_puts(m, ",sys_immutable");
if (!isvfat) {
if (opts->dotsOK)
seq_puts(m, ",dotsOK=yes");
if (opts->nocase)
seq_puts(m, ",nocase");
} else {
if (opts->utf8)
seq_puts(m, ",utf8");
if (opts->unicode_xlate)
seq_puts(m, ",uni_xlate");
if (!opts->numtail)
seq_puts(m, ",nonumtail");
if (opts->rodir)
seq_puts(m, ",rodir");
}
if (opts->flush)
seq_puts(m, ",flush");
if (opts->tz_set) {
if (opts->time_offset)
seq_printf(m, ",time_offset=%d", opts->time_offset);
else
seq_puts(m, ",tz=UTC");
}
if (opts->errors == FAT_ERRORS_CONT)
seq_puts(m, ",errors=continue");
else if (opts->errors == FAT_ERRORS_PANIC)
seq_puts(m, ",errors=panic");
else
seq_puts(m, ",errors=remount-ro");
if (opts->nfs == FAT_NFS_NOSTALE_RO)
seq_puts(m, ",nfs=nostale_ro");
else if (opts->nfs)
seq_puts(m, ",nfs=stale_rw");
if (opts->discard)
seq_puts(m, ",discard");
if (opts->dos1xfloppy)
seq_puts(m, ",dos1xfloppy");
return 0;
}
enum {
Opt_check_n, Opt_check_r, Opt_check_s, Opt_uid, Opt_gid,
Opt_umask, Opt_dmask, Opt_fmask, Opt_allow_utime, Opt_codepage,
Opt_usefree, Opt_nocase, Opt_quiet, Opt_showexec, Opt_debug,
Opt_immutable, Opt_dots, Opt_nodots,
Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
Opt_obsolete, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
Opt_err_panic, Opt_err_ro, Opt_discard, Opt_nfs, Opt_time_offset,
Opt_nfs_stale_rw, Opt_nfs_nostale_ro, Opt_err, Opt_dos1xfloppy,
};
static const match_table_t fat_tokens = {
{Opt_check_r, "check=relaxed"},
{Opt_check_s, "check=strict"},
{Opt_check_n, "check=normal"},
{Opt_check_r, "check=r"},
{Opt_check_s, "check=s"},
{Opt_check_n, "check=n"},
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_umask, "umask=%o"},
{Opt_dmask, "dmask=%o"},
{Opt_fmask, "fmask=%o"},
{Opt_allow_utime, "allow_utime=%o"},
{Opt_codepage, "codepage=%u"},
{Opt_usefree, "usefree"},
{Opt_nocase, "nocase"},
{Opt_quiet, "quiet"},
{Opt_showexec, "showexec"},
{Opt_debug, "debug"},
{Opt_immutable, "sys_immutable"},
{Opt_flush, "flush"},
{Opt_tz_utc, "tz=UTC"},
{Opt_time_offset, "time_offset=%d"},
{Opt_err_cont, "errors=continue"},
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
{Opt_discard, "discard"},
{Opt_nfs_stale_rw, "nfs"},
{Opt_nfs_stale_rw, "nfs=stale_rw"},
{Opt_nfs_nostale_ro, "nfs=nostale_ro"},
{Opt_dos1xfloppy, "dos1xfloppy"},
{Opt_obsolete, "conv=binary"},
{Opt_obsolete, "conv=text"},
{Opt_obsolete, "conv=auto"},
{Opt_obsolete, "conv=b"},
{Opt_obsolete, "conv=t"},
{Opt_obsolete, "conv=a"},
{Opt_obsolete, "fat=%u"},
{Opt_obsolete, "blocksize=%u"},
{Opt_obsolete, "cvf_format=%20s"},
{Opt_obsolete, "cvf_options=%100s"},
{Opt_obsolete, "posix"},
{Opt_err, NULL},
};
static const match_table_t msdos_tokens = {
{Opt_nodots, "nodots"},
{Opt_nodots, "dotsOK=no"},
{Opt_dots, "dots"},
{Opt_dots, "dotsOK=yes"},
{Opt_err, NULL}
};
static const match_table_t vfat_tokens = {
{Opt_charset, "iocharset=%s"},
{Opt_shortname_lower, "shortname=lower"},
{Opt_shortname_win95, "shortname=win95"},
{Opt_shortname_winnt, "shortname=winnt"},
{Opt_shortname_mixed, "shortname=mixed"},
{Opt_utf8_no, "utf8=0"}, /* 0 or no or false */
{Opt_utf8_no, "utf8=no"},
{Opt_utf8_no, "utf8=false"},
{Opt_utf8_yes, "utf8=1"}, /* empty or 1 or yes or true */
{Opt_utf8_yes, "utf8=yes"},
{Opt_utf8_yes, "utf8=true"},
{Opt_utf8_yes, "utf8"},
{Opt_uni_xl_no, "uni_xlate=0"}, /* 0 or no or false */
{Opt_uni_xl_no, "uni_xlate=no"},
{Opt_uni_xl_no, "uni_xlate=false"},
{Opt_uni_xl_yes, "uni_xlate=1"}, /* empty or 1 or yes or true */
{Opt_uni_xl_yes, "uni_xlate=yes"},
{Opt_uni_xl_yes, "uni_xlate=true"},
{Opt_uni_xl_yes, "uni_xlate"},
{Opt_nonumtail_no, "nonumtail=0"}, /* 0 or no or false */
{Opt_nonumtail_no, "nonumtail=no"},
{Opt_nonumtail_no, "nonumtail=false"},
{Opt_nonumtail_yes, "nonumtail=1"}, /* empty or 1 or yes or true */
{Opt_nonumtail_yes, "nonumtail=yes"},
{Opt_nonumtail_yes, "nonumtail=true"},
{Opt_nonumtail_yes, "nonumtail"},
{Opt_rodir, "rodir"},
{Opt_err, NULL}
};
static int parse_options(struct super_block *sb, char *options, int is_vfat,
int silent, int *debug, struct fat_mount_options *opts)
{
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
char *iocharset;
opts->isvfat = is_vfat;
opts->fs_uid = current_uid();
opts->fs_gid = current_gid();
opts->fs_fmask = opts->fs_dmask = current_umask();
opts->allow_utime = -1;
opts->codepage = fat_default_codepage;
fat_reset_iocharset(opts);
if (is_vfat) {
opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
opts->rodir = 0;
} else {
opts->shortname = 0;
opts->rodir = 1;
}
opts->name_check = 'n';
opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0;
opts->unicode_xlate = 0;
opts->numtail = 1;
opts->usefree = opts->nocase = 0;
opts->tz_set = 0;
opts->nfs = 0;
opts->errors = FAT_ERRORS_RO;
*debug = 0;
opts->utf8 = IS_ENABLED(CONFIG_FAT_DEFAULT_UTF8) && is_vfat;
if (!options)
goto out;
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
continue;
token = match_token(p, fat_tokens, args);
if (token == Opt_err) {
if (is_vfat)
token = match_token(p, vfat_tokens, args);
else
token = match_token(p, msdos_tokens, args);
}
switch (token) {
case Opt_check_s:
opts->name_check = 's';
break;
case Opt_check_r:
opts->name_check = 'r';
break;
case Opt_check_n:
opts->name_check = 'n';
break;
case Opt_usefree:
opts->usefree = 1;
break;
case Opt_nocase:
if (!is_vfat)
opts->nocase = 1;
else {
/* for backward compatibility */
opts->shortname = VFAT_SFN_DISPLAY_WIN95
| VFAT_SFN_CREATE_WIN95;
}
break;
case Opt_quiet:
opts->quiet = 1;
break;
case Opt_showexec:
opts->showexec = 1;
break;
case Opt_debug:
*debug = 1;
break;
case Opt_immutable:
opts->sys_immutable = 1;
break;
case Opt_uid:
if (match_int(&args[0], &option))
return -EINVAL;
opts->fs_uid = make_kuid(current_user_ns(), option);
if (!uid_valid(opts->fs_uid))
return -EINVAL;
break;
case Opt_gid:
if (match_int(&args[0], &option))
return -EINVAL;
opts->fs_gid = make_kgid(current_user_ns(), option);
if (!gid_valid(opts->fs_gid))
return -EINVAL;
break;
case Opt_umask:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->fs_fmask = opts->fs_dmask = option;
break;
case Opt_dmask:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->fs_dmask = option;
break;
case Opt_fmask:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->fs_fmask = option;
break;
case Opt_allow_utime:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->allow_utime = option & (S_IWGRP | S_IWOTH);
break;
case Opt_codepage:
if (match_int(&args[0], &option))
return -EINVAL;
opts->codepage = option;
break;
case Opt_flush:
opts->flush = 1;
break;
case Opt_time_offset:
if (match_int(&args[0], &option))
return -EINVAL;
/*
* GMT+-12 zones may have DST corrections so at least
* 13 hours difference is needed. Make the limit 24
* just in case someone invents something unusual.
*/
if (option < -24 * 60 || option > 24 * 60)
return -EINVAL;
opts->tz_set = 1;
opts->time_offset = option;
break;
case Opt_tz_utc:
opts->tz_set = 1;
opts->time_offset = 0;
break;
case Opt_err_cont:
opts->errors = FAT_ERRORS_CONT;
break;
case Opt_err_panic:
opts->errors = FAT_ERRORS_PANIC;
break;
case Opt_err_ro:
opts->errors = FAT_ERRORS_RO;
break;
case Opt_nfs_stale_rw:
opts->nfs = FAT_NFS_STALE_RW;
break;
case Opt_nfs_nostale_ro:
opts->nfs = FAT_NFS_NOSTALE_RO;
break;
case Opt_dos1xfloppy:
opts->dos1xfloppy = 1;
break;
/* msdos specific */
case Opt_dots:
opts->dotsOK = 1;
break;
case Opt_nodots:
opts->dotsOK = 0;
break;
/* vfat specific */
case Opt_charset:
fat_reset_iocharset(opts);
iocharset = match_strdup(&args[0]);
if (!iocharset)
return -ENOMEM;
opts->iocharset = iocharset;
break;
case Opt_shortname_lower:
opts->shortname = VFAT_SFN_DISPLAY_LOWER
| VFAT_SFN_CREATE_WIN95;
break;
case Opt_shortname_win95:
opts->shortname = VFAT_SFN_DISPLAY_WIN95
| VFAT_SFN_CREATE_WIN95;
break;
case Opt_shortname_winnt:
opts->shortname = VFAT_SFN_DISPLAY_WINNT
| VFAT_SFN_CREATE_WINNT;
break;
case Opt_shortname_mixed:
opts->shortname = VFAT_SFN_DISPLAY_WINNT
| VFAT_SFN_CREATE_WIN95;
break;
case Opt_utf8_no: /* 0 or no or false */
opts->utf8 = 0;
break;
case Opt_utf8_yes: /* empty or 1 or yes or true */
opts->utf8 = 1;
break;
case Opt_uni_xl_no: /* 0 or no or false */
opts->unicode_xlate = 0;
break;
case Opt_uni_xl_yes: /* empty or 1 or yes or true */
opts->unicode_xlate = 1;
break;
case Opt_nonumtail_no: /* 0 or no or false */
opts->numtail = 1; /* negated option */
break;
case Opt_nonumtail_yes: /* empty or 1 or yes or true */
opts->numtail = 0; /* negated option */
break;
case Opt_rodir:
opts->rodir = 1;
break;
case Opt_discard:
opts->discard = 1;
break;
/* obsolete mount options */
case Opt_obsolete:
fat_msg(sb, KERN_INFO, "\"%s\" option is obsolete, "
"not supported now", p);
break;
/* unknown option */
default:
if (!silent) {
fat_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" "
"or missing value", p);
}
return -EINVAL;
}
}
out:
/* UTF-8 doesn't provide FAT semantics */
if (!strcmp(opts->iocharset, "utf8")) {
fat_msg(sb, KERN_WARNING, "utf8 is not a recommended IO charset"
" for FAT filesystems, filesystem will be "
"case sensitive!");
}
/* If user doesn't specify allow_utime, it's initialized from dmask. */
if (opts->allow_utime == (unsigned short)-1)
opts->allow_utime = ~opts->fs_dmask & (S_IWGRP | S_IWOTH);
if (opts->unicode_xlate)
opts->utf8 = 0;
if (opts->nfs == FAT_NFS_NOSTALE_RO) {
sb->s_flags |= SB_RDONLY;
sb->s_export_op = &fat_export_ops_nostale;
}
return 0;
}
static int fat_read_root(struct inode *inode)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
int error;
MSDOS_I(inode)->i_pos = MSDOS_ROOT_INO;
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
inode_inc_iversion(inode);
inode->i_generation = 0;
inode->i_mode = fat_make_mode(sbi, ATTR_DIR, S_IRWXUGO);
inode->i_op = sbi->dir_ops;
inode->i_fop = &fat_dir_operations;
if (is_fat32(sbi)) {
MSDOS_I(inode)->i_start = sbi->root_cluster;
error = fat_calc_dir_size(inode);
if (error < 0)
return error;
} else {
MSDOS_I(inode)->i_start = 0;
inode->i_size = sbi->dir_entries * sizeof(struct msdos_dir_entry);
}
inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
MSDOS_I(inode)->i_logstart = 0;
MSDOS_I(inode)->mmu_private = inode->i_size;
fat_save_attrs(inode, ATTR_DIR);
inode->i_mtime = inode->i_atime = inode_set_ctime(inode, 0, 0);
set_nlink(inode, fat_subdirs(inode)+2);
return 0;
}
static unsigned long calc_fat_clusters(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
/* Divide first to avoid overflow */
if (!is_fat12(sbi)) {
unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits;
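/* e.g. FAT16 on 512-byte sectors: 512 * 8 / 16 = 256 entries per
 * FAT sector */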
return ent_per_sec * sbi->fat_length;
}
return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
}
static bool fat_bpb_is_zero(struct fat_boot_sector *b)
{
if (get_unaligned_le16(&b->sector_size))
return false;
if (b->sec_per_clus)
return false;
if (b->reserved)
return false;
if (b->fats)
return false;
if (get_unaligned_le16(&b->dir_entries))
return false;
if (get_unaligned_le16(&b->sectors))
return false;
if (b->media)
return false;
if (b->fat_length)
return false;
if (b->secs_track)
return false;
if (b->heads)
return false;
return true;
}
static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b,
int silent, struct fat_bios_param_block *bpb)
{
int error = -EINVAL;
/* Read in BPB ... */
memset(bpb, 0, sizeof(*bpb));
bpb->fat_sector_size = get_unaligned_le16(&b->sector_size);
bpb->fat_sec_per_clus = b->sec_per_clus;
bpb->fat_reserved = le16_to_cpu(b->reserved);
bpb->fat_fats = b->fats;
bpb->fat_dir_entries = get_unaligned_le16(&b->dir_entries);
bpb->fat_sectors = get_unaligned_le16(&b->sectors);
bpb->fat_fat_length = le16_to_cpu(b->fat_length);
bpb->fat_total_sect = le32_to_cpu(b->total_sect);
bpb->fat16_state = b->fat16.state;
bpb->fat16_vol_id = get_unaligned_le32(b->fat16.vol_id);
bpb->fat32_length = le32_to_cpu(b->fat32.length);
bpb->fat32_root_cluster = le32_to_cpu(b->fat32.root_cluster);
bpb->fat32_info_sector = le16_to_cpu(b->fat32.info_sector);
bpb->fat32_state = b->fat32.state;
bpb->fat32_vol_id = get_unaligned_le32(b->fat32.vol_id);
/* Validate this looks like a FAT filesystem BPB */
if (!bpb->fat_reserved) {
if (!silent)
fat_msg(sb, KERN_ERR,
"bogus number of reserved sectors");
goto out;
}
if (!bpb->fat_fats) {
if (!silent)
fat_msg(sb, KERN_ERR, "bogus number of FAT structure");
goto out;
}
/*
* Earlier we checked here that b->secs_track and b->heads are nonzero,
* but it turns out valid FAT filesystems can have zero there.
*/
if (!fat_valid_media(b->media)) {
if (!silent)
fat_msg(sb, KERN_ERR, "invalid media value (0x%02x)",
(unsigned)b->media);
goto out;
}
if (!is_power_of_2(bpb->fat_sector_size)
|| (bpb->fat_sector_size < 512)
|| (bpb->fat_sector_size > 4096)) {
if (!silent)
fat_msg(sb, KERN_ERR, "bogus logical sector size %u",
(unsigned)bpb->fat_sector_size);
goto out;
}
if (!is_power_of_2(bpb->fat_sec_per_clus)) {
if (!silent)
fat_msg(sb, KERN_ERR, "bogus sectors per cluster %u",
(unsigned)bpb->fat_sec_per_clus);
goto out;
}
if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) {
if (!silent)
fat_msg(sb, KERN_ERR, "bogus number of FAT sectors");
goto out;
}
error = 0;
out:
return error;
}
static int fat_read_static_bpb(struct super_block *sb,
struct fat_boot_sector *b, int silent,
struct fat_bios_param_block *bpb)
{
static const char *notdos1x = "This doesn't look like a DOS 1.x volume";
sector_t bd_sects = bdev_nr_sectors(sb->s_bdev);
struct fat_floppy_defaults *fdefaults = NULL;
int error = -EINVAL;
unsigned i;
/* 16-bit DOS 1.x reliably wrote bootstrap short-jmp code */
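/* (0xeb is an x86 short jmp opcode and 0x90 a nop: such boot sectors
 * start with "jmp short xx; nop") */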
if (b->ignored[0] != 0xeb || b->ignored[2] != 0x90) {
if (!silent)
fat_msg(sb, KERN_ERR,
"%s; no bootstrapping code", notdos1x);
goto out;
}
/*
* If any value in this region is non-zero, it isn't archaic
* DOS.
*/
if (!fat_bpb_is_zero(b)) {
if (!silent)
fat_msg(sb, KERN_ERR,
"%s; DOS 2.x BPB is non-zero", notdos1x);
goto out;
}
for (i = 0; i < ARRAY_SIZE(floppy_defaults); i++) {
if (floppy_defaults[i].nr_sectors == bd_sects) {
fdefaults = &floppy_defaults[i];
break;
}
}
if (fdefaults == NULL) {
if (!silent)
fat_msg(sb, KERN_WARNING,
"This looks like a DOS 1.x volume, but isn't a recognized floppy size (%llu sectors)",
(u64)bd_sects);
goto out;
}
if (!silent)
fat_msg(sb, KERN_INFO,
"This looks like a DOS 1.x volume; assuming default BPB values");
memset(bpb, 0, sizeof(*bpb));
bpb->fat_sector_size = SECTOR_SIZE;
bpb->fat_sec_per_clus = fdefaults->sec_per_clus;
bpb->fat_reserved = 1;
bpb->fat_fats = 2;
bpb->fat_dir_entries = fdefaults->dir_entries;
bpb->fat_sectors = fdefaults->nr_sectors;
bpb->fat_fat_length = fdefaults->fat_length;
error = 0;
out:
return error;
}
/*
* Read the super block of an MS-DOS FS.
*/
int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
void (*setup)(struct super_block *))
{
struct inode *root_inode = NULL, *fat_inode = NULL;
struct inode *fsinfo_inode = NULL;
struct buffer_head *bh;
struct fat_bios_param_block bpb;
struct msdos_sb_info *sbi;
u16 logical_sector_size;
u32 total_sectors, total_clusters, fat_clusters, rootdir_sectors;
int debug;
long error;
char buf[50];
struct timespec64 ts;
/*
* GFP_KERNEL is ok here, because while we do hold the
* superblock lock, memory pressure can't call back into
* the filesystem, since we're only just about to mount
* it and have no inodes etc active!
*/
sbi = kzalloc(sizeof(struct msdos_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
sb->s_flags |= SB_NODIRATIME;
sb->s_magic = MSDOS_SUPER_MAGIC;
sb->s_op = &fat_sops;
sb->s_export_op = &fat_export_ops;
/*
* FAT timestamps are complex and truncated by FAT itself, so
* we set a 1 ns granularity here to be fast.
*/
sb->s_time_gran = 1;
mutex_init(&sbi->nfs_build_inode_lock);
ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
error = parse_options(sb, data, isvfat, silent, &debug, &sbi->options);
if (error)
goto out_fail;
setup(sb); /* flavour-specific stuff that needs options */
error = -EIO;
sb_min_blocksize(sb, 512);
bh = sb_bread(sb, 0);
if (bh == NULL) {
fat_msg(sb, KERN_ERR, "unable to read boot sector");
goto out_fail;
}
error = fat_read_bpb(sb, (struct fat_boot_sector *)bh->b_data, silent,
&bpb);
if (error == -EINVAL && sbi->options.dos1xfloppy)
error = fat_read_static_bpb(sb,
(struct fat_boot_sector *)bh->b_data, silent, &bpb);
brelse(bh);
if (error == -EINVAL)
goto out_invalid;
else if (error)
goto out_fail;
logical_sector_size = bpb.fat_sector_size;
sbi->sec_per_clus = bpb.fat_sec_per_clus;
error = -EIO;
if (logical_sector_size < sb->s_blocksize) {
fat_msg(sb, KERN_ERR, "logical sector size too small for device"
" (logical sector size = %u)", logical_sector_size);
goto out_fail;
}
if (logical_sector_size > sb->s_blocksize) {
struct buffer_head *bh_resize;
if (!sb_set_blocksize(sb, logical_sector_size)) {
fat_msg(sb, KERN_ERR, "unable to set blocksize %u",
logical_sector_size);
goto out_fail;
}
/* Verify that the larger boot sector is fully readable */
bh_resize = sb_bread(sb, 0);
if (bh_resize == NULL) {
fat_msg(sb, KERN_ERR, "unable to read boot sector"
" (logical sector size = %lu)",
sb->s_blocksize);
goto out_fail;
}
brelse(bh_resize);
}
mutex_init(&sbi->s_lock);
sbi->cluster_size = sb->s_blocksize * sbi->sec_per_clus;
sbi->cluster_bits = ffs(sbi->cluster_size) - 1;
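/* cluster_size is a power of two, so ffs() - 1 above is log2; e.g. a
 * 4096-byte cluster yields cluster_bits = 12 */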
sbi->fats = bpb.fat_fats;
sbi->fat_bits = 0; /* Don't know yet */
sbi->fat_start = bpb.fat_reserved;
sbi->fat_length = bpb.fat_fat_length;
sbi->root_cluster = 0;
sbi->free_clusters = -1; /* Don't know yet */
sbi->free_clus_valid = 0;
sbi->prev_free = FAT_START_ENT;
sb->s_maxbytes = 0xffffffff;
fat_time_fat2unix(sbi, &ts, 0, cpu_to_le16(FAT_DATE_MIN), 0);
sb->s_time_min = ts.tv_sec;
fat_time_fat2unix(sbi, &ts, cpu_to_le16(FAT_TIME_MAX),
cpu_to_le16(FAT_DATE_MAX), 0);
sb->s_time_max = ts.tv_sec;
if (!sbi->fat_length && bpb.fat32_length) {
struct fat_boot_fsinfo *fsinfo;
struct buffer_head *fsinfo_bh;
/* Must be FAT32 */
sbi->fat_bits = 32;
sbi->fat_length = bpb.fat32_length;
sbi->root_cluster = bpb.fat32_root_cluster;
/* MC - if info_sector is 0, don't multiply by 0 */
sbi->fsinfo_sector = bpb.fat32_info_sector;
if (sbi->fsinfo_sector == 0)
sbi->fsinfo_sector = 1;
fsinfo_bh = sb_bread(sb, sbi->fsinfo_sector);
if (fsinfo_bh == NULL) {
fat_msg(sb, KERN_ERR, "bread failed, FSINFO block"
" (sector = %lu)", sbi->fsinfo_sector);
goto out_fail;
}
fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
if (!IS_FSINFO(fsinfo)) {
fat_msg(sb, KERN_WARNING, "Invalid FSINFO signature: "
"0x%08x, 0x%08x (sector = %lu)",
le32_to_cpu(fsinfo->signature1),
le32_to_cpu(fsinfo->signature2),
sbi->fsinfo_sector);
} else {
if (sbi->options.usefree)
sbi->free_clus_valid = 1;
sbi->free_clusters = le32_to_cpu(fsinfo->free_clusters);
sbi->prev_free = le32_to_cpu(fsinfo->next_cluster);
}
brelse(fsinfo_bh);
}
/* interpret volume ID as a little endian 32 bit integer */
if (is_fat32(sbi))
sbi->vol_id = bpb.fat32_vol_id;
else /* fat 16 or 12 */
sbi->vol_id = bpb.fat16_vol_id;
sbi->dir_per_block = sb->s_blocksize / sizeof(struct msdos_dir_entry);
sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
sbi->dir_start = sbi->fat_start + sbi->fats * sbi->fat_length;
sbi->dir_entries = bpb.fat_dir_entries;
if (sbi->dir_entries & (sbi->dir_per_block - 1)) {
if (!silent)
fat_msg(sb, KERN_ERR, "bogus number of directory entries"
" (%u)", sbi->dir_entries);
goto out_invalid;
}
rootdir_sectors = sbi->dir_entries
* sizeof(struct msdos_dir_entry) / sb->s_blocksize;
sbi->data_start = sbi->dir_start + rootdir_sectors;
total_sectors = bpb.fat_sectors;
if (total_sectors == 0)
total_sectors = bpb.fat_total_sect;
total_clusters = (total_sectors - sbi->data_start) / sbi->sec_per_clus;
if (!is_fat32(sbi))
sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12;
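/* MAX_FAT12 above is the largest cluster count a 12-bit FAT can
 * address (0xFF4 = 4084); anything bigger must be FAT16 */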
/* some OSes set FAT_STATE_DIRTY and clean it on unmount. */
if (is_fat32(sbi))
sbi->dirty = bpb.fat32_state & FAT_STATE_DIRTY;
else /* fat 16 or 12 */
sbi->dirty = bpb.fat16_state & FAT_STATE_DIRTY;
/* check that FAT table does not overflow */
fat_clusters = calc_fat_clusters(sb);
total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
if (total_clusters > max_fat(sb)) {
if (!silent)
fat_msg(sb, KERN_ERR, "count of clusters too big (%u)",
total_clusters);
goto out_invalid;
}
sbi->max_cluster = total_clusters + FAT_START_ENT;
/* check the free_clusters, it's not necessarily correct */
if (sbi->free_clusters != -1 && sbi->free_clusters > total_clusters)
sbi->free_clusters = -1;
/* check the prev_free, it's not necessarily correct */
sbi->prev_free %= sbi->max_cluster;
if (sbi->prev_free < FAT_START_ENT)
sbi->prev_free = FAT_START_ENT;
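/* prev_free is only an allocation hint, so clamping it into the valid
 * range [FAT_START_ENT, max_cluster) is always safe */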
/* set up enough so that it can read an inode */
fat_hash_init(sb);
dir_hash_init(sb);
fat_ent_access_init(sb);
/*
* The low byte of the first FAT entry must have the same value as
* the media field of the boot sector. But in real world, too many
* devices are writing wrong values. So, removed that validity check.
*
* The removed check compared the first FAT entry to a value dependent
* on the media field like this:
* == (0x0F00 | media), for FAT12
* == (0XFF00 | media), for FAT16
* == (0x0FFFFF00 | media), for FAT32
*/
error = -EINVAL;
sprintf(buf, "cp%d", sbi->options.codepage);
sbi->nls_disk = load_nls(buf);
if (!sbi->nls_disk) {
fat_msg(sb, KERN_ERR, "codepage %s not found", buf);
goto out_fail;
}
/* FIXME: utf8 is using iocharset for upper/lower conversion */
if (sbi->options.isvfat) {
sbi->nls_io = load_nls(sbi->options.iocharset);
if (!sbi->nls_io) {
fat_msg(sb, KERN_ERR, "IO charset %s not found",
sbi->options.iocharset);
goto out_fail;
}
}
error = -ENOMEM;
fat_inode = new_inode(sb);
if (!fat_inode)
goto out_fail;
sbi->fat_inode = fat_inode;
fsinfo_inode = new_inode(sb);
if (!fsinfo_inode)
goto out_fail;
fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
sbi->fsinfo_inode = fsinfo_inode;
insert_inode_hash(fsinfo_inode);
root_inode = new_inode(sb);
if (!root_inode)
goto out_fail;
root_inode->i_ino = MSDOS_ROOT_INO;
inode_set_iversion(root_inode, 1);
error = fat_read_root(root_inode);
if (error < 0) {
iput(root_inode);
goto out_fail;
}
error = -ENOMEM;
insert_inode_hash(root_inode);
fat_attach(root_inode, 0);
sb->s_root = d_make_root(root_inode);
if (!sb->s_root) {
fat_msg(sb, KERN_ERR, "get root inode failed");
goto out_fail;
}
if (sbi->options.discard && !bdev_max_discard_sectors(sb->s_bdev))
fat_msg(sb, KERN_WARNING,
"mounting with \"discard\" option, but the device does not support discard");
fat_set_state(sb, 1, 0);
return 0;
out_invalid:
error = -EINVAL;
if (!silent)
fat_msg(sb, KERN_INFO, "Can't find a valid FAT filesystem");
out_fail:
iput(fsinfo_inode);
iput(fat_inode);
unload_nls(sbi->nls_io);
unload_nls(sbi->nls_disk);
fat_reset_iocharset(&sbi->options);
sb->s_fs_info = NULL;
kfree(sbi);
return error;
}
EXPORT_SYMBOL_GPL(fat_fill_super);
/*
* Helper function for fat_flush_inodes. This writes both the inode
* and the file data blocks, waiting for data writes that were in
* flight before the start of the call. It does not wait for any I/O
* started during the call.
*/
static int writeback_inode(struct inode *inode)
{
int ret;
/* If we used wait=1, sync_inode_metadata would wait for the inode's
 * I/O to finish. So wait=0 is sent down to sync_inode_metadata
 * and filemap_fdatawrite is used for the data blocks.
 */
ret = sync_inode_metadata(inode, 0);
if (!ret)
ret = filemap_fdatawrite(inode->i_mapping);
return ret;
}
/*
* Write data and metadata corresponding to i1 and i2. The I/O is
* started but we do not wait for any of it to finish.
*
* sync_blockdev_nowait() is used for the block device, so if there is
* a dirty page for a block already in flight, we will not wait and
* start the I/O over again.
*/
int fat_flush_inodes(struct super_block *sb, struct inode *i1, struct inode *i2)
{
int ret = 0;
if (!MSDOS_SB(sb)->options.flush)
return 0;
if (i1)
ret = writeback_inode(i1);
if (!ret && i2)
ret = writeback_inode(i2);
if (!ret)
ret = sync_blockdev_nowait(sb->s_bdev);
return ret;
}
EXPORT_SYMBOL_GPL(fat_flush_inodes);
static int __init init_fat_fs(void)
{
int err;
err = fat_cache_init();
if (err)
return err;
err = fat_init_inodecache();
if (err)
goto failed;
return 0;
failed:
fat_cache_destroy();
return err;
}
static void __exit exit_fat_fs(void)
{
fat_cache_destroy();
fat_destroy_inodecache();
}
module_init(init_fat_fs)
module_exit(exit_fat_fs)
MODULE_LICENSE("GPL");
| linux-master | fs/fat/inode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/fat/cache.c
*
* Written 1992,1993 by Werner Almesberger
*
* Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
* of inode number.
* May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
*/
#include <linux/slab.h>
#include "fat.h"
/* this must be > 0. */
#define FAT_MAX_CACHE 8
struct fat_cache {
struct list_head cache_list;
int nr_contig; /* number of contiguous clusters */
int fcluster; /* cluster number in the file. */
int dcluster; /* cluster number on disk. */
};
struct fat_cache_id {
unsigned int id;
int nr_contig;
int fcluster;
int dcluster;
};
static inline int fat_max_cache(struct inode *inode)
{
return FAT_MAX_CACHE;
}
static struct kmem_cache *fat_cache_cachep;
static void init_once(void *foo)
{
struct fat_cache *cache = (struct fat_cache *)foo;
INIT_LIST_HEAD(&cache->cache_list);
}
int __init fat_cache_init(void)
{
fat_cache_cachep = kmem_cache_create("fat_cache",
sizeof(struct fat_cache),
0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
init_once);
if (fat_cache_cachep == NULL)
return -ENOMEM;
return 0;
}
void fat_cache_destroy(void)
{
kmem_cache_destroy(fat_cache_cachep);
}
static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
}
static inline void fat_cache_free(struct fat_cache *cache)
{
BUG_ON(!list_empty(&cache->cache_list));
kmem_cache_free(fat_cache_cachep, cache);
}
static inline void fat_cache_update_lru(struct inode *inode,
struct fat_cache *cache)
{
if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}
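/*
 * Look up the cached file-cluster to disk-cluster mapping nearest to
 * fclus. On a hit, *cached_fclus/*cached_dclus are set to the closest
 * cached position not beyond fclus and the offset into the cached run
 * is returned; on a miss -1 is returned and the caller must walk the
 * chain from the start.
 */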
static int fat_cache_lookup(struct inode *inode, int fclus,
struct fat_cache_id *cid,
int *cached_fclus, int *cached_dclus)
{
static struct fat_cache nohit = { .fcluster = 0, };
struct fat_cache *hit = &nohit, *p;
int offset = -1;
spin_lock(&MSDOS_I(inode)->cache_lru_lock);
list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
/* Find the cache entry for "fclus", or the nearest one before it. */
if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
hit = p;
if ((hit->fcluster + hit->nr_contig) < fclus) {
offset = hit->nr_contig;
} else {
offset = fclus - hit->fcluster;
break;
}
}
}
if (hit != &nohit) {
fat_cache_update_lru(inode, hit);
cid->id = MSDOS_I(inode)->cache_valid_id;
cid->nr_contig = hit->nr_contig;
cid->fcluster = hit->fcluster;
cid->dcluster = hit->dcluster;
*cached_fclus = cid->fcluster + offset;
*cached_dclus = cid->dcluster + offset;
}
spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
return offset;
}
static struct fat_cache *fat_cache_merge(struct inode *inode,
struct fat_cache_id *new)
{
struct fat_cache *p;
list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
/* Find the same part of the cluster chain as "new". */
if (p->fcluster == new->fcluster) {
BUG_ON(p->dcluster != new->dcluster);
if (new->nr_contig > p->nr_contig)
p->nr_contig = new->nr_contig;
return p;
}
}
return NULL;
}
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
struct fat_cache *cache, *tmp;
if (new->fcluster == -1) /* dummy cache */
return;
spin_lock(&MSDOS_I(inode)->cache_lru_lock);
if (new->id != FAT_CACHE_VALID &&
new->id != MSDOS_I(inode)->cache_valid_id)
goto out; /* this cache was invalidated */
cache = fat_cache_merge(inode, new);
if (cache == NULL) {
if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
MSDOS_I(inode)->nr_caches++;
spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
tmp = fat_cache_alloc(inode);
if (!tmp) {
spin_lock(&MSDOS_I(inode)->cache_lru_lock);
MSDOS_I(inode)->nr_caches--;
spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
return;
}
spin_lock(&MSDOS_I(inode)->cache_lru_lock);
cache = fat_cache_merge(inode, new);
if (cache != NULL) {
MSDOS_I(inode)->nr_caches--;
fat_cache_free(tmp);
goto out_update_lru;
}
cache = tmp;
} else {
struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
cache = list_entry(p, struct fat_cache, cache_list);
}
cache->fcluster = new->fcluster;
cache->dcluster = new->dcluster;
cache->nr_contig = new->nr_contig;
}
out_update_lru:
fat_cache_update_lru(inode, cache);
out:
spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}
/*
* Cache invalidation occurs rarely, thus the LRU chain is not updated. It
* fixes itself after a while.
*/
static void __fat_cache_inval_inode(struct inode *inode)
{
struct msdos_inode_info *i = MSDOS_I(inode);
struct fat_cache *cache;
while (!list_empty(&i->cache_lru)) {
cache = list_entry(i->cache_lru.next,
struct fat_cache, cache_list);
list_del_init(&cache->cache_list);
i->nr_caches--;
fat_cache_free(cache);
}
	/* Bump the id; cached entries tagged with older ids are discarded. */
i->cache_valid_id++;
if (i->cache_valid_id == FAT_CACHE_VALID)
i->cache_valid_id++;
}
void fat_cache_inval_inode(struct inode *inode)
{
spin_lock(&MSDOS_I(inode)->cache_lru_lock);
__fat_cache_inval_inode(inode);
spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
cid->nr_contig++;
return ((cid->dcluster + cid->nr_contig) == dclus);
}
static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
cid->id = FAT_CACHE_VALID;
cid->fcluster = fclus;
cid->dcluster = dclus;
cid->nr_contig = 0;
}
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const int limit = sb->s_maxbytes >> sbi->cluster_bits;
struct fat_entry fatent;
struct fat_cache_id cid;
int nr;
BUG_ON(MSDOS_I(inode)->i_start == 0);
*fclus = 0;
*dclus = MSDOS_I(inode)->i_start;
if (!fat_valid_entry(sbi, *dclus)) {
fat_fs_error_ratelimit(sb,
"%s: invalid start cluster (i_pos %lld, start %08x)",
__func__, MSDOS_I(inode)->i_pos, *dclus);
return -EIO;
}
if (cluster == 0)
return 0;
if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * Dummy entry, never contiguous; it is reinitialized
		 * by cache_init() later.
		 */
cache_init(&cid, -1, -1);
}
fatent_init(&fatent);
while (*fclus < cluster) {
		/* prevent an infinite loop over a corrupted cluster chain */
if (*fclus > limit) {
fat_fs_error_ratelimit(sb,
"%s: detected the cluster chain loop (i_pos %lld)",
__func__, MSDOS_I(inode)->i_pos);
nr = -EIO;
goto out;
}
nr = fat_ent_read(inode, &fatent, *dclus);
if (nr < 0)
goto out;
else if (nr == FAT_ENT_FREE) {
fat_fs_error_ratelimit(sb,
"%s: invalid cluster chain (i_pos %lld)",
__func__, MSDOS_I(inode)->i_pos);
nr = -EIO;
goto out;
} else if (nr == FAT_ENT_EOF) {
fat_cache_add(inode, &cid);
goto out;
}
(*fclus)++;
*dclus = nr;
if (!cache_contiguous(&cid, *dclus))
cache_init(&cid, *fclus, *dclus);
}
nr = 0;
fat_cache_add(inode, &cid);
out:
fatent_brelse(&fatent);
return nr;
}
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
struct super_block *sb = inode->i_sb;
int ret, fclus, dclus;
if (MSDOS_I(inode)->i_start == 0)
return 0;
ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
if (ret < 0)
return ret;
else if (ret == FAT_ENT_EOF) {
fat_fs_error(sb, "%s: request beyond EOF (i_pos %lld)",
__func__, MSDOS_I(inode)->i_pos);
return -EIO;
}
return dclus;
}
int fat_get_mapped_cluster(struct inode *inode, sector_t sector,
sector_t last_block,
unsigned long *mapped_blocks, sector_t *bmap)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int cluster, offset;
cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
offset = sector & (sbi->sec_per_clus - 1);
cluster = fat_bmap_cluster(inode, cluster);
if (cluster < 0)
return cluster;
else if (cluster) {
*bmap = fat_clus_to_blknr(sbi, cluster) + offset;
*mapped_blocks = sbi->sec_per_clus - offset;
if (*mapped_blocks > last_block - sector)
*mapped_blocks = last_block - sector;
}
return 0;
}
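/*
 * Arithmetic sketch of the sector-to-cluster split above (standalone
 * userspace example; sec_per_clus, the data-area start and the FAT
 * mapping are made-up values). Mirrors fat_clus_to_blknr()'s rule that
 * data clusters are numbered from 2.
 */
#include <stdio.h>

int main(void)
{
	unsigned sec_per_clus = 4;	/* assumed: 4 sectors per cluster */
	unsigned data_start = 1000;	/* assumed first sector of data area */
	unsigned sector = 10;

	unsigned fclus = sector / sec_per_clus;		/* file cluster 2 */
	unsigned offset = sector & (sec_per_clus - 1);	/* sector 2 in cluster */
	unsigned dclus = 7;	/* pretend the FAT maps file cluster 2 here */
	unsigned blknr = data_start + (dclus - 2) * sec_per_clus + offset;

	printf("sector %u -> file cluster %u + %u -> disk block %u\n",
	       sector, fclus, offset, blknr);
	return 0;
}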
static int is_exceed_eof(struct inode *inode, sector_t sector,
sector_t *last_block, int create)
{
struct super_block *sb = inode->i_sb;
const unsigned long blocksize = sb->s_blocksize;
const unsigned char blocksize_bits = sb->s_blocksize_bits;
*last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
if (sector >= *last_block) {
if (!create)
return 1;
		/*
		 * ->mmu_private may only be accessed on the allocation
		 * path (the caller must hold ->i_mutex).
		 */
*last_block = (MSDOS_I(inode)->mmu_private + (blocksize - 1))
>> blocksize_bits;
if (sector >= *last_block)
return 1;
}
return 0;
}
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
unsigned long *mapped_blocks, int create, bool from_bmap)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
sector_t last_block;
*phys = 0;
*mapped_blocks = 0;
if (!is_fat32(sbi) && (inode->i_ino == MSDOS_ROOT_INO)) {
if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
*phys = sector + sbi->dir_start;
*mapped_blocks = 1;
}
return 0;
}
if (!from_bmap) {
if (is_exceed_eof(inode, sector, &last_block, create))
return 0;
} else {
last_block = inode->i_blocks >>
(inode->i_sb->s_blocksize_bits - 9);
if (sector >= last_block)
return 0;
}
return fat_get_mapped_cluster(inode, sector, last_block, mapped_blocks,
phys);
}
| linux-master | fs/fat/cache.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/fat/file.c
*
* Written 1992,1993 by Werner Almesberger
*
* regular file handling primitives for fat-based filesystems
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/falloc.h>
#include "fat.h"
static long fat_fallocate(struct file *file, int mode,
loff_t offset, loff_t len);
static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr)
{
u32 attr;
inode_lock_shared(inode);
attr = fat_make_attrs(inode);
inode_unlock_shared(inode);
return put_user(attr, user_attr);
}
static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
{
struct inode *inode = file_inode(file);
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
int is_dir = S_ISDIR(inode->i_mode);
u32 attr, oldattr;
struct iattr ia;
int err;
err = get_user(attr, user_attr);
if (err)
goto out;
err = mnt_want_write_file(file);
if (err)
goto out;
inode_lock(inode);
/*
* ATTR_VOLUME and ATTR_DIR cannot be changed; this also
* prevents the user from turning us into a VFAT
* longname entry. Also, we obviously can't set
* any of the NTFS attributes in the high 24 bits.
*/
attr &= 0xff & ~(ATTR_VOLUME | ATTR_DIR);
/* Merge in ATTR_VOLUME and ATTR_DIR */
attr |= (MSDOS_I(inode)->i_attrs & ATTR_VOLUME) |
(is_dir ? ATTR_DIR : 0);
oldattr = fat_make_attrs(inode);
/* Equivalent to a chmod() */
ia.ia_valid = ATTR_MODE | ATTR_CTIME;
ia.ia_ctime = current_time(inode);
if (is_dir)
ia.ia_mode = fat_make_mode(sbi, attr, S_IRWXUGO);
else {
ia.ia_mode = fat_make_mode(sbi, attr,
S_IRUGO | S_IWUGO | (inode->i_mode & S_IXUGO));
}
/* The root directory has no attributes */
if (inode->i_ino == MSDOS_ROOT_INO && attr != ATTR_DIR) {
err = -EINVAL;
goto out_unlock_inode;
}
if (sbi->options.sys_immutable &&
((attr | oldattr) & ATTR_SYS) &&
!capable(CAP_LINUX_IMMUTABLE)) {
err = -EPERM;
goto out_unlock_inode;
}
/*
* The security check is questionable... We single
* out the RO attribute for checking by the security
* module, just because it maps to a file mode.
*/
err = security_inode_setattr(file_mnt_idmap(file),
file->f_path.dentry, &ia);
if (err)
goto out_unlock_inode;
/* This MUST be done before doing anything irreversible... */
err = fat_setattr(file_mnt_idmap(file), file->f_path.dentry, &ia);
if (err)
goto out_unlock_inode;
fsnotify_change(file->f_path.dentry, ia.ia_valid);
if (sbi->options.sys_immutable) {
if (attr & ATTR_SYS)
inode->i_flags |= S_IMMUTABLE;
else
inode->i_flags &= ~S_IMMUTABLE;
}
fat_save_attrs(inode, attr);
mark_inode_dirty(inode);
out_unlock_inode:
inode_unlock(inode);
mnt_drop_write_file(file);
out:
return err;
}
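/*
 * Minimal userspace sketch of the attribute ioctls above (the file
 * path is made up; it just has to live on a mounted FAT volume).
 * FAT_IOCTL_GET/SET_ATTRIBUTES and the ATTR_* bits come from the uapi
 * header <linux/msdos_fs.h>.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msdos_fs.h>

int main(void)
{
	__u32 attr;
	int fd = open("/mnt/fat/file.txt", O_RDONLY);

	if (fd < 0 || ioctl(fd, FAT_IOCTL_GET_ATTRIBUTES, &attr) < 0) {
		perror("FAT_IOCTL_GET_ATTRIBUTES");
		return 1;
	}
	printf("attrs: 0x%02x%s%s\n", attr,
	       (attr & ATTR_RO) ? " read-only" : "",
	       (attr & ATTR_ARCH) ? " archive" : "");
	/* Setting ATTR_RO round-trips through fat_setattr() as a chmod. */
	attr |= ATTR_RO;
	if (ioctl(fd, FAT_IOCTL_SET_ATTRIBUTES, &attr) < 0)
		perror("FAT_IOCTL_SET_ATTRIBUTES");
	close(fd);
	return 0;
}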
static int fat_ioctl_get_volume_id(struct inode *inode, u32 __user *user_attr)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
return put_user(sbi->vol_id, user_attr);
}
static int fat_ioctl_fitrim(struct inode *inode, unsigned long arg)
{
struct super_block *sb = inode->i_sb;
struct fstrim_range __user *user_range;
struct fstrim_range range;
int err;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!bdev_max_discard_sectors(sb->s_bdev))
return -EOPNOTSUPP;
user_range = (struct fstrim_range __user *)arg;
if (copy_from_user(&range, user_range, sizeof(range)))
return -EFAULT;
range.minlen = max_t(unsigned int, range.minlen,
bdev_discard_granularity(sb->s_bdev));
err = fat_trim_fs(inode, &range);
if (err < 0)
return err;
if (copy_to_user(user_range, &range, sizeof(range)))
return -EFAULT;
return 0;
}
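/*
 * Userspace sketch of issuing FITRIM against a mounted FAT filesystem
 * (mount-point path is made up; needs CAP_SYS_ADMIN and a device that
 * supports discard, exactly as checked above). On success the kernel
 * writes the trimmed byte count back into range.len.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(void)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ~0ULL,	/* whole filesystem */
		.minlen = 0,	/* raised to the discard granularity above */
	};
	int fd = open("/mnt/fat", O_RDONLY);

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}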
long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
u32 __user *user_attr = (u32 __user *)arg;
switch (cmd) {
case FAT_IOCTL_GET_ATTRIBUTES:
return fat_ioctl_get_attributes(inode, user_attr);
case FAT_IOCTL_SET_ATTRIBUTES:
return fat_ioctl_set_attributes(filp, user_attr);
case FAT_IOCTL_GET_VOLUME_ID:
return fat_ioctl_get_volume_id(inode, user_attr);
case FITRIM:
return fat_ioctl_fitrim(inode, arg);
default:
return -ENOTTY; /* Inappropriate ioctl for device */
}
}
static int fat_file_release(struct inode *inode, struct file *filp)
{
if ((filp->f_mode & FMODE_WRITE) &&
MSDOS_SB(inode->i_sb)->options.flush) {
fat_flush_inodes(inode->i_sb, inode, NULL);
set_current_state(TASK_UNINTERRUPTIBLE);
io_schedule_timeout(HZ/10);
}
return 0;
}
int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
struct inode *inode = filp->f_mapping->host;
int err;
err = __generic_file_fsync(filp, start, end, datasync);
if (err)
return err;
err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping);
if (err)
return err;
return blkdev_issue_flush(inode->i_sb->s_bdev);
}
const struct file_operations fat_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.release = fat_file_release,
.unlocked_ioctl = fat_generic_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fsync = fat_file_fsync,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = fat_fallocate,
};
static int fat_cont_expand(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
loff_t start = inode->i_size, count = size - inode->i_size;
int err;
err = generic_cont_expand_simple(inode, size);
if (err)
goto out;
fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
int err2;
		/*
		 * Open-code the syncing, since we don't have a file open
		 * to use the standard fsync path.
		 */
err = filemap_fdatawrite_range(mapping, start,
start + count - 1);
err2 = sync_mapping_buffers(mapping);
if (!err)
err = err2;
err2 = write_inode_now(inode, 1);
if (!err)
err = err2;
if (!err) {
err = filemap_fdatawait_range(mapping, start,
start + count - 1);
}
}
out:
return err;
}
/*
 * Preallocate space for a file. This implements fat's fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests len bytes at offset. If FALLOC_FL_KEEP_SIZE is set we
 * just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long fat_fallocate(struct file *file, int mode,
loff_t offset, loff_t len)
{
int nr_cluster; /* Number of clusters to be allocated */
loff_t mm_bytes; /* Number of bytes to be allocated for file */
loff_t ondisksize; /* block aligned on-disk size in bytes*/
struct inode *inode = file->f_mapping->host;
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int err = 0;
/* No support for hole punch or other fallocate flags. */
if (mode & ~FALLOC_FL_KEEP_SIZE)
return -EOPNOTSUPP;
/* No support for dir */
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
inode_lock(inode);
if (mode & FALLOC_FL_KEEP_SIZE) {
ondisksize = inode->i_blocks << 9;
if ((offset + len) <= ondisksize)
goto error;
/* First compute the number of clusters to be allocated */
mm_bytes = offset + len - ondisksize;
nr_cluster = (mm_bytes + (sbi->cluster_size - 1)) >>
sbi->cluster_bits;
		/* Start the allocation. We are not zeroing out the clusters. */
while (nr_cluster-- > 0) {
err = fat_add_cluster(inode);
if (err)
goto error;
}
} else {
if ((offset + len) <= i_size_read(inode))
goto error;
/* This is just an expanding truncate */
err = fat_cont_expand(inode, (offset + len));
}
error:
inode_unlock(inode);
return err;
}
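/*
 * Userspace sketch exercising both fallocate paths above (the file
 * path is made up): with FALLOC_FL_KEEP_SIZE clusters are allocated
 * but i_size stays put; without it the file is zero-expanded through
 * fat_cont_expand().
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("/mnt/fat/prealloc.bin", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* Reserve 1 MiB of clusters without growing i_size. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate(KEEP_SIZE)");
	/* Expanding allocation: zeroes the range and grows i_size. */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}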
/* Free all clusters after the skip'th cluster. */
static int fat_free(struct inode *inode, int skip)
{
struct super_block *sb = inode->i_sb;
int err, wait, free_start, i_start, i_logstart;
if (MSDOS_I(inode)->i_start == 0)
return 0;
fat_cache_inval_inode(inode);
wait = IS_DIRSYNC(inode);
i_start = free_start = MSDOS_I(inode)->i_start;
i_logstart = MSDOS_I(inode)->i_logstart;
/* First, we write the new file size. */
if (!skip) {
MSDOS_I(inode)->i_start = 0;
MSDOS_I(inode)->i_logstart = 0;
}
MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
fat_truncate_time(inode, NULL, S_CTIME|S_MTIME);
if (wait) {
err = fat_sync_inode(inode);
if (err) {
MSDOS_I(inode)->i_start = i_start;
MSDOS_I(inode)->i_logstart = i_logstart;
return err;
}
} else
mark_inode_dirty(inode);
/* Write a new EOF, and get the remaining cluster chain for freeing. */
if (skip) {
struct fat_entry fatent;
int ret, fclus, dclus;
ret = fat_get_cluster(inode, skip - 1, &fclus, &dclus);
if (ret < 0)
return ret;
else if (ret == FAT_ENT_EOF)
return 0;
fatent_init(&fatent);
ret = fat_ent_read(inode, &fatent, dclus);
if (ret == FAT_ENT_EOF) {
fatent_brelse(&fatent);
return 0;
} else if (ret == FAT_ENT_FREE) {
fat_fs_error(sb,
"%s: invalid cluster chain (i_pos %lld)",
__func__, MSDOS_I(inode)->i_pos);
ret = -EIO;
} else if (ret > 0) {
err = fat_ent_write(inode, &fatent, FAT_ENT_EOF, wait);
if (err)
ret = err;
}
fatent_brelse(&fatent);
if (ret < 0)
return ret;
free_start = ret;
}
inode->i_blocks = skip << (MSDOS_SB(sb)->cluster_bits - 9);
	/* Free the remaining cluster chain */
return fat_free_clusters(inode, free_start);
}
void fat_truncate_blocks(struct inode *inode, loff_t offset)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
const unsigned int cluster_size = sbi->cluster_size;
int nr_clusters;
	/*
	 * This protects against truncating a file to a size bigger than
	 * it was and then trying to write into the hole.
	 */
if (MSDOS_I(inode)->mmu_private > offset)
MSDOS_I(inode)->mmu_private = offset;
nr_clusters = (offset + (cluster_size - 1)) >> sbi->cluster_bits;
fat_free(inode, nr_clusters);
fat_flush_inodes(inode->i_sb, inode, NULL);
}
int fat_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct inode *inode = d_inode(path->dentry);
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
generic_fillattr(idmap, request_mask, inode, stat);
stat->blksize = sbi->cluster_size;
if (sbi->options.nfs == FAT_NFS_NOSTALE_RO) {
/* Use i_pos for ino. This is used as fileid of nfs. */
stat->ino = fat_i_pos_read(sbi, inode);
}
if (sbi->options.isvfat && request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
stat->btime = MSDOS_I(inode)->i_crtime;
}
return 0;
}
EXPORT_SYMBOL_GPL(fat_getattr);
static int fat_sanitize_mode(const struct msdos_sb_info *sbi,
struct inode *inode, umode_t *mode_ptr)
{
umode_t mask, perm;
	/*
	 * Note: the basic check, (attr->ia_mode & ~FAT_VALID_MODE),
	 * has already been done by the caller.
	 */
if (S_ISREG(inode->i_mode))
mask = sbi->options.fs_fmask;
else
mask = sbi->options.fs_dmask;
perm = *mode_ptr & ~(S_IFMT | mask);
/*
* Of the r and x bits, all (subject to umask) must be present. Of the
* w bits, either all (subject to umask) or none must be present.
*
* If fat_mode_can_hold_ro(inode) is false, can't change w bits.
*/
if ((perm & (S_IRUGO | S_IXUGO)) != (inode->i_mode & (S_IRUGO|S_IXUGO)))
return -EPERM;
if (fat_mode_can_hold_ro(inode)) {
if ((perm & S_IWUGO) && ((perm & S_IWUGO) != (S_IWUGO & ~mask)))
return -EPERM;
} else {
if ((perm & S_IWUGO) != (S_IWUGO & ~mask))
return -EPERM;
}
*mode_ptr &= S_IFMT | perm;
return 0;
}
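/*
 * Sketch of the w-bit rule above for the fat_mode_can_hold_ro() case
 * (standalone userspace check with an assumed fmask of 022): write
 * bits must be either absent entirely or exactly S_IWUGO & ~mask.
 */
#include <stdio.h>
#include <sys/stat.h>

static int w_bits_ok(mode_t perm, mode_t mask)
{
	mode_t w = perm & (S_IWUSR | S_IWGRP | S_IWOTH);

	return w == 0 || w == ((S_IWUSR | S_IWGRP | S_IWOTH) & ~mask);
}

int main(void)
{
	printf("0644 ok: %d\n", w_bits_ok(0644, 022));	/* 1: w == 0200 */
	printf("0664 ok: %d\n", w_bits_ok(0664, 022));	/* 0: partial w bits */
	printf("0444 ok: %d\n", w_bits_ok(0444, 022));	/* 1: no w bits */
	return 0;
}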
static int fat_allow_set_time(struct mnt_idmap *idmap,
struct msdos_sb_info *sbi, struct inode *inode)
{
umode_t allow_utime = sbi->options.allow_utime;
if (!vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode),
current_fsuid())) {
if (vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
allow_utime >>= 3;
if (allow_utime & MAY_WRITE)
return 1;
}
/* use a default check */
return 0;
}
#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
/* valid file mode bits */
#define FAT_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXUGO)
int fat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
struct inode *inode = d_inode(dentry);
unsigned int ia_valid;
int error;
/* Check for setting the inode time. */
ia_valid = attr->ia_valid;
if (ia_valid & TIMES_SET_FLAGS) {
if (fat_allow_set_time(idmap, sbi, inode))
attr->ia_valid &= ~TIMES_SET_FLAGS;
}
error = setattr_prepare(idmap, dentry, attr);
attr->ia_valid = ia_valid;
if (error) {
if (sbi->options.quiet)
error = 0;
goto out;
}
	/*
	 * Expand the file. inode_setattr() updates ->i_size before
	 * calling ->truncate(), but FAT needs to fill the hole first.
	 * XXX: this is no longer true with the new truncate sequence.
	 */
if (attr->ia_valid & ATTR_SIZE) {
inode_dio_wait(inode);
if (attr->ia_size > inode->i_size) {
error = fat_cont_expand(inode, attr->ia_size);
if (error || attr->ia_valid == ATTR_SIZE)
goto out;
attr->ia_valid &= ~ATTR_SIZE;
}
}
if (((attr->ia_valid & ATTR_UID) &&
(!uid_eq(from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid),
sbi->options.fs_uid))) ||
((attr->ia_valid & ATTR_GID) &&
(!gid_eq(from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid),
sbi->options.fs_gid))) ||
((attr->ia_valid & ATTR_MODE) &&
(attr->ia_mode & ~FAT_VALID_MODE)))
error = -EPERM;
if (error) {
if (sbi->options.quiet)
error = 0;
goto out;
}
	/*
	 * We don't return -EPERM here. Yes, strange, but this behavior
	 * is too old to change now.
	 */
if (attr->ia_valid & ATTR_MODE) {
if (fat_sanitize_mode(sbi, inode, &attr->ia_mode) < 0)
attr->ia_valid &= ~ATTR_MODE;
}
if (attr->ia_valid & ATTR_SIZE) {
error = fat_block_truncate_page(inode, attr->ia_size);
if (error)
goto out;
down_write(&MSDOS_I(inode)->truncate_lock);
truncate_setsize(inode, attr->ia_size);
fat_truncate_blocks(inode, attr->ia_size);
up_write(&MSDOS_I(inode)->truncate_lock);
}
/*
* setattr_copy can't truncate these appropriately, so we'll
* copy them ourselves
*/
if (attr->ia_valid & ATTR_ATIME)
fat_truncate_time(inode, &attr->ia_atime, S_ATIME);
if (attr->ia_valid & ATTR_CTIME)
fat_truncate_time(inode, &attr->ia_ctime, S_CTIME);
if (attr->ia_valid & ATTR_MTIME)
fat_truncate_time(inode, &attr->ia_mtime, S_MTIME);
attr->ia_valid &= ~(ATTR_ATIME|ATTR_CTIME|ATTR_MTIME);
setattr_copy(idmap, inode, attr);
mark_inode_dirty(inode);
out:
return error;
}
EXPORT_SYMBOL_GPL(fat_setattr);
const struct inode_operations fat_file_inode_operations = {
.setattr = fat_setattr,
.getattr = fat_getattr,
.update_time = fat_update_time,
};
| linux-master | fs/fat/file.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/fat/misc.c
*
* Written 1992,1993 by Werner Almesberger
* 22/11/2000 - Fixed fat_date_unix2dos for dates earlier than 01/01/1980
* and date_dos2unix for date==0 by Igor Zhbanov([email protected])
*/
#include "fat.h"
#include <linux/iversion.h>
/*
 * fat_fs_error reports a file system problem that might indicate fat data
 * corruption/inconsistency. Depending on the 'errors' mount option,
 * either panic() is called, or an error message is printed and nothing
 * else is done, or the filesystem is remounted read-only (the default
 * behavior). In case the file system is remounted read-only, it can be
 * made writable again by remounting it.
 */
void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
{
struct fat_mount_options *opts = &MSDOS_SB(sb)->options;
va_list args;
struct va_format vaf;
if (report) {
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
fat_msg(sb, KERN_ERR, "error, %pV", &vaf);
va_end(args);
}
if (opts->errors == FAT_ERRORS_PANIC)
panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id);
else if (opts->errors == FAT_ERRORS_RO && !sb_rdonly(sb)) {
sb->s_flags |= SB_RDONLY;
fat_msg(sb, KERN_ERR, "Filesystem has been set read-only");
}
}
EXPORT_SYMBOL_GPL(__fat_fs_error);
/**
* _fat_msg() - Print a preformatted FAT message based on a superblock.
* @sb: A pointer to a &struct super_block
* @level: A Kernel printk level constant
* @fmt: The printf-style format string to print.
*
* Everything that is not fat_fs_error() should be fat_msg().
*
* fat_msg() wraps _fat_msg() for printk indexing.
*/
void _fat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
_printk(FAT_PRINTK_PREFIX "%pV\n", level, sb->s_id, &vaf);
va_end(args);
}
/* Flushes the number of free clusters on FAT32 */
/* XXX: Need to write one per FSINFO block. Currently only writes 1 */
int fat_clusters_flush(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh;
struct fat_boot_fsinfo *fsinfo;
if (!is_fat32(sbi))
return 0;
bh = sb_bread(sb, sbi->fsinfo_sector);
if (bh == NULL) {
fat_msg(sb, KERN_ERR, "bread failed in fat_clusters_flush");
return -EIO;
}
fsinfo = (struct fat_boot_fsinfo *)bh->b_data;
/* Sanity check */
if (!IS_FSINFO(fsinfo)) {
fat_msg(sb, KERN_ERR, "Invalid FSINFO signature: "
"0x%08x, 0x%08x (sector = %lu)",
le32_to_cpu(fsinfo->signature1),
le32_to_cpu(fsinfo->signature2),
sbi->fsinfo_sector);
} else {
if (sbi->free_clusters != -1)
fsinfo->free_clusters = cpu_to_le32(sbi->free_clusters);
if (sbi->prev_free != -1)
fsinfo->next_cluster = cpu_to_le32(sbi->prev_free);
mark_buffer_dirty(bh);
}
brelse(bh);
return 0;
}
/*
* fat_chain_add() adds a new cluster to the chain of clusters represented
* by inode.
*/
int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int ret, new_fclus, last;
	/*
	 * We must locate the last cluster of the file to add this new
	 * one (new_dclus) to the end of the linked list (the FAT).
	 */
last = new_fclus = 0;
if (MSDOS_I(inode)->i_start) {
int fclus, dclus;
ret = fat_get_cluster(inode, FAT_ENT_EOF, &fclus, &dclus);
if (ret < 0)
return ret;
new_fclus = fclus + 1;
last = dclus;
}
/* add new one to the last of the cluster chain */
if (last) {
struct fat_entry fatent;
fatent_init(&fatent);
ret = fat_ent_read(inode, &fatent, last);
if (ret >= 0) {
int wait = inode_needs_sync(inode);
ret = fat_ent_write(inode, &fatent, new_dclus, wait);
fatent_brelse(&fatent);
}
if (ret < 0)
return ret;
		/*
		 * FIXME: Although we could add this cache entry here,
		 * fat_cache_add() assumes it is called after a linear
		 * search with a fat_cache_id.
		 */
// fat_cache_add(inode, new_fclus, new_dclus);
} else {
MSDOS_I(inode)->i_start = new_dclus;
MSDOS_I(inode)->i_logstart = new_dclus;
		/*
		 * Since generic_write_sync() synchronizes regular files later,
		 * we only sync directories here.
		 */
if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) {
ret = fat_sync_inode(inode);
if (ret)
return ret;
} else
mark_inode_dirty(inode);
}
if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) {
fat_fs_error(sb, "clusters badly computed (%d != %llu)",
new_fclus,
(llu)(inode->i_blocks >> (sbi->cluster_bits - 9)));
fat_cache_inval_inode(inode);
}
inode->i_blocks += nr_cluster << (sbi->cluster_bits - 9);
return 0;
}
/*
* The epoch of FAT timestamp is 1980.
* : bits : value
* date: 0 - 4: day (1 - 31)
* date: 5 - 8: month (1 - 12)
* date: 9 - 15: year (0 - 127) from 1980
* time: 0 - 4: sec (0 - 29) 2sec counts
* time: 5 - 10: min (0 - 59)
* time: 11 - 15: hour (0 - 23)
*/
#define SECS_PER_MIN 60
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
/* days between 1.1.70 and 1.1.80 (2 leap days) */
#define DAYS_DELTA (365 * 10 + 2)
/* year 120 (2100 - 1980) isn't a leap year */
#define YEAR_2100 120
#define IS_LEAP_YEAR(y) (!((y) & 3) && (y) != YEAR_2100)
/* Linear day numbers of the respective 1sts in non-leap years. */
static long days_in_year[] = {
/* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */
0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
};
static inline int fat_tz_offset(const struct msdos_sb_info *sbi)
{
return (sbi->options.tz_set ?
-sbi->options.time_offset :
sys_tz.tz_minuteswest) * SECS_PER_MIN;
}
/* Convert a FAT time/date pair to a UNIX date (seconds since 1970-01-01). */
void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 __time, __le16 __date, u8 time_cs)
{
u16 time = le16_to_cpu(__time), date = le16_to_cpu(__date);
time64_t second;
long day, leap_day, month, year;
year = date >> 9;
month = max(1, (date >> 5) & 0xf);
day = max(1, date & 0x1f) - 1;
leap_day = (year + 3) / 4;
if (year > YEAR_2100) /* 2100 isn't leap year */
leap_day--;
if (IS_LEAP_YEAR(year) && month > 2)
leap_day++;
second = (time & 0x1f) << 1;
second += ((time >> 5) & 0x3f) * SECS_PER_MIN;
second += (time >> 11) * SECS_PER_HOUR;
second += (time64_t)(year * 365 + leap_day
+ days_in_year[month] + day
+ DAYS_DELTA) * SECS_PER_DAY;
second += fat_tz_offset(sbi);
if (time_cs) {
ts->tv_sec = second + (time_cs / 100);
ts->tv_nsec = (time_cs % 100) * 10000000;
} else {
ts->tv_sec = second;
ts->tv_nsec = 0;
}
}
/* Export fat_time_fat2unix() for the fat_test KUnit tests. */
EXPORT_SYMBOL_GPL(fat_time_fat2unix);
/* Convert linear UNIX date to a FAT time/date pair. */
void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts,
__le16 *time, __le16 *date, u8 *time_cs)
{
struct tm tm;
time64_to_tm(ts->tv_sec, -fat_tz_offset(sbi), &tm);
	/* FAT can only represent years between 1980 and 2107 */
if (tm.tm_year < 1980 - 1900) {
*time = 0;
*date = cpu_to_le16((0 << 9) | (1 << 5) | 1);
if (time_cs)
*time_cs = 0;
return;
}
if (tm.tm_year > 2107 - 1900) {
*time = cpu_to_le16((23 << 11) | (59 << 5) | 29);
*date = cpu_to_le16((127 << 9) | (12 << 5) | 31);
if (time_cs)
*time_cs = 199;
return;
}
/* from 1900 -> from 1980 */
tm.tm_year -= 80;
/* 0~11 -> 1~12 */
tm.tm_mon++;
/* 0~59 -> 0~29(2sec counts) */
tm.tm_sec >>= 1;
*time = cpu_to_le16(tm.tm_hour << 11 | tm.tm_min << 5 | tm.tm_sec);
*date = cpu_to_le16(tm.tm_year << 9 | tm.tm_mon << 5 | tm.tm_mday);
if (time_cs)
*time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000;
}
EXPORT_SYMBOL_GPL(fat_time_unix2fat);
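/*
 * Worked example of the bit layout documented above (standalone
 * sketch; the date is arbitrary). Packs 2004-08-07 11:30:08 into a
 * FAT date/time pair and unpacks it again; note the seconds field
 * only has 2-second granularity, which is why mtime is truncated
 * elsewhere in this file.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t date = ((2004 - 1980) << 9) | (8 << 5) | 7;
	uint16_t time = (11 << 11) | (30 << 5) | (8 >> 1);

	printf("date 0x%04x time 0x%04x\n", date, time);
	printf("%04u-%02u-%02u %02u:%02u:%02u\n",
	       (date >> 9) + 1980, (date >> 5) & 0xf, date & 0x1f,
	       time >> 11, (time >> 5) & 0x3f, (time & 0x1f) << 1);
	return 0;
}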
static inline struct timespec64 fat_timespec64_trunc_2secs(struct timespec64 ts)
{
return (struct timespec64){ ts.tv_sec & ~1ULL, 0 };
}
/*
* truncate atime to 24 hour granularity (00:00:00 in local timezone)
*/
struct timespec64 fat_truncate_atime(const struct msdos_sb_info *sbi,
const struct timespec64 *ts)
{
/* to localtime */
time64_t seconds = ts->tv_sec - fat_tz_offset(sbi);
s32 remainder;
div_s64_rem(seconds, SECS_PER_DAY, &remainder);
/* to day boundary, and back to unix time */
seconds = seconds + fat_tz_offset(sbi) - remainder;
return (struct timespec64){ seconds, 0 };
}
/*
* truncate mtime to 2 second granularity
*/
struct timespec64 fat_truncate_mtime(const struct msdos_sb_info *sbi,
const struct timespec64 *ts)
{
return fat_timespec64_trunc_2secs(*ts);
}
/*
* truncate the various times with appropriate granularity:
* all times in root node are always 0
*/
int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
struct timespec64 ts;
if (inode->i_ino == MSDOS_ROOT_INO)
return 0;
if (now == NULL) {
now = &ts;
ts = current_time(inode);
}
if (flags & S_ATIME)
inode->i_atime = fat_truncate_atime(sbi, now);
	/*
	 * ctime and mtime share the same on-disk field, and should be
	 * identical in memory. All mtime updates will be applied to ctime,
	 * but ctime updates are ignored.
	 */
if (flags & S_MTIME)
inode->i_mtime = inode_set_ctime_to_ts(inode,
fat_truncate_mtime(sbi, now));
return 0;
}
EXPORT_SYMBOL_GPL(fat_truncate_time);
int fat_update_time(struct inode *inode, int flags)
{
int dirty_flags = 0;
if (inode->i_ino == MSDOS_ROOT_INO)
return 0;
if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
fat_truncate_time(inode, NULL, flags);
if (inode->i_sb->s_flags & SB_LAZYTIME)
dirty_flags |= I_DIRTY_TIME;
else
dirty_flags |= I_DIRTY_SYNC;
}
__mark_inode_dirty(inode, dirty_flags);
return 0;
}
EXPORT_SYMBOL_GPL(fat_update_time);
int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
{
int i, err = 0;
for (i = 0; i < nr_bhs; i++)
write_dirty_buffer(bhs[i], 0);
for (i = 0; i < nr_bhs; i++) {
wait_on_buffer(bhs[i]);
if (!err && !buffer_uptodate(bhs[i]))
err = -EIO;
}
return err;
}
| linux-master | fs/fat/misc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/isofs/joliet.c
*
* (C) 1996 Gordon Chaffee
*
* Joliet: Microsoft's Unicode extensions to iso9660
*/
#include <linux/types.h>
#include <linux/nls.h>
#include "isofs.h"
/*
 * Convert big-endian 16-bit Unicode to UTF-8 or ASCII.
 */
static int
uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
{
__be16 *ip, ch;
unsigned char *op;
ip = uni;
op = ascii;
while ((ch = get_unaligned(ip)) && len) {
int llen;
llen = nls->uni2char(be16_to_cpu(ch), op, NLS_MAX_CHARSET_SIZE);
if (llen > 0)
op += llen;
else
*op++ = '?';
ip++;
len--;
}
*op = 0;
return (op - ascii);
}
int
get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
{
struct nls_table *nls;
unsigned char len = 0;
nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
if (!nls) {
len = utf16s_to_utf8s((const wchar_t *) de->name,
de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
outname, PAGE_SIZE);
} else {
len = uni16_to_x8(outname, (__be16 *) de->name,
de->name_len[0] >> 1, nls);
}
if ((len > 2) && (outname[len-2] == ';') && (outname[len-1] == '1'))
len -= 2;
/*
* Windows doesn't like periods at the end of a name,
* so neither do we
*/
while (len >= 2 && (outname[len-1] == '.'))
len--;
return len;
}
| linux-master | fs/isofs/joliet.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/isofs/util.c
*/
#include <linux/time.h>
#include "isofs.h"
/*
 * We have to convert the on-disc timestamp (year, month, day, hour,
 * minute, second, timezone) to the Unix ctime format.
 * We have to take into account leap years and all of that good stuff.
 * Unfortunately, the kernel does not have the information on hand to
 * take into account daylight savings time, but it shouldn't matter.
 * The time stored should be localtime (with or without DST in effect),
 * and the timezone offset should hold the offset required to get back
 * to GMT. Thus we should always be correct.
 */
int iso_date(u8 *p, int flag)
{
int year, month, day, hour, minute, second, tz;
int crtime;
year = p[0];
month = p[1];
day = p[2];
hour = p[3];
minute = p[4];
second = p[5];
if (flag == 0) tz = p[6]; /* High sierra has no time zone */
else tz = 0;
if (year < 0) {
crtime = 0;
} else {
crtime = mktime64(year+1900, month, day, hour, minute, second);
/* sign extend */
if (tz & 0x80)
tz |= (-1 << 8);
/*
* The timezone offset is unreliable on some disks,
* so we make a sanity check. In no case is it ever
* more than 13 hours from GMT, which is 52*15min.
* The time is always stored in localtime with the
* timezone offset being what get added to GMT to
* get to localtime. Thus we need to subtract the offset
* to get to true GMT, which is what we store the time
* as internally. On the local system, the user may set
* their timezone any way they wish, of course, so GMT
* gets converted back to localtime on the receiving
* system.
*
* NOTE: mkisofs in versions prior to mkisofs-1.10 had
* the sign wrong on the timezone offset. This has now
* been corrected there too, but if you are getting screwy
* results this may be the explanation. If enough people
* complain, a user configuration option could be added
* to add the timezone offset in with the wrong sign
* for 'compatibility' with older discs, but I cannot see how
* it will matter that much.
*
* Thanks to [email protected] (Volker Kuhlmann)
* for pointing out the sign error.
*/
if (-52 <= tz && tz <= 52)
crtime -= tz * 15 * 60;
}
return crtime;
}
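/*
 * Standalone sketch decoding the 7-byte timestamp handled above (the
 * byte values are arbitrary): 2004-08-07 11:30:08 recorded at UTC+2,
 * i.e. a timezone byte of +8 quarter hours.
 */
#include <stdio.h>

int main(void)
{
	unsigned char p[7] = { 104, 8, 7, 11, 30, 8, 8 };
	int tz = (signed char)p[6];	/* sign-extend, as iso_date() does */

	printf("%04u-%02u-%02u %02u:%02u:%02u, %+d min east of GMT\n",
	       1900 + p[0], p[1], p[2], p[3], p[4], p[5], tz * 15);
	/* iso_date() subtracts tz * 15 * 60 seconds to get back to GMT. */
	return 0;
}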
| linux-master | fs/isofs/util.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2001 H. Peter Anvin - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* linux/fs/isofs/compress.c
*
* Transparent decompression of files on an iso9660 filesystem
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>
#include "isofs.h"
#include "zisofs.h"
/* This should probably be global. */
static char zisofs_sink_page[PAGE_SIZE];
/*
* This contains the zlib memory allocation and the mutex for the
* allocation; this avoids failures at block-decompression time.
*/
static void *zisofs_zlib_workspace;
static DEFINE_MUTEX(zisofs_zlib_lock);
/*
* Read data of @inode from @block_start to @block_end and uncompress
* to one zisofs block. Store the data in the @pages array with @pcount
* entries. Start storing at offset @poffset of the first page.
*/
static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
loff_t block_end, int pcount,
struct page **pages, unsigned poffset,
int *errp)
{
unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
unsigned int bufsize = ISOFS_BUFFER_SIZE(inode);
unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
unsigned int bufmask = bufsize - 1;
int i, block_size = block_end - block_start;
z_stream stream = { .total_out = 0,
.avail_in = 0,
.avail_out = 0, };
int zerr;
int needblocks = (block_size + (block_start & bufmask) + bufmask)
>> bufshift;
int haveblocks;
blkcnt_t blocknum;
struct buffer_head **bhs;
int curbh, curpage;
if (block_size > deflateBound(1UL << zisofs_block_shift)) {
*errp = -EIO;
return 0;
}
/* Empty block? */
if (block_size == 0) {
for ( i = 0 ; i < pcount ; i++ ) {
if (!pages[i])
continue;
memzero_page(pages[i], 0, PAGE_SIZE);
SetPageUptodate(pages[i]);
}
return ((loff_t)pcount) << PAGE_SHIFT;
}
/* Because zlib is not thread-safe, do all the I/O at the top. */
blocknum = block_start >> bufshift;
bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
if (!bhs) {
*errp = -ENOMEM;
return 0;
}
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
bh_read_batch(haveblocks, bhs);
curbh = 0;
curpage = 0;
	/*
	 * First block is special since it may be fractional. We also wait for
	 * it before grabbing the zlib mutex; odds are that the subsequent
	 * blocks will arrive in short order, so we don't hold the zlib
	 * mutex longer than necessary.
	 */
if (!bhs[0])
goto b_eio;
wait_on_buffer(bhs[0]);
if (!buffer_uptodate(bhs[0])) {
*errp = -EIO;
goto b_eio;
}
stream.workspace = zisofs_zlib_workspace;
mutex_lock(&zisofs_zlib_lock);
zerr = zlib_inflateInit(&stream);
if (zerr != Z_OK) {
if (zerr == Z_MEM_ERROR)
*errp = -ENOMEM;
else
*errp = -EIO;
printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
zerr);
goto z_eio;
}
while (curpage < pcount && curbh < haveblocks &&
zerr != Z_STREAM_END) {
if (!stream.avail_out) {
if (pages[curpage]) {
stream.next_out = kmap_local_page(pages[curpage])
+ poffset;
stream.avail_out = PAGE_SIZE - poffset;
poffset = 0;
} else {
stream.next_out = (void *)&zisofs_sink_page;
stream.avail_out = PAGE_SIZE;
}
}
if (!stream.avail_in) {
wait_on_buffer(bhs[curbh]);
if (!buffer_uptodate(bhs[curbh])) {
*errp = -EIO;
break;
}
stream.next_in = bhs[curbh]->b_data +
(block_start & bufmask);
stream.avail_in = min_t(unsigned, bufsize -
(block_start & bufmask),
block_size);
block_size -= stream.avail_in;
block_start = 0;
}
while (stream.avail_out && stream.avail_in) {
zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
if (zerr == Z_BUF_ERROR && stream.avail_in == 0)
break;
if (zerr == Z_STREAM_END)
break;
if (zerr != Z_OK) {
/* EOF, error, or trying to read beyond end of input */
if (zerr == Z_MEM_ERROR)
*errp = -ENOMEM;
else {
printk(KERN_DEBUG
"zisofs: zisofs_inflate returned"
" %d, inode = %lu,"
" page idx = %d, bh idx = %d,"
" avail_in = %ld,"
" avail_out = %ld\n",
zerr, inode->i_ino, curpage,
curbh, stream.avail_in,
stream.avail_out);
*errp = -EIO;
}
goto inflate_out;
}
}
if (!stream.avail_out) {
/* This page completed */
if (pages[curpage]) {
flush_dcache_page(pages[curpage]);
SetPageUptodate(pages[curpage]);
}
if (stream.next_out != (unsigned char *)zisofs_sink_page) {
kunmap_local(stream.next_out);
stream.next_out = NULL;
}
curpage++;
}
if (!stream.avail_in)
curbh++;
}
inflate_out:
zlib_inflateEnd(&stream);
if (stream.next_out && stream.next_out != (unsigned char *)zisofs_sink_page)
kunmap_local(stream.next_out);
z_eio:
mutex_unlock(&zisofs_zlib_lock);
b_eio:
for (i = 0; i < haveblocks; i++)
brelse(bhs[i]);
kfree(bhs);
return stream.total_out;
}
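/*
 * Userspace analogue of the streaming inflate loop above (a sketch
 * using ordinary zlib rather than the kernel's zlib_* wrappers; build
 * with -lz). Input is fed once, output is drained a buffer at a time
 * with Z_SYNC_FLUSH, mirroring the structure of the loop above.
 */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	unsigned char in[256], out[256];
	uLongf inlen = sizeof(in);
	z_stream s;

	/* Make some compressed input to decompress. */
	if (compress(in, &inlen, (const Bytef *)"hello zisofs", 12) != Z_OK)
		return 1;
	memset(&s, 0, sizeof(s));	/* NULL zalloc/zfree: zlib defaults */
	if (inflateInit(&s) != Z_OK)
		return 1;
	s.next_in = in;
	s.avail_in = inlen;
	while (s.avail_in) {
		int zerr;

		s.next_out = out;
		s.avail_out = sizeof(out);
		zerr = inflate(&s, Z_SYNC_FLUSH);
		if (zerr == Z_STREAM_END)
			break;
		if (zerr != Z_OK)
			return 1;
	}
	printf("%.*s\n", (int)s.total_out, (char *)out);
	inflateEnd(&s);
	return 0;
}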
/*
 * Uncompress data so that pages[full_page] is fully uptodate, and
 * possibly fill in other pages if we have data for them.
 */
static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
struct page **pages)
{
loff_t start_off, end_off;
loff_t block_start, block_end;
unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
unsigned int blockptr;
loff_t poffset = 0;
blkcnt_t cstart_block, cend_block;
struct buffer_head *bh;
unsigned int blkbits = ISOFS_BUFFER_BITS(inode);
unsigned int blksize = 1 << blkbits;
int err;
loff_t ret;
BUG_ON(!pages[full_page]);
	/*
	 * We want to read at least the 'full_page' page. Since we have to
	 * uncompress the whole compression block anyway, fill the
	 * surrounding pages with whatever data we already have...
	 */
start_off = page_offset(pages[full_page]);
end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size);
cstart_block = start_off >> zisofs_block_shift;
cend_block = (end_off + (1 << zisofs_block_shift) - 1)
>> zisofs_block_shift;
WARN_ON(start_off - (full_page << PAGE_SHIFT) !=
((cstart_block << zisofs_block_shift) & PAGE_MASK));
/* Find the pointer to this specific chunk */
	/* Note: we're not using isonum_731() here because the data is known to be aligned */
/* Note: header_size is in 32-bit words (4 bytes) */
blockptr = (header_size + cstart_block) << 2;
bh = isofs_bread(inode, blockptr >> blkbits);
if (!bh)
return -EIO;
block_start = le32_to_cpu(*(__le32 *)
(bh->b_data + (blockptr & (blksize - 1))));
while (cstart_block < cend_block && pcount > 0) {
/* Load end of the compressed block in the file */
blockptr += 4;
/* Traversed to next block? */
if (!(blockptr & (blksize - 1))) {
brelse(bh);
bh = isofs_bread(inode, blockptr >> blkbits);
if (!bh)
return -EIO;
}
block_end = le32_to_cpu(*(__le32 *)
(bh->b_data + (blockptr & (blksize - 1))));
if (block_start > block_end) {
brelse(bh);
return -EIO;
}
err = 0;
ret = zisofs_uncompress_block(inode, block_start, block_end,
pcount, pages, poffset, &err);
poffset += ret;
pages += poffset >> PAGE_SHIFT;
pcount -= poffset >> PAGE_SHIFT;
full_page -= poffset >> PAGE_SHIFT;
poffset &= ~PAGE_MASK;
if (err) {
brelse(bh);
/*
* Did we finish reading the page we really wanted
* to read?
*/
if (full_page < 0)
return 0;
return err;
}
block_start = block_end;
cstart_block++;
}
if (poffset && *pages) {
memzero_page(*pages, poffset, PAGE_SIZE - poffset);
SetPageUptodate(*pages);
}
return 0;
}
/*
* When decompressing, we typically obtain more than one page
* per reference. We inject the additional pages into the page
* cache as a form of readahead.
*/
static int zisofs_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
int err;
int i, pcount, full_page;
unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
unsigned int zisofs_pages_per_cblock =
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
struct page **pages;
pgoff_t index = page->index, end_index;
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
/*
* If this page is wholly outside i_size we just return zero;
* do_generic_file_read() will handle this for us
*/
if (index >= end_index) {
SetPageUptodate(page);
unlock_page(page);
return 0;
}
if (PAGE_SHIFT <= zisofs_block_shift) {
/* We have already been given one page, this is the one
we must do. */
full_page = index & (zisofs_pages_per_cblock - 1);
pcount = min_t(int, zisofs_pages_per_cblock,
end_index - (index & ~(zisofs_pages_per_cblock - 1)));
index -= full_page;
} else {
full_page = 0;
pcount = 1;
}
pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
sizeof(*pages), GFP_KERNEL);
if (!pages) {
unlock_page(page);
return -ENOMEM;
}
pages[full_page] = page;
for (i = 0; i < pcount; i++, index++) {
if (i != full_page)
pages[i] = grab_cache_page_nowait(mapping, index);
if (pages[i])
ClearPageError(pages[i]);
}
err = zisofs_fill_pages(inode, full_page, pcount, pages);
/* Release any residual pages, do not SetPageUptodate */
for (i = 0; i < pcount; i++) {
if (pages[i]) {
flush_dcache_page(pages[i]);
if (i == full_page && err)
SetPageError(pages[i]);
unlock_page(pages[i]);
if (i != full_page)
put_page(pages[i]);
}
}
/* At this point, err contains 0 or -EIO depending on the "critical" page */
kfree(pages);
return err;
}
const struct address_space_operations zisofs_aops = {
.read_folio = zisofs_read_folio,
/* No bmap operation supported */
};
int __init zisofs_init(void)
{
zisofs_zlib_workspace = vmalloc(zlib_inflate_workspacesize());
if ( !zisofs_zlib_workspace )
return -ENOMEM;
return 0;
}
void zisofs_cleanup(void)
{
vfree(zisofs_zlib_workspace);
}
| linux-master | fs/isofs/compress.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/isofs/dir.c
*
* (C) 1992, 1993, 1994 Eric Youngdale Modified for ISO 9660 filesystem.
*
* (C) 1991 Linus Torvalds - minix filesystem
*
* Steve Beynon : Missing last directory entries fixed
* ([email protected]) : 21st June 1996
*
* isofs directory handling functions
*/
#include <linux/gfp.h>
#include "isofs.h"
int isofs_name_translate(struct iso_directory_record *de, char *new, struct inode *inode)
{
char * old = de->name;
int len = de->name_len[0];
int i;
for (i = 0; i < len; i++) {
unsigned char c = old[i];
if (!c)
break;
if (c >= 'A' && c <= 'Z')
c |= 0x20; /* lower case */
/* Drop trailing '.;1' (ISO 9660:1988 7.5.1 requires period) */
if (c == '.' && i == len - 3 && old[i + 1] == ';' && old[i + 2] == '1')
break;
/* Drop trailing ';1' */
if (c == ';' && i == len - 2 && old[i + 1] == '1')
break;
/* Convert remaining ';' to '.' */
/* Also '/' to '.' (broken Acorn-generated ISO9660 images) */
if (c == ';' || c == '/')
c = '.';
new[i] = c;
}
return i;
}
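/*
 * Minimal userspace re-statement of the rules above (illustrative
 * sketch): "README.TXT;1" comes out as "readme.txt", and a trailing
 * ".;1" is dropped entirely.
 */
#include <stdio.h>
#include <ctype.h>
#include <string.h>

static int translate(const char *old, int len, char *new)
{
	int i;

	for (i = 0; i < len; i++) {
		char c = tolower((unsigned char)old[i]);

		if (!c)
			break;
		if (c == '.' && i == len - 3 && !strncmp(old + i + 1, ";1", 2))
			break;			/* drop trailing ".;1" */
		if (c == ';' && i == len - 2 && old[i + 1] == '1')
			break;			/* drop trailing ";1" */
		if (c == ';' || c == '/')
			c = '.';
		new[i] = c;
	}
	new[i] = '\0';
	return i;
}

int main(void)
{
	char buf[64];

	translate("README.TXT;1", 12, buf);
	printf("%s\n", buf);	/* prints "readme.txt" */
	translate("FOO.;1", 6, buf);
	printf("%s\n", buf);	/* prints "foo" */
	return 0;
}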
/* Acorn extensions written by Matthew Wilcox <[email protected]> 1998 */
int get_acorn_filename(struct iso_directory_record *de,
char *retname, struct inode *inode)
{
int std;
unsigned char *chr;
int retnamlen = isofs_name_translate(de, retname, inode);
if (retnamlen == 0)
return 0;
std = sizeof(struct iso_directory_record) + de->name_len[0];
if (std & 1)
std++;
if (de->length[0] - std != 32)
return retnamlen;
chr = ((unsigned char *) de) + std;
if (strncmp(chr, "ARCHIMEDES", 10))
return retnamlen;
if ((*retname == '_') && ((chr[19] & 1) == 1))
*retname = '!';
if (((de->flags[0] & 2) == 0) && (chr[13] == 0xff)
&& ((chr[12] & 0xf0) == 0xf0)) {
retname[retnamlen] = ',';
sprintf(retname+retnamlen+1, "%3.3x",
((chr[12] & 0xf) << 8) | chr[11]);
retnamlen += 4;
}
return retnamlen;
}
/*
 * This should _really_ be cleaned up some day...
 */
static int do_isofs_readdir(struct inode *inode, struct file *file,
struct dir_context *ctx,
char *tmpname, struct iso_directory_record *tmpde)
{
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
unsigned char bufbits = ISOFS_BUFFER_BITS(inode);
unsigned long block, offset, block_saved, offset_saved;
unsigned long inode_number = 0; /* Quiet GCC */
struct buffer_head *bh = NULL;
int len;
int map;
int first_de = 1;
char *p = NULL; /* Quiet GCC */
struct iso_directory_record *de;
struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
offset = ctx->pos & (bufsize - 1);
block = ctx->pos >> bufbits;
while (ctx->pos < inode->i_size) {
int de_len;
if (!bh) {
bh = isofs_bread(inode, block);
if (!bh)
return 0;
}
de = (struct iso_directory_record *) (bh->b_data + offset);
de_len = *(unsigned char *)de;
/*
* If the length byte is zero, we should move on to the next
* CDROM sector. If we are at the end of the directory, we
* kick out of the while loop.
*/
if (de_len == 0) {
brelse(bh);
bh = NULL;
ctx->pos = (ctx->pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1);
block = ctx->pos >> bufbits;
offset = 0;
continue;
}
block_saved = block;
offset_saved = offset;
offset += de_len;
/* Make sure we have a full directory entry */
if (offset >= bufsize) {
int slop = bufsize - offset + de_len;
memcpy(tmpde, de, slop);
offset &= bufsize - 1;
block++;
brelse(bh);
bh = NULL;
if (offset) {
bh = isofs_bread(inode, block);
if (!bh)
return 0;
memcpy((void *) tmpde + slop, bh->b_data, offset);
}
de = tmpde;
}
		/* Basic sanity check: the name must not exceed the directory entry */
if (de_len < de->name_len[0] +
sizeof(struct iso_directory_record)) {
printk(KERN_NOTICE "iso9660: Corrupted directory entry"
" in block %lu of inode %lu\n", block,
inode->i_ino);
brelse(bh);
return -EIO;
}
if (first_de) {
isofs_normalize_block_and_offset(de,
&block_saved,
&offset_saved);
inode_number = isofs_get_ino(block_saved,
offset_saved, bufbits);
}
if (de->flags[-sbi->s_high_sierra] & 0x80) {
first_de = 0;
ctx->pos += de_len;
continue;
}
first_de = 1;
/* Handle the case of the '.' directory */
if (de->name_len[0] == 1 && de->name[0] == 0) {
if (!dir_emit_dot(file, ctx))
break;
ctx->pos += de_len;
continue;
}
len = 0;
/* Handle the case of the '..' directory */
if (de->name_len[0] == 1 && de->name[0] == 1) {
if (!dir_emit_dotdot(file, ctx))
break;
ctx->pos += de_len;
continue;
}
/* Handle everything else. Do name translation if there
is no Rock Ridge NM field. */
/*
* Do not report hidden files if so instructed, or associated
* files unless instructed to do so
*/
if ((sbi->s_hide && (de->flags[-sbi->s_high_sierra] & 1)) ||
(!sbi->s_showassoc &&
(de->flags[-sbi->s_high_sierra] & 4))) {
ctx->pos += de_len;
continue;
}
map = 1;
if (sbi->s_rock) {
len = get_rock_ridge_filename(de, tmpname, inode);
if (len != 0) { /* may be -1 */
p = tmpname;
map = 0;
}
}
if (map) {
#ifdef CONFIG_JOLIET
if (sbi->s_joliet_level) {
len = get_joliet_filename(de, tmpname, inode);
p = tmpname;
} else
#endif
if (sbi->s_mapping == 'a') {
len = get_acorn_filename(de, tmpname, inode);
p = tmpname;
} else
if (sbi->s_mapping == 'n') {
len = isofs_name_translate(de, tmpname, inode);
p = tmpname;
} else {
p = de->name;
len = de->name_len[0];
}
}
if (len > 0) {
if (!dir_emit(ctx, p, len, inode_number, DT_UNKNOWN))
break;
}
ctx->pos += de_len;
}
if (bh)
brelse(bh);
return 0;
}
/*
 * Handle allocation of temporary space for name translation and for
 * handling split directory entries. The real work is done by
 * do_isofs_readdir().
 */
static int isofs_readdir(struct file *file, struct dir_context *ctx)
{
int result;
char *tmpname;
struct iso_directory_record *tmpde;
struct inode *inode = file_inode(file);
tmpname = (char *)__get_free_page(GFP_KERNEL);
if (tmpname == NULL)
return -ENOMEM;
tmpde = (struct iso_directory_record *) (tmpname+1024);
result = do_isofs_readdir(inode, file, ctx, tmpname, tmpde);
free_page((unsigned long) tmpname);
return result;
}
const struct file_operations isofs_dir_operations =
{
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate_shared = isofs_readdir,
};
/*
* directories can handle most operations...
*/
const struct inode_operations isofs_dir_inode_operations =
{
.lookup = isofs_lookup,
};
| linux-master | fs/isofs/dir.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/isofs/inode.c
*
* (C) 1991 Linus Torvalds - minix filesystem
* 1992, 1993, 1994 Eric Youngdale Modified for ISO 9660 filesystem.
* 1994 Eberhard Mönkeberg - multi session handling.
* 1995 Mark Dobie - allow mounting of some weird VideoCDs and PhotoCDs.
* 1997 Gordon Chaffee - Joliet CDs
* 1998 Eric Lammerts - ISO 9660 Level 3
* 2004 Paul Serice - Inode Support pushed out from 4GB to 128GB
* 2004 Paul Serice - NFS Export Operations
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/nls.h>
#include <linux/ctype.h>
#include <linux/statfs.h>
#include <linux/cdrom.h>
#include <linux/parser.h>
#include <linux/mpage.h>
#include <linux/user_namespace.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include "isofs.h"
#include "zisofs.h"
/* max tz offset is 13 hours */
#define MAX_TZ_OFFSET (52*15*60)
#define BEQUIET
static int isofs_hashi(const struct dentry *parent, struct qstr *qstr);
static int isofs_dentry_cmpi(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
#ifdef CONFIG_JOLIET
static int isofs_hashi_ms(const struct dentry *parent, struct qstr *qstr);
static int isofs_hash_ms(const struct dentry *parent, struct qstr *qstr);
static int isofs_dentry_cmpi_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
static int isofs_dentry_cmp_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
#endif
static void isofs_put_super(struct super_block *sb)
{
struct isofs_sb_info *sbi = ISOFS_SB(sb);
#ifdef CONFIG_JOLIET
unload_nls(sbi->s_nls_iocharset);
#endif
kfree(sbi);
sb->s_fs_info = NULL;
return;
}
static int isofs_read_inode(struct inode *, int relocated);
static int isofs_statfs (struct dentry *, struct kstatfs *);
static int isofs_show_options(struct seq_file *, struct dentry *);
static struct kmem_cache *isofs_inode_cachep;
static struct inode *isofs_alloc_inode(struct super_block *sb)
{
struct iso_inode_info *ei;
ei = alloc_inode_sb(sb, isofs_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
return &ei->vfs_inode;
}
static void isofs_free_inode(struct inode *inode)
{
kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
}
static void init_once(void *foo)
{
struct iso_inode_info *ei = foo;
inode_init_once(&ei->vfs_inode);
}
static int __init init_inodecache(void)
{
isofs_inode_cachep = kmem_cache_create("isofs_inode_cache",
sizeof(struct iso_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
init_once);
if (!isofs_inode_cachep)
return -ENOMEM;
return 0;
}
static void destroy_inodecache(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(isofs_inode_cachep);
}
static int isofs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
if (!(*flags & SB_RDONLY))
return -EROFS;
return 0;
}
static const struct super_operations isofs_sops = {
.alloc_inode = isofs_alloc_inode,
.free_inode = isofs_free_inode,
.put_super = isofs_put_super,
.statfs = isofs_statfs,
.remount_fs = isofs_remount,
.show_options = isofs_show_options,
};
static const struct dentry_operations isofs_dentry_ops[] = {
{
.d_hash = isofs_hashi,
.d_compare = isofs_dentry_cmpi,
},
#ifdef CONFIG_JOLIET
{
.d_hash = isofs_hash_ms,
.d_compare = isofs_dentry_cmp_ms,
},
{
.d_hash = isofs_hashi_ms,
.d_compare = isofs_dentry_cmpi_ms,
},
#endif
};
struct iso9660_options{
unsigned int rock:1;
unsigned int joliet:1;
unsigned int cruft:1;
unsigned int hide:1;
unsigned int showassoc:1;
unsigned int nocompress:1;
unsigned int overriderockperm:1;
unsigned int uid_set:1;
unsigned int gid_set:1;
unsigned char map;
unsigned char check;
unsigned int blocksize;
umode_t fmode;
umode_t dmode;
kgid_t gid;
kuid_t uid;
char *iocharset;
/* LVE */
s32 session;
s32 sbsector;
};
/*
* Compute the hash for the isofs name corresponding to the dentry.
*/
static int
isofs_hashi_common(const struct dentry *dentry, struct qstr *qstr, int ms)
{
const char *name;
int len;
char c;
unsigned long hash;
len = qstr->len;
name = qstr->name;
if (ms) {
while (len && name[len-1] == '.')
len--;
}
hash = init_name_hash(dentry);
while (len--) {
c = tolower(*name++);
hash = partial_name_hash(c, hash);
}
qstr->hash = end_name_hash(hash);
return 0;
}
/*
 * Compare two isofs names.
 */
static int isofs_dentry_cmp_common(
unsigned int len, const char *str,
const struct qstr *name, int ms, int ci)
{
int alen, blen;
/* A filename cannot end in '.' or we treat it like it has none */
alen = name->len;
blen = len;
if (ms) {
while (alen && name->name[alen-1] == '.')
alen--;
while (blen && str[blen-1] == '.')
blen--;
}
if (alen == blen) {
if (ci) {
if (strncasecmp(name->name, str, alen) == 0)
return 0;
} else {
if (strncmp(name->name, str, alen) == 0)
return 0;
}
}
return 1;
}
static int
isofs_hashi(const struct dentry *dentry, struct qstr *qstr)
{
return isofs_hashi_common(dentry, qstr, 0);
}
static int
isofs_dentry_cmpi(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 0, 1);
}
#ifdef CONFIG_JOLIET
/*
* Compute the hash for the isofs name corresponding to the dentry.
*/
static int
isofs_hash_common(const struct dentry *dentry, struct qstr *qstr, int ms)
{
const char *name;
int len;
len = qstr->len;
name = qstr->name;
if (ms) {
while (len && name[len-1] == '.')
len--;
}
qstr->hash = full_name_hash(dentry, name, len);
return 0;
}
static int
isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr)
{
return isofs_hash_common(dentry, qstr, 1);
}
static int
isofs_hashi_ms(const struct dentry *dentry, struct qstr *qstr)
{
return isofs_hashi_common(dentry, qstr, 1);
}
static int
isofs_dentry_cmp_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 1, 0);
}
static int
isofs_dentry_cmpi_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 1, 1);
}
#endif
enum {
Opt_block, Opt_check_r, Opt_check_s, Opt_cruft, Opt_gid, Opt_ignore,
Opt_iocharset, Opt_map_a, Opt_map_n, Opt_map_o, Opt_mode, Opt_nojoliet,
Opt_norock, Opt_sb, Opt_session, Opt_uid, Opt_unhide, Opt_utf8, Opt_err,
Opt_nocompress, Opt_hide, Opt_showassoc, Opt_dmode, Opt_overriderockperm,
};
static const match_table_t tokens = {
{Opt_norock, "norock"},
{Opt_nojoliet, "nojoliet"},
{Opt_unhide, "unhide"},
{Opt_hide, "hide"},
{Opt_showassoc, "showassoc"},
{Opt_cruft, "cruft"},
{Opt_utf8, "utf8"},
{Opt_iocharset, "iocharset=%s"},
{Opt_map_a, "map=acorn"},
{Opt_map_a, "map=a"},
{Opt_map_n, "map=normal"},
{Opt_map_n, "map=n"},
{Opt_map_o, "map=off"},
{Opt_map_o, "map=o"},
{Opt_session, "session=%u"},
{Opt_sb, "sbsector=%u"},
{Opt_check_r, "check=relaxed"},
{Opt_check_r, "check=r"},
{Opt_check_s, "check=strict"},
{Opt_check_s, "check=s"},
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_mode, "mode=%u"},
{Opt_dmode, "dmode=%u"},
{Opt_overriderockperm, "overriderockperm"},
{Opt_block, "block=%u"},
{Opt_ignore, "conv=binary"},
{Opt_ignore, "conv=b"},
{Opt_ignore, "conv=text"},
{Opt_ignore, "conv=t"},
{Opt_ignore, "conv=mtext"},
{Opt_ignore, "conv=m"},
{Opt_ignore, "conv=auto"},
{Opt_ignore, "conv=a"},
{Opt_nocompress, "nocompress"},
{Opt_err, NULL}
};
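/*
 * Userspace sketch of feeding an option string to the parser above via
 * mount(2) (device and mount point are made up; needs root). The data
 * argument is what parse_options() receives.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/dev/sr0", "/mnt/cdrom", "iso9660", MS_RDONLY,
		  "map=normal,check=strict,uid=1000") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}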
static int parse_options(char *options, struct iso9660_options *popt)
{
char *p;
int option;
unsigned int uv;
popt->map = 'n';
popt->rock = 1;
popt->joliet = 1;
popt->cruft = 0;
popt->hide = 0;
popt->showassoc = 0;
popt->check = 'u'; /* unset */
popt->nocompress = 0;
popt->blocksize = 1024;
popt->fmode = popt->dmode = ISOFS_INVALID_MODE;
popt->uid_set = 0;
popt->gid_set = 0;
popt->gid = GLOBAL_ROOT_GID;
popt->uid = GLOBAL_ROOT_UID;
popt->iocharset = NULL;
popt->overriderockperm = 0;
popt->session=-1;
popt->sbsector=-1;
if (!options)
return 1;
while ((p = strsep(&options, ",")) != NULL) {
int token;
substring_t args[MAX_OPT_ARGS];
unsigned n;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_norock:
popt->rock = 0;
break;
case Opt_nojoliet:
popt->joliet = 0;
break;
case Opt_hide:
popt->hide = 1;
break;
case Opt_unhide:
case Opt_showassoc:
popt->showassoc = 1;
break;
case Opt_cruft:
popt->cruft = 1;
break;
#ifdef CONFIG_JOLIET
case Opt_utf8:
kfree(popt->iocharset);
popt->iocharset = kstrdup("utf8", GFP_KERNEL);
if (!popt->iocharset)
return 0;
break;
case Opt_iocharset:
kfree(popt->iocharset);
popt->iocharset = match_strdup(&args[0]);
if (!popt->iocharset)
return 0;
break;
#endif
case Opt_map_a:
popt->map = 'a';
break;
case Opt_map_o:
popt->map = 'o';
break;
case Opt_map_n:
popt->map = 'n';
break;
case Opt_session:
if (match_int(&args[0], &option))
return 0;
n = option;
/*
 * Track numbers are supposed to be in the range 1-99; the
 * mount option starts indexing at 0.
 */
if (n >= 99)
return 0;
popt->session = n + 1;
break;
case Opt_sb:
if (match_int(&args[0], &option))
return 0;
popt->sbsector = option;
break;
case Opt_check_r:
popt->check = 'r';
break;
case Opt_check_s:
popt->check = 's';
break;
case Opt_ignore:
break;
case Opt_uid:
if (match_uint(&args[0], &uv))
return 0;
popt->uid = make_kuid(current_user_ns(), uv);
if (!uid_valid(popt->uid))
return 0;
popt->uid_set = 1;
break;
case Opt_gid:
if (match_uint(&args[0], &uv))
return 0;
popt->gid = make_kgid(current_user_ns(), uv);
if (!gid_valid(popt->gid))
return 0;
popt->gid_set = 1;
break;
case Opt_mode:
if (match_int(&args[0], &option))
return 0;
popt->fmode = option;
break;
case Opt_dmode:
if (match_int(&args[0], &option))
return 0;
popt->dmode = option;
break;
case Opt_overriderockperm:
popt->overriderockperm = 1;
break;
case Opt_block:
if (match_int(&args[0], &option))
return 0;
n = option;
if (n != 512 && n != 1024 && n != 2048)
return 0;
popt->blocksize = n;
break;
case Opt_nocompress:
popt->nocompress = 1;
break;
default:
return 0;
}
}
return 1;
}
/*
* Display the mount options in /proc/mounts.
*/
static int isofs_show_options(struct seq_file *m, struct dentry *root)
{
struct isofs_sb_info *sbi = ISOFS_SB(root->d_sb);
if (!sbi->s_rock) seq_puts(m, ",norock");
else if (!sbi->s_joliet_level) seq_puts(m, ",nojoliet");
if (sbi->s_cruft) seq_puts(m, ",cruft");
if (sbi->s_hide) seq_puts(m, ",hide");
if (sbi->s_nocompress) seq_puts(m, ",nocompress");
if (sbi->s_overriderockperm) seq_puts(m, ",overriderockperm");
if (sbi->s_showassoc) seq_puts(m, ",showassoc");
if (sbi->s_check) seq_printf(m, ",check=%c", sbi->s_check);
if (sbi->s_mapping) seq_printf(m, ",map=%c", sbi->s_mapping);
if (sbi->s_session != 255) seq_printf(m, ",session=%u", sbi->s_session - 1);
if (sbi->s_sbsector != -1) seq_printf(m, ",sbsector=%u", sbi->s_sbsector);
if (root->d_sb->s_blocksize != 1024)
seq_printf(m, ",blocksize=%lu", root->d_sb->s_blocksize);
if (sbi->s_uid_set)
seq_printf(m, ",uid=%u",
from_kuid_munged(&init_user_ns, sbi->s_uid));
if (sbi->s_gid_set)
seq_printf(m, ",gid=%u",
from_kgid_munged(&init_user_ns, sbi->s_gid));
if (sbi->s_dmode != ISOFS_INVALID_MODE)
seq_printf(m, ",dmode=%o", sbi->s_dmode);
if (sbi->s_fmode != ISOFS_INVALID_MODE)
seq_printf(m, ",fmode=%o", sbi->s_fmode);
#ifdef CONFIG_JOLIET
if (sbi->s_nls_iocharset)
seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
else
seq_puts(m, ",iocharset=utf8");
#endif
return 0;
}
/*
 * Check whether the driver can tell us the multisession redirection value.
 *
 * Please don't change this unless you know what you are doing!
 * Multisession is legal only with XA disks.
 * A non-XA disk with more than one volume descriptor may do it right, but
 * usually is written in a nowhere-standardized "multi-partition" manner.
 * Multisession uses absolute addressing (solely the first frame of the whole
 * track is #0), multi-partition uses relative addressing (each first frame of
 * each track is #0), and a track is not a session.
 *
 * Broken CD-writer software or drive firmware does not set new standards,
 * at least not when it conflicts with the existing ones.
 *
 * [email protected]
 */
#define WE_OBEY_THE_WRITTEN_STANDARDS 1
static unsigned int isofs_get_last_session(struct super_block *sb, s32 session)
{
struct cdrom_device_info *cdi = disk_to_cdi(sb->s_bdev->bd_disk);
unsigned int vol_desc_start = 0;
if (session > 0) {
struct cdrom_tocentry te;
if (!cdi)
return 0;
te.cdte_track = session;
te.cdte_format = CDROM_LBA;
if (cdrom_read_tocentry(cdi, &te) == 0) {
printk(KERN_DEBUG "ISOFS: Session %d start %d type %d\n",
session, te.cdte_addr.lba,
te.cdte_ctrl & CDROM_DATA_TRACK);
if ((te.cdte_ctrl & CDROM_DATA_TRACK) == 4)
return te.cdte_addr.lba;
}
printk(KERN_ERR "ISOFS: Invalid session number or type of track\n");
}
if (cdi) {
struct cdrom_multisession ms_info;
ms_info.addr_format = CDROM_LBA;
if (cdrom_multisession(cdi, &ms_info) == 0) {
#if WE_OBEY_THE_WRITTEN_STANDARDS
/* necessary for a valid ms_info.addr */
if (ms_info.xa_flag)
#endif
vol_desc_start = ms_info.addr.lba;
}
}
return vol_desc_start;
}
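/*
 * Illustrative only (the device path below is hypothetical): to mount
 * the second session of a multisession disc, one would use the 0-based
 * mount option
 *
 *	mount -t iso9660 -o session=1 /dev/sr0 /mnt
 *
 * parse_options() above stores session+1, so the value arrives in this
 * function as a 1-based track number.
 */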
/*
 * Check if the root directory is empty (has fewer than 3 entries).
 *
 * Used to detect broken CDs where the ISO root directory is empty but the
 * Joliet root directory is OK. If such a CD has Rock Ridge extensions, they
 * will be disabled (and Joliet used instead) or else no files would be
 * visible.
 */
static bool rootdir_empty(struct super_block *sb, unsigned long block)
{
int offset = 0, files = 0, de_len;
struct iso_directory_record *de;
struct buffer_head *bh;
bh = sb_bread(sb, block);
if (!bh)
return true;
while (files < 3) {
de = (struct iso_directory_record *) (bh->b_data + offset);
de_len = *(unsigned char *) de;
if (de_len == 0)
break;
files++;
offset += de_len;
}
brelse(bh);
return files < 3;
}
/*
* Initialize the superblock and read the root inode.
*/
static int isofs_fill_super(struct super_block *s, void *data, int silent)
{
struct buffer_head *bh = NULL, *pri_bh = NULL;
struct hs_primary_descriptor *h_pri = NULL;
struct iso_primary_descriptor *pri = NULL;
struct iso_supplementary_descriptor *sec = NULL;
struct iso_directory_record *rootp;
struct inode *inode;
struct iso9660_options opt;
struct isofs_sb_info *sbi;
unsigned long first_data_zone;
int joliet_level = 0;
int iso_blknum, block;
int orig_zonesize;
int table, error = -EINVAL;
unsigned int vol_desc_start;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
s->s_fs_info = sbi;
if (!parse_options((char *)data, &opt))
goto out_freesbi;
/*
 * First of all, get the hardware blocksize for this device.
 * If we don't know what it is, or the hardware blocksize is
 * larger than the blocksize the user specified, then use
 * the hardware blocksize instead.
 */
/*
 * Reject hardware sector sizes larger than the 2048-byte
 * ISO 9660 block size.
 */
if (bdev_logical_block_size(s->s_bdev) > 2048) {
printk(KERN_WARNING
"ISOFS: unsupported/invalid hardware sector size %d\n",
bdev_logical_block_size(s->s_bdev));
goto out_freesbi;
}
opt.blocksize = sb_min_blocksize(s, opt.blocksize);
sbi->s_high_sierra = 0; /* default is iso9660 */
sbi->s_session = opt.session;
sbi->s_sbsector = opt.sbsector;
vol_desc_start = (opt.sbsector != -1) ?
opt.sbsector : isofs_get_last_session(s,opt.session);
for (iso_blknum = vol_desc_start+16;
iso_blknum < vol_desc_start+100; iso_blknum++) {
struct hs_volume_descriptor *hdp;
struct iso_volume_descriptor *vdp;
block = iso_blknum << (ISOFS_BLOCK_BITS - s->s_blocksize_bits);
if (!(bh = sb_bread(s, block)))
goto out_no_read;
vdp = (struct iso_volume_descriptor *)bh->b_data;
hdp = (struct hs_volume_descriptor *)bh->b_data;
/*
* Due to the overlapping physical location of the descriptors,
* ISO CDs can match hdp->id==HS_STANDARD_ID as well. To ensure
* proper identification in this case, we first check for ISO.
*/
if (strncmp (vdp->id, ISO_STANDARD_ID, sizeof vdp->id) == 0) {
if (isonum_711(vdp->type) == ISO_VD_END)
break;
if (isonum_711(vdp->type) == ISO_VD_PRIMARY) {
if (!pri) {
pri = (struct iso_primary_descriptor *)vdp;
/* Save the buffer in case we need it ... */
pri_bh = bh;
bh = NULL;
}
}
#ifdef CONFIG_JOLIET
else if (isonum_711(vdp->type) == ISO_VD_SUPPLEMENTARY) {
sec = (struct iso_supplementary_descriptor *)vdp;
if (sec->escape[0] == 0x25 && sec->escape[1] == 0x2f) {
if (opt.joliet) {
if (sec->escape[2] == 0x40)
joliet_level = 1;
else if (sec->escape[2] == 0x43)
joliet_level = 2;
else if (sec->escape[2] == 0x45)
joliet_level = 3;
printk(KERN_DEBUG "ISO 9660 Extensions: "
"Microsoft Joliet Level %d\n",
joliet_level);
}
goto root_found;
} else {
/* Unknown supplementary volume descriptor */
sec = NULL;
}
}
#endif
} else {
if (strncmp (hdp->id, HS_STANDARD_ID, sizeof hdp->id) == 0) {
if (isonum_711(hdp->type) != ISO_VD_PRIMARY)
goto out_freebh;
sbi->s_high_sierra = 1;
opt.rock = 0;
h_pri = (struct hs_primary_descriptor *)vdp;
goto root_found;
}
}
/* Just skip any volume descriptors we don't recognize */
brelse(bh);
bh = NULL;
}
/*
* If we fall through, either no volume descriptor was found,
* or else we passed a primary descriptor looking for others.
*/
if (!pri)
goto out_unknown_format;
brelse(bh);
bh = pri_bh;
pri_bh = NULL;
root_found:
/* We don't support read-write mounts */
if (!sb_rdonly(s)) {
error = -EACCES;
goto out_freebh;
}
if (joliet_level && (!pri || !opt.rock)) {
/* This is the case of Joliet with the norock mount flag.
 * A disc with both Joliet and Rock Ridge is handled later.
 */
pri = (struct iso_primary_descriptor *) sec;
}
if(sbi->s_high_sierra){
rootp = (struct iso_directory_record *) h_pri->root_directory_record;
sbi->s_nzones = isonum_733(h_pri->volume_space_size);
sbi->s_log_zone_size = isonum_723(h_pri->logical_block_size);
sbi->s_max_size = isonum_733(h_pri->volume_space_size);
} else {
if (!pri)
goto out_freebh;
rootp = (struct iso_directory_record *) pri->root_directory_record;
sbi->s_nzones = isonum_733(pri->volume_space_size);
sbi->s_log_zone_size = isonum_723(pri->logical_block_size);
sbi->s_max_size = isonum_733(pri->volume_space_size);
}
sbi->s_ninodes = 0; /* No way to figure this out easily */
orig_zonesize = sbi->s_log_zone_size;
/*
* If the zone size is smaller than the hardware sector size,
* this is a fatal error. This would occur if the disc drive
* had sectors that were 2048 bytes, but the filesystem had
* blocks that were 512 bytes (which should only very rarely
* happen.)
*/
if (orig_zonesize < opt.blocksize)
goto out_bad_size;
/* RDE: convert log zone size to bit shift */
switch (sbi->s_log_zone_size) {
case 512: sbi->s_log_zone_size = 9; break;
case 1024: sbi->s_log_zone_size = 10; break;
case 2048: sbi->s_log_zone_size = 11; break;
default:
goto out_bad_zone_size;
}
s->s_magic = ISOFS_SUPER_MAGIC;
/*
* With multi-extent files, file size is only limited by the maximum
* size of a file system, which is 8 TB.
*/
s->s_maxbytes = 0x80000000000LL;
/* ECMA-119 timestamp from 1900/1/1 with tz offset */
s->s_time_min = mktime64(1900, 1, 1, 0, 0, 0) - MAX_TZ_OFFSET;
s->s_time_max = mktime64(U8_MAX+1900, 12, 31, 23, 59, 59) + MAX_TZ_OFFSET;
/* Set this for reference. It's not currently used except on write,
which we don't have... */
first_data_zone = isonum_733(rootp->extent) +
isonum_711(rootp->ext_attr_length);
sbi->s_firstdatazone = first_data_zone;
#ifndef BEQUIET
printk(KERN_DEBUG "ISOFS: Max size:%ld Log zone size:%ld\n",
sbi->s_max_size, 1UL << sbi->s_log_zone_size);
printk(KERN_DEBUG "ISOFS: First datazone:%ld\n", sbi->s_firstdatazone);
if(sbi->s_high_sierra)
printk(KERN_DEBUG "ISOFS: Disc in High Sierra format.\n");
#endif
/*
* If the Joliet level is set, we _may_ decide to use the
* secondary descriptor, but can't be sure until after we
* read the root inode. But before reading the root inode
* we may need to change the device blocksize, and would
* rather release the old buffer first. So, we cache the
* first_data_zone value from the secondary descriptor.
*/
if (joliet_level) {
pri = (struct iso_primary_descriptor *) sec;
rootp = (struct iso_directory_record *)
pri->root_directory_record;
first_data_zone = isonum_733(rootp->extent) +
isonum_711(rootp->ext_attr_length);
}
/*
* We're all done using the volume descriptor, and may need
* to change the device blocksize, so release the buffer now.
*/
brelse(pri_bh);
brelse(bh);
/*
* Force the blocksize to 512 for 512 byte sectors. The file
* read primitives really get it wrong in a bad way if we don't
* do this.
*
* Note - we should never be setting the blocksize to something
* less than the hardware sector size for the device. If we
* do, we would end up having to read larger buffers and split
* out portions to satisfy requests.
*
* Note2- the idea here is that we want to deal with the optimal
* zonesize in the filesystem. If we have it set to something less,
* then we have horrible problems with trying to piece together
* bits of adjacent blocks in order to properly read directory
* entries. By forcing the blocksize in this way, we ensure
* that we will never be required to do this.
*/
sb_set_blocksize(s, orig_zonesize);
sbi->s_nls_iocharset = NULL;
#ifdef CONFIG_JOLIET
if (joliet_level) {
char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
if (strcmp(p, "utf8") != 0) {
sbi->s_nls_iocharset = opt.iocharset ?
load_nls(opt.iocharset) : load_nls_default();
if (!sbi->s_nls_iocharset)
goto out_freesbi;
}
}
#endif
s->s_op = &isofs_sops;
s->s_export_op = &isofs_export_ops;
sbi->s_mapping = opt.map;
sbi->s_rock = (opt.rock ? 2 : 0);
sbi->s_rock_offset = -1; /* initial offset, will guess until SP is found*/
sbi->s_cruft = opt.cruft;
sbi->s_hide = opt.hide;
sbi->s_showassoc = opt.showassoc;
sbi->s_uid = opt.uid;
sbi->s_gid = opt.gid;
sbi->s_uid_set = opt.uid_set;
sbi->s_gid_set = opt.gid_set;
sbi->s_nocompress = opt.nocompress;
sbi->s_overriderockperm = opt.overriderockperm;
/*
* It would be incredibly stupid to allow people to mark every file
* on the disk as suid, so we merely allow them to set the default
* permissions.
*/
if (opt.fmode != ISOFS_INVALID_MODE)
sbi->s_fmode = opt.fmode & 0777;
else
sbi->s_fmode = ISOFS_INVALID_MODE;
if (opt.dmode != ISOFS_INVALID_MODE)
sbi->s_dmode = opt.dmode & 0777;
else
sbi->s_dmode = ISOFS_INVALID_MODE;
/*
* Read the root inode, which _may_ result in changing
* the s_rock flag. Once we have the final s_rock value,
* we then decide whether to use the Joliet descriptor.
*/
inode = isofs_iget(s, sbi->s_firstdatazone, 0);
if (IS_ERR(inode))
goto out_no_root;
/*
* Fix for broken CDs with Rock Ridge and empty ISO root directory but
* correct Joliet root directory.
*/
if (sbi->s_rock == 1 && joliet_level &&
rootdir_empty(s, sbi->s_firstdatazone)) {
printk(KERN_NOTICE
"ISOFS: primary root directory is empty. "
"Disabling Rock Ridge and switching to Joliet.");
sbi->s_rock = 0;
}
/*
* If this disk has both Rock Ridge and Joliet on it, then we
* want to use Rock Ridge by default. This can be overridden
* by using the norock mount option. There is still one other
* possibility that is not taken into account: a Rock Ridge
* CD with Unicode names. Until someone sees such a beast, it
* will not be supported.
*/
if (sbi->s_rock == 1) {
joliet_level = 0;
} else if (joliet_level) {
sbi->s_rock = 0;
if (sbi->s_firstdatazone != first_data_zone) {
sbi->s_firstdatazone = first_data_zone;
printk(KERN_DEBUG
"ISOFS: changing to secondary root\n");
iput(inode);
inode = isofs_iget(s, sbi->s_firstdatazone, 0);
if (IS_ERR(inode))
goto out_no_root;
}
}
if (opt.check == 'u') {
/* Only Joliet is case insensitive by default */
if (joliet_level)
opt.check = 'r';
else
opt.check = 's';
}
sbi->s_joliet_level = joliet_level;
/* Make sure the root inode is a directory */
if (!S_ISDIR(inode->i_mode)) {
printk(KERN_WARNING
"isofs_fill_super: root inode is not a directory. "
"Corrupted media?\n");
goto out_iput;
}
table = 0;
if (joliet_level)
table += 2;
if (opt.check == 'r')
table++;
sbi->s_check = opt.check;
if (table)
s->s_d_op = &isofs_dentry_ops[table - 1];
/* get the root dentry */
s->s_root = d_make_root(inode);
if (!(s->s_root)) {
error = -ENOMEM;
goto out_no_inode;
}
kfree(opt.iocharset);
return 0;
/*
* Display error messages and free resources.
*/
out_iput:
iput(inode);
goto out_no_inode;
out_no_root:
error = PTR_ERR(inode);
if (error != -ENOMEM)
printk(KERN_WARNING "%s: get root inode failed\n", __func__);
out_no_inode:
#ifdef CONFIG_JOLIET
unload_nls(sbi->s_nls_iocharset);
#endif
goto out_freesbi;
out_no_read:
printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n",
__func__, s->s_id, iso_blknum, block);
goto out_freebh;
out_bad_zone_size:
printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n",
sbi->s_log_zone_size);
goto out_freebh;
out_bad_size:
printk(KERN_WARNING "ISOFS: Logical zone size(%d) < hardware blocksize(%u)\n",
orig_zonesize, opt.blocksize);
goto out_freebh;
out_unknown_format:
if (!silent)
printk(KERN_WARNING "ISOFS: Unable to identify CD-ROM format.\n");
out_freebh:
brelse(bh);
brelse(pri_bh);
out_freesbi:
kfree(opt.iocharset);
kfree(sbi);
s->s_fs_info = NULL;
return error;
}
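/*
 * Report filesystem statistics. The medium is read-only, so the free
 * block and free inode counts are always zero.
 */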
static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = ISOFS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = (ISOFS_SB(sb)->s_nzones
<< (ISOFS_SB(sb)->s_log_zone_size - sb->s_blocksize_bits));
buf->f_bfree = 0;
buf->f_bavail = 0;
buf->f_files = ISOFS_SB(sb)->s_ninodes;
buf->f_ffree = 0;
buf->f_fsid = u64_to_fsid(id);
buf->f_namelen = NAME_MAX;
return 0;
}
/*
* Get a set of blocks; filling in buffer_heads if already allocated
* or getblk() if they are not. Returns the number of blocks inserted
* (-ve == error.)
*/
int isofs_get_blocks(struct inode *inode, sector_t iblock,
struct buffer_head **bh, unsigned long nblocks)
{
unsigned long b_off = iblock;
unsigned offset, sect_size;
unsigned int firstext;
unsigned long nextblk, nextoff;
int section, rv, error;
struct iso_inode_info *ei = ISOFS_I(inode);
error = -EIO;
rv = 0;
if (iblock != b_off) {
printk(KERN_DEBUG "%s: block number too large\n", __func__);
goto abort;
}
offset = 0;
firstext = ei->i_first_extent;
sect_size = ei->i_section_size >> ISOFS_BUFFER_BITS(inode);
nextblk = ei->i_next_section_block;
nextoff = ei->i_next_section_offset;
section = 0;
while (nblocks) {
/* If we are *way* beyond the end of the file, print a message.
 * Access beyond the end of the file up to the next page boundary
 * is normal, however, because of the way the page cache works.
 * In this case, we just return 0 so that we can properly fill
 * the page with useless information without generating any
 * I/O errors.
 */
if (b_off > ((inode->i_size + PAGE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n",
__func__, b_off,
(unsigned long long)inode->i_size);
goto abort;
}
/* On the last section, nextblk == 0, section size is likely to
* exceed sect_size by a partial block, and access beyond the
* end of the file will reach beyond the section size, too.
*/
while (nextblk && (b_off >= (offset + sect_size))) {
struct inode *ninode;
offset += sect_size;
ninode = isofs_iget(inode->i_sb, nextblk, nextoff);
if (IS_ERR(ninode)) {
error = PTR_ERR(ninode);
goto abort;
}
firstext = ISOFS_I(ninode)->i_first_extent;
sect_size = ISOFS_I(ninode)->i_section_size >> ISOFS_BUFFER_BITS(ninode);
nextblk = ISOFS_I(ninode)->i_next_section_block;
nextoff = ISOFS_I(ninode)->i_next_section_offset;
iput(ninode);
if (++section > 100) {
printk(KERN_DEBUG "%s: More than 100 file sections ?!?"
" aborting...\n", __func__);
printk(KERN_DEBUG "%s: block=%lu firstext=%u sect_size=%u "
"nextblk=%lu nextoff=%lu\n", __func__,
b_off, firstext, (unsigned) sect_size,
nextblk, nextoff);
goto abort;
}
}
if (*bh) {
map_bh(*bh, inode->i_sb, firstext + b_off - offset);
} else {
*bh = sb_getblk(inode->i_sb, firstext+b_off-offset);
if (!*bh)
goto abort;
}
bh++; /* Next buffer head */
b_off++; /* Next buffer offset */
nblocks--;
rv++;
}
error = 0;
abort:
return rv != 0 ? rv : error;
}
/*
* Used by the standard interfaces.
*/
static int isofs_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int ret;
if (create) {
printk(KERN_DEBUG "%s: Kernel tries to allocate a block\n", __func__);
return -EROFS;
}
ret = isofs_get_blocks(inode, iblock, &bh_result, 1);
return ret < 0 ? ret : 0;
}
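/*
 * Map a file-relative block to its block number on the device using a
 * throwaway buffer_head; returns 0 when the block cannot be mapped.
 */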
static int isofs_bmap(struct inode *inode, sector_t block)
{
struct buffer_head dummy;
int error;
dummy.b_state = 0;
dummy.b_blocknr = -1000;
error = isofs_get_block(inode, block, &dummy, 0);
if (!error)
return dummy.b_blocknr;
return 0;
}
struct buffer_head *isofs_bread(struct inode *inode, sector_t block)
{
sector_t blknr = isofs_bmap(inode, block);
if (!blknr)
return NULL;
return sb_bread(inode->i_sb, blknr);
}
static int isofs_read_folio(struct file *file, struct folio *folio)
{
return mpage_read_folio(folio, isofs_get_block);
}
static void isofs_readahead(struct readahead_control *rac)
{
mpage_readahead(rac, isofs_get_block);
}
static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,isofs_get_block);
}
static const struct address_space_operations isofs_aops = {
.read_folio = isofs_read_folio,
.readahead = isofs_readahead,
.bmap = _isofs_bmap
};
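/*
 * Compute i_size for a multi-extent (ISO 9660 Level 3) file by summing
 * the sizes of the chained directory records that have the multi-extent
 * flag (0x80) set, and remember where the second record lives so that
 * isofs_get_blocks() can look up the next section later.
 */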
static int isofs_read_level3_size(struct inode *inode)
{
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
int high_sierra = ISOFS_SB(inode->i_sb)->s_high_sierra;
struct buffer_head *bh = NULL;
unsigned long block, offset, block_saved, offset_saved;
int i = 0;
int more_entries = 0;
struct iso_directory_record *tmpde = NULL;
struct iso_inode_info *ei = ISOFS_I(inode);
inode->i_size = 0;
/* The first 16 blocks are reserved as the System Area. Thus,
* no inodes can appear in block 0. We use this to flag that
* this is the last section. */
ei->i_next_section_block = 0;
ei->i_next_section_offset = 0;
block = ei->i_iget5_block;
offset = ei->i_iget5_offset;
do {
struct iso_directory_record *de;
unsigned int de_len;
if (!bh) {
bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
}
de = (struct iso_directory_record *) (bh->b_data + offset);
de_len = *(unsigned char *) de;
if (de_len == 0) {
brelse(bh);
bh = NULL;
++block;
offset = 0;
continue;
}
block_saved = block;
offset_saved = offset;
offset += de_len;
/* Make sure we have a full directory entry */
if (offset >= bufsize) {
int slop = bufsize - offset + de_len;
if (!tmpde) {
tmpde = kmalloc(256, GFP_KERNEL);
if (!tmpde)
goto out_nomem;
}
memcpy(tmpde, de, slop);
offset &= bufsize - 1;
block++;
brelse(bh);
bh = NULL;
if (offset) {
bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
memcpy((void *)tmpde+slop, bh->b_data, offset);
}
de = tmpde;
}
inode->i_size += isonum_733(de->size);
if (i == 1) {
ei->i_next_section_block = block_saved;
ei->i_next_section_offset = offset_saved;
}
more_entries = de->flags[-high_sierra] & 0x80;
i++;
if (i > 100)
goto out_toomany;
} while (more_entries);
out:
kfree(tmpde);
brelse(bh);
return 0;
out_nomem:
brelse(bh);
return -ENOMEM;
out_noread:
printk(KERN_INFO "ISOFS: unable to read i-node block %lu\n", block);
kfree(tmpde);
return -EIO;
out_toomany:
printk(KERN_INFO "%s: More than 100 file sections ?!?, aborting...\n"
"isofs_read_level3_size: inode=%lu\n",
__func__, inode->i_ino);
goto out;
}
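/*
 * Read the on-disc directory record describing this inode and set up
 * the in-core inode from it. 'relocated' is passed through to the Rock
 * Ridge parser so it can refuse recursive directory relocation.
 */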
static int isofs_read_inode(struct inode *inode, int relocated)
{
struct super_block *sb = inode->i_sb;
struct isofs_sb_info *sbi = ISOFS_SB(sb);
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
unsigned long block;
int high_sierra = sbi->s_high_sierra;
struct buffer_head *bh;
struct iso_directory_record *de;
struct iso_directory_record *tmpde = NULL;
unsigned int de_len;
unsigned long offset;
struct iso_inode_info *ei = ISOFS_I(inode);
int ret = -EIO;
block = ei->i_iget5_block;
bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_badread;
offset = ei->i_iget5_offset;
de = (struct iso_directory_record *) (bh->b_data + offset);
de_len = *(unsigned char *) de;
if (de_len < sizeof(struct iso_directory_record))
goto fail;
if (offset + de_len > bufsize) {
int frag1 = bufsize - offset;
tmpde = kmalloc(de_len, GFP_KERNEL);
if (!tmpde) {
ret = -ENOMEM;
goto fail;
}
memcpy(tmpde, bh->b_data + offset, frag1);
brelse(bh);
bh = sb_bread(inode->i_sb, ++block);
if (!bh)
goto out_badread;
memcpy((char *)tmpde+frag1, bh->b_data, de_len - frag1);
de = tmpde;
}
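/*
 * ISO 9660 has no native inode numbers, so one is synthesized from
 * the (block, offset) location of the directory record.
 */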
inode->i_ino = isofs_get_ino(ei->i_iget5_block,
ei->i_iget5_offset,
ISOFS_BUFFER_BITS(inode));
/* Assume it is a normal-format file unless told otherwise */
ei->i_file_format = isofs_file_normal;
if (de->flags[-high_sierra] & 2) {
if (sbi->s_dmode != ISOFS_INVALID_MODE)
inode->i_mode = S_IFDIR | sbi->s_dmode;
else
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
set_nlink(inode, 1); /*
* Set to 1. We know there are 2, but
* the find utility tries to optimize
* if it is 2, and it screws up. It is
* easier to give 1 which tells find to
* do it the hard way.
*/
} else {
if (sbi->s_fmode != ISOFS_INVALID_MODE) {
inode->i_mode = S_IFREG | sbi->s_fmode;
} else {
/*
* Set default permissions: r-x for all. The disc
* could be shared with DOS machines so virtually
* anything could be a valid executable.
*/
inode->i_mode = S_IFREG | S_IRUGO | S_IXUGO;
}
set_nlink(inode, 1);
}
inode->i_uid = sbi->s_uid;
inode->i_gid = sbi->s_gid;
inode->i_blocks = 0;
ei->i_format_parm[0] = 0;
ei->i_format_parm[1] = 0;
ei->i_format_parm[2] = 0;
ei->i_section_size = isonum_733(de->size);
if (de->flags[-high_sierra] & 0x80) {
ret = isofs_read_level3_size(inode);
if (ret < 0)
goto fail;
ret = -EIO;
} else {
ei->i_next_section_block = 0;
ei->i_next_section_offset = 0;
inode->i_size = isonum_733(de->size);
}
/*
* Some dipshit decided to store some other bit of information
* in the high byte of the file length. Truncate size in case
* this CDROM was mounted with the cruft option.
*/
if (sbi->s_cruft)
inode->i_size &= 0x00ffffff;
if (de->interleave[0]) {
printk(KERN_DEBUG "ISOFS: Interleaved files not (yet) supported.\n");
inode->i_size = 0;
}
/* I have no idea what file_unit_size is used for, so
we will flag it for now */
if (de->file_unit_size[0] != 0) {
printk(KERN_DEBUG "ISOFS: File unit size != 0 for ISO file (%ld).\n",
inode->i_ino);
}
/* I have no idea what other flag bits are used for, so
we will flag it for now */
#ifdef DEBUG
if((de->flags[-high_sierra] & ~2)!= 0){
printk(KERN_DEBUG "ISOFS: Unusual flag settings for ISO file "
"(%ld %x).\n",
inode->i_ino, de->flags[-high_sierra]);
}
#endif
inode->i_mtime = inode->i_atime =
inode_set_ctime(inode, iso_date(de->date, high_sierra), 0);
ei->i_first_extent = (isonum_733(de->extent) +
isonum_711(de->ext_attr_length));
/* Set the number of blocks for stat() - should be done before RR */
inode->i_blocks = (inode->i_size + 511) >> 9;
/*
* Now test for possible Rock Ridge extensions which will override
* some of these numbers in the inode structure.
*/
if (!high_sierra) {
parse_rock_ridge_inode(de, inode, relocated);
/* if we want uid/gid set, override the rock ridge setting */
if (sbi->s_uid_set)
inode->i_uid = sbi->s_uid;
if (sbi->s_gid_set)
inode->i_gid = sbi->s_gid;
}
/* Now set final access rights if overriding rock ridge setting */
if (S_ISDIR(inode->i_mode) && sbi->s_overriderockperm &&
sbi->s_dmode != ISOFS_INVALID_MODE)
inode->i_mode = S_IFDIR | sbi->s_dmode;
if (S_ISREG(inode->i_mode) && sbi->s_overriderockperm &&
sbi->s_fmode != ISOFS_INVALID_MODE)
inode->i_mode = S_IFREG | sbi->s_fmode;
/* Install the inode operations vector */
if (S_ISREG(inode->i_mode)) {
inode->i_fop = &generic_ro_fops;
switch (ei->i_file_format) {
#ifdef CONFIG_ZISOFS
case isofs_file_compressed:
inode->i_data.a_ops = &zisofs_aops;
break;
#endif
default:
inode->i_data.a_ops = &isofs_aops;
break;
}
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &isofs_dir_inode_operations;
inode->i_fop = &isofs_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
inode->i_op = &page_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_data.a_ops = &isofs_symlink_aops;
} else
/* XXX - parse_rock_ridge_inode() had already set i_rdev. */
init_special_inode(inode, inode->i_mode, inode->i_rdev);
ret = 0;
out:
kfree(tmpde);
brelse(bh);
return ret;
out_badread:
printk(KERN_WARNING "ISOFS: unable to read i-node block\n");
fail:
goto out;
}
struct isofs_iget5_callback_data {
unsigned long block;
unsigned long offset;
};
static int isofs_iget5_test(struct inode *ino, void *data)
{
struct iso_inode_info *i = ISOFS_I(ino);
struct isofs_iget5_callback_data *d =
(struct isofs_iget5_callback_data*)data;
return (i->i_iget5_block == d->block)
&& (i->i_iget5_offset == d->offset);
}
static int isofs_iget5_set(struct inode *ino, void *data)
{
struct iso_inode_info *i = ISOFS_I(ino);
struct isofs_iget5_callback_data *d =
(struct isofs_iget5_callback_data*)data;
i->i_iget5_block = d->block;
i->i_iget5_offset = d->offset;
return 0;
}
/* Store, in the inode's containing structure, the block and block
* offset that point to the underlying meta-data for the inode. The
* code below is otherwise similar to the iget() code in
* include/linux/fs.h */
struct inode *__isofs_iget(struct super_block *sb,
unsigned long block,
unsigned long offset,
int relocated)
{
unsigned long hashval;
struct inode *inode;
struct isofs_iget5_callback_data data;
long ret;
if (offset >= 1ul << sb->s_blocksize_bits)
return ERR_PTR(-EINVAL);
data.block = block;
data.offset = offset;
hashval = (block << sb->s_blocksize_bits) | offset;
inode = iget5_locked(sb, hashval, &isofs_iget5_test,
&isofs_iget5_set, &data);
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
ret = isofs_read_inode(inode, relocated);
if (ret < 0) {
iget_failed(inode);
inode = ERR_PTR(ret);
} else {
unlock_new_inode(inode);
}
}
return inode;
}
static struct dentry *isofs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
}
static struct file_system_type iso9660_fs_type = {
.owner = THIS_MODULE,
.name = "iso9660",
.mount = isofs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("iso9660");
MODULE_ALIAS("iso9660");
static int __init init_iso9660_fs(void)
{
int err = init_inodecache();
if (err)
goto out;
#ifdef CONFIG_ZISOFS
err = zisofs_init();
if (err)
goto out1;
#endif
err = register_filesystem(&iso9660_fs_type);
if (err)
goto out2;
return 0;
out2:
#ifdef CONFIG_ZISOFS
zisofs_cleanup();
out1:
#endif
destroy_inodecache();
out:
return err;
}
static void __exit exit_iso9660_fs(void)
{
unregister_filesystem(&iso9660_fs_type);
#ifdef CONFIG_ZISOFS
zisofs_cleanup();
#endif
destroy_inodecache();
}
module_init(init_iso9660_fs)
module_exit(exit_iso9660_fs)
MODULE_LICENSE("GPL");
| linux-master | fs/isofs/inode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/isofs/namei.c
*
* (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
*
* (C) 1991 Linus Torvalds - minix filesystem
*/
#include <linux/gfp.h>
#include "isofs.h"
/*
 * OK, we cannot use strncmp, as the name is not in our data space.
 * Thus we'll have to use isofs_cmp(). No big problem; the lookup code
 * also performs some sanity checks on the directory entries.
 */
static int
isofs_cmp(struct dentry *dentry, const char *compare, int dlen)
{
struct qstr qstr;
qstr.name = compare;
qstr.len = dlen;
if (likely(!dentry->d_op))
return dentry->d_name.len != dlen || memcmp(dentry->d_name.name, compare, dlen);
return dentry->d_op->d_compare(NULL, dentry->d_name.len, dentry->d_name.name, &qstr);
}
/*
 * isofs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns 1 if the entry was found, filling in the block and offset of
 * its directory record, and 0 on failure.
 */
static unsigned long
isofs_find_entry(struct inode *dir, struct dentry *dentry,
unsigned long *block_rv, unsigned long *offset_rv,
char *tmpname, struct iso_directory_record *tmpde)
{
unsigned long bufsize = ISOFS_BUFFER_SIZE(dir);
unsigned char bufbits = ISOFS_BUFFER_BITS(dir);
unsigned long block, f_pos, offset, block_saved, offset_saved;
struct buffer_head *bh = NULL;
struct isofs_sb_info *sbi = ISOFS_SB(dir->i_sb);
if (!ISOFS_I(dir)->i_first_extent)
return 0;
f_pos = 0;
offset = 0;
block = 0;
while (f_pos < dir->i_size) {
struct iso_directory_record *de;
int de_len, match, i, dlen;
char *dpnt;
if (!bh) {
bh = isofs_bread(dir, block);
if (!bh)
return 0;
}
de = (struct iso_directory_record *) (bh->b_data + offset);
de_len = *(unsigned char *) de;
if (!de_len) {
brelse(bh);
bh = NULL;
f_pos = (f_pos + ISOFS_BLOCK_SIZE) & ~(ISOFS_BLOCK_SIZE - 1);
block = f_pos >> bufbits;
offset = 0;
continue;
}
block_saved = bh->b_blocknr;
offset_saved = offset;
offset += de_len;
f_pos += de_len;
/* Make sure we have a full directory entry */
if (offset >= bufsize) {
int slop = bufsize - offset + de_len;
memcpy(tmpde, de, slop);
offset &= bufsize - 1;
block++;
brelse(bh);
bh = NULL;
if (offset) {
bh = isofs_bread(dir, block);
if (!bh)
return 0;
memcpy((void *) tmpde + slop, bh->b_data, offset);
}
de = tmpde;
}
dlen = de->name_len[0];
dpnt = de->name;
/* Basic sanity check: the name must not extend past the directory entry */
if (de_len < dlen + sizeof(struct iso_directory_record)) {
printk(KERN_NOTICE "iso9660: Corrupted directory entry"
" in block %lu of inode %lu\n", block,
dir->i_ino);
brelse(bh);
return 0;
}
if (sbi->s_rock &&
((i = get_rock_ridge_filename(de, tmpname, dir)))) {
dlen = i; /* possibly -1 */
dpnt = tmpname;
#ifdef CONFIG_JOLIET
} else if (sbi->s_joliet_level) {
dlen = get_joliet_filename(de, tmpname, dir);
dpnt = tmpname;
#endif
} else if (sbi->s_mapping == 'a') {
dlen = get_acorn_filename(de, tmpname, dir);
dpnt = tmpname;
} else if (sbi->s_mapping == 'n') {
dlen = isofs_name_translate(de, tmpname, dir);
dpnt = tmpname;
}
/*
 * Skip hidden files when the hide option is set, and skip
 * associated files unless showassoc is set
 */
match = 0;
if (dlen > 0 &&
(!sbi->s_hide ||
(!(de->flags[-sbi->s_high_sierra] & 1))) &&
(sbi->s_showassoc ||
(!(de->flags[-sbi->s_high_sierra] & 4)))) {
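/* A 1-byte name of 0 or 1 encodes "." or ".."; never match those */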
if (dpnt && (dlen > 1 || dpnt[0] > 1))
match = (isofs_cmp(dentry, dpnt, dlen) == 0);
}
if (match) {
isofs_normalize_block_and_offset(de,
&block_saved,
&offset_saved);
*block_rv = block_saved;
*offset_rv = offset_saved;
brelse(bh);
return 1;
}
}
brelse(bh);
return 0;
}
struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
int found;
unsigned long block;
unsigned long offset;
struct inode *inode;
struct page *page;
page = alloc_page(GFP_USER);
if (!page)
return ERR_PTR(-ENOMEM);
found = isofs_find_entry(dir, dentry,
&block, &offset,
page_address(page),
1024 + page_address(page));
__free_page(page);
inode = found ? isofs_iget(dir->i_sb, block, offset) : NULL;
return d_splice_alias(inode, dentry);
}
| linux-master | fs/isofs/namei.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/fs/isofs/rock.c
*
* (C) 1992, 1993 Eric Youngdale
*
* Rock Ridge Extensions to iso9660
*/
#include <linux/slab.h>
#include <linux/pagemap.h>
#include "isofs.h"
#include "rock.h"
/*
* These functions are designed to read the system areas of a directory record
* and extract relevant information. There are different functions provided
* depending upon what information we need at the time. One function fills
* out an inode structure, a second one extracts a filename, a third one
* returns a symbolic link name, and a fourth one returns the extent number
* for the file.
*/
#define SIG(A,B) ((A) | ((B) << 8)) /* isonum_721() */
struct rock_state {
void *buffer;
unsigned char *chr;
int len;
int cont_size;
int cont_extent;
int cont_offset;
int cont_loops;
struct inode *inode;
};
/*
* This is a way of ensuring that we have something in the system
* use fields that is compatible with Rock Ridge. Return zero on success.
*/
static int check_sp(struct rock_ridge *rr, struct inode *inode)
{
if (rr->u.SP.magic[0] != 0xbe)
return -1;
if (rr->u.SP.magic[1] != 0xef)
return -1;
ISOFS_SB(inode->i_sb)->s_rock_offset = rr->u.SP.skip;
return 0;
}
static void setup_rock_ridge(struct iso_directory_record *de,
struct inode *inode, struct rock_state *rs)
{
rs->len = sizeof(struct iso_directory_record) + de->name_len[0];
if (rs->len & 1)
(rs->len)++;
rs->chr = (unsigned char *)de + rs->len;
rs->len = *((unsigned char *)de) - rs->len;
if (rs->len < 0)
rs->len = 0;
if (ISOFS_SB(inode->i_sb)->s_rock_offset != -1) {
rs->len -= ISOFS_SB(inode->i_sb)->s_rock_offset;
rs->chr += ISOFS_SB(inode->i_sb)->s_rock_offset;
if (rs->len < 0)
rs->len = 0;
}
}
static void init_rock_state(struct rock_state *rs, struct inode *inode)
{
memset(rs, 0, sizeof(*rs));
rs->inode = inode;
}
/* Maximum number of Rock Ridge continuation entries */
#define RR_MAX_CE_ENTRIES 32
/*
* Returns 0 if the caller should continue scanning, 1 if the scan must end
* and -ve on error.
*/
static int rock_continue(struct rock_state *rs)
{
int ret = 1;
int blocksize = 1 << rs->inode->i_blkbits;
const int min_de_size = offsetof(struct rock_ridge, u);
kfree(rs->buffer);
rs->buffer = NULL;
if ((unsigned)rs->cont_offset > blocksize - min_de_size ||
(unsigned)rs->cont_size > blocksize ||
(unsigned)(rs->cont_offset + rs->cont_size) > blocksize) {
printk(KERN_NOTICE "rock: corrupted directory entry. "
"extent=%d, offset=%d, size=%d\n",
rs->cont_extent, rs->cont_offset, rs->cont_size);
ret = -EIO;
goto out;
}
if (rs->cont_extent) {
struct buffer_head *bh;
rs->buffer = kmalloc(rs->cont_size, GFP_KERNEL);
if (!rs->buffer) {
ret = -ENOMEM;
goto out;
}
ret = -EIO;
if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
goto out;
bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
if (bh) {
memcpy(rs->buffer, bh->b_data + rs->cont_offset,
rs->cont_size);
put_bh(bh);
rs->chr = rs->buffer;
rs->len = rs->cont_size;
rs->cont_extent = 0;
rs->cont_size = 0;
rs->cont_offset = 0;
return 0;
}
printk("Unable to read rock-ridge attributes\n");
}
out:
kfree(rs->buffer);
rs->buffer = NULL;
return ret;
}
/*
* We think there's a record of type `sig' at rs->chr. Parse the signature
* and make sure that there's really room for a record of that type.
*/
static int rock_check_overflow(struct rock_state *rs, int sig)
{
int len;
switch (sig) {
case SIG('S', 'P'):
len = sizeof(struct SU_SP_s);
break;
case SIG('C', 'E'):
len = sizeof(struct SU_CE_s);
break;
case SIG('E', 'R'):
len = sizeof(struct SU_ER_s);
break;
case SIG('R', 'R'):
len = sizeof(struct RR_RR_s);
break;
case SIG('P', 'X'):
len = sizeof(struct RR_PX_s);
break;
case SIG('P', 'N'):
len = sizeof(struct RR_PN_s);
break;
case SIG('S', 'L'):
len = sizeof(struct RR_SL_s);
break;
case SIG('N', 'M'):
len = sizeof(struct RR_NM_s);
break;
case SIG('C', 'L'):
len = sizeof(struct RR_CL_s);
break;
case SIG('P', 'L'):
len = sizeof(struct RR_PL_s);
break;
case SIG('T', 'F'):
len = sizeof(struct RR_TF_s);
break;
case SIG('Z', 'F'):
len = sizeof(struct RR_ZF_s);
break;
default:
len = 0;
break;
}
len += offsetof(struct rock_ridge, u);
if (len > rs->len) {
printk(KERN_NOTICE "rock: directory entry would overflow "
"storage\n");
printk(KERN_NOTICE "rock: sig=0x%02x, size=%d, remaining=%d\n",
sig, len, rs->len);
return -EIO;
}
return 0;
}
/*
* return length of name field; 0: not found, -1: to be ignored
*/
int get_rock_ridge_filename(struct iso_directory_record *de,
char *retname, struct inode *inode)
{
struct rock_state rs;
struct rock_ridge *rr;
int sig;
int retnamlen = 0;
int truncate = 0;
int ret = 0;
char *p;
int len;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
*retname = 0;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
/*
* Ignore rock ridge info if rr->len is out of range, but
* don't return -EIO because that would make the file
* invisible.
*/
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto eio;
rs.chr += rr->len;
rs.len -= rr->len;
/*
* As above, just ignore the rock ridge info if rr->len
* is bogus.
*/
if (rs.len < 0)
goto out; /* Something got screwed up here */
switch (sig) {
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] & RR_NM) == 0)
goto out;
break;
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('C', 'E'):
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('N', 'M'):
if (truncate)
break;
if (rr->len < 5)
break;
/*
* If the flags are 2 or 4, this indicates '.' or '..'.
* We don't want to do anything with this, because it
* screws up the code that calls us. We don't really
* care anyways, since we can just use the non-RR
* name.
*/
if (rr->u.NM.flags & 6)
break;
if (rr->u.NM.flags & ~1) {
printk("Unsupported NM flag settings (%d)\n",
rr->u.NM.flags);
break;
}
len = rr->len - 5;
if (retnamlen + len >= 254) {
truncate = 1;
break;
}
p = memchr(rr->u.NM.name, '\0', len);
if (unlikely(p))
len = p - rr->u.NM.name;
memcpy(retname + retnamlen, rr->u.NM.name, len);
retnamlen += len;
retname[retnamlen] = '\0';
break;
case SIG('R', 'E'):
kfree(rs.buffer);
return -1;
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret == 1)
return retnamlen; /* If 0, this file did not have a NM field */
out:
kfree(rs.buffer);
return ret;
eio:
ret = -EIO;
goto out;
}
#define RR_REGARD_XA 1
#define RR_RELOC_DE 2
static int
parse_rock_ridge_inode_internal(struct iso_directory_record *de,
struct inode *inode, int flags)
{
int symlink_len = 0;
int cnt, sig;
unsigned int reloc_block;
struct inode *reloc;
struct rock_ridge *rr;
int rootflag;
struct rock_state rs;
int ret = 0;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
if (flags & RR_REGARD_XA) {
rs.chr += 14;
rs.len -= 14;
if (rs.len < 0)
rs.len = 0;
}
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
/*
* Ignore rock ridge info if rr->len is out of range, but
* don't return -EIO because that would make the file
* invisible.
*/
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto eio;
rs.chr += rr->len;
rs.len -= rr->len;
/*
* As above, just ignore the rock ridge info if rr->len
* is bogus.
*/
if (rs.len < 0)
goto out; /* Something got screwed up here */
switch (sig) {
#ifndef CONFIG_ZISOFS /* No flag for SF or ZF */
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] &
(RR_PX | RR_TF | RR_SL | RR_CL)) == 0)
goto out;
break;
#endif
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('C', 'E'):
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('E', 'R'):
/* Invalid length of ER tag id? */
if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
goto out;
ISOFS_SB(inode->i_sb)->s_rock = 1;
printk(KERN_DEBUG "ISO 9660 Extensions: ");
{
int p;
for (p = 0; p < rr->u.ER.len_id; p++)
printk(KERN_CONT "%c", rr->u.ER.data[p]);
}
printk(KERN_CONT "\n");
break;
case SIG('P', 'X'):
inode->i_mode = isonum_733(rr->u.PX.mode);
set_nlink(inode, isonum_733(rr->u.PX.n_links));
i_uid_write(inode, isonum_733(rr->u.PX.uid));
i_gid_write(inode, isonum_733(rr->u.PX.gid));
break;
case SIG('P', 'N'):
{
int high, low;
high = isonum_733(rr->u.PN.dev_high);
low = isonum_733(rr->u.PN.dev_low);
/*
 * The Rock Ridge standard specifies that if
 * sizeof(dev_t) <= 4, then the high field is
 * unused, and the device number is completely
 * stored in the low field. Some writers may
 * ignore this subtlety, and as a result we
 * test to see if the entire device number is
 * stored in the low field, and use that.
 */
if ((low & ~0xff) && high == 0) {
inode->i_rdev = MKDEV(low >> 8, low & 0xff);
} else {
inode->i_rdev = MKDEV(high, low);
}
}
break;
case SIG('T', 'F'):
/*
* Some RRIP writers incorrectly place ctime in the
* TF_CREATE field. Try to handle this correctly for
* either case.
*/
/* Rock ridge never appears on a High Sierra disk */
cnt = 0;
if (rr->u.TF.flags & TF_CREATE) {
inode_set_ctime(inode,
iso_date(rr->u.TF.times[cnt++].time, 0),
0);
}
if (rr->u.TF.flags & TF_MODIFY) {
inode->i_mtime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_mtime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ACCESS) {
inode->i_atime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_atime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ATTRIBUTES) {
inode_set_ctime(inode,
iso_date(rr->u.TF.times[cnt++].time, 0),
0);
}
break;
case SIG('S', 'L'):
{
int slen;
struct SL_component *slp;
struct SL_component *oldslp;
slen = rr->len - 5;
slp = &rr->u.SL.link;
inode->i_size = symlink_len;
while (slen > 1) {
rootflag = 0;
switch (slp->flags & ~1) {
case 0:
inode->i_size += slp->len;
break;
case 2:
inode->i_size += 1;
break;
case 4:
inode->i_size += 2;
break;
case 8:
rootflag = 1;
inode->i_size += 1;
break;
default:
printk("Symlink component flag not implemented\n");
}
slen -= slp->len + 2;
oldslp = slp;
slp = (struct SL_component *)((char *)slp + slp->len + 2);
if (slen < 2) {
if ((rr->u.SL.flags & 1) &&
!(oldslp->flags & 1))
inode->i_size += 1;
break;
}
/*
 * If this component record isn't
 * continued, then append a '/'.
 */
if (!rootflag && !(oldslp->flags & 1))
inode->i_size += 1;
}
}
symlink_len = inode->i_size;
break;
case SIG('R', 'E'):
printk(KERN_WARNING "Attempt to read inode for "
"relocated directory\n");
goto out;
case SIG('C', 'L'):
if (flags & RR_RELOC_DE) {
printk(KERN_ERR
"ISOFS: Recursive directory relocation "
"is not supported\n");
goto eio;
}
reloc_block = isonum_733(rr->u.CL.location);
if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
ISOFS_I(inode)->i_iget5_offset == 0) {
printk(KERN_ERR
"ISOFS: Directory relocation points to "
"itself\n");
goto eio;
}
ISOFS_I(inode)->i_first_extent = reloc_block;
reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
if (IS_ERR(reloc)) {
ret = PTR_ERR(reloc);
goto out;
}
inode->i_mode = reloc->i_mode;
set_nlink(inode, reloc->i_nlink);
inode->i_uid = reloc->i_uid;
inode->i_gid = reloc->i_gid;
inode->i_rdev = reloc->i_rdev;
inode->i_size = reloc->i_size;
inode->i_blocks = reloc->i_blocks;
inode->i_atime = reloc->i_atime;
inode_set_ctime_to_ts(inode, inode_get_ctime(reloc));
inode->i_mtime = reloc->i_mtime;
iput(reloc);
break;
#ifdef CONFIG_ZISOFS
case SIG('Z', 'F'): {
int algo;
if (ISOFS_SB(inode->i_sb)->s_nocompress)
break;
algo = isonum_721(rr->u.ZF.algorithm);
if (algo == SIG('p', 'z')) {
int block_shift =
isonum_711(&rr->u.ZF.parms[1]);
if (block_shift > 17) {
printk(KERN_WARNING "isofs: "
"Can't handle ZF block "
"size of 2^%d\n",
block_shift);
} else {
/*
* Note: we don't change
* i_blocks here
*/
ISOFS_I(inode)->i_file_format =
isofs_file_compressed;
/*
* Parameters to compression
* algorithm (header size,
* block size)
*/
ISOFS_I(inode)->i_format_parm[0] =
isonum_711(&rr->u.ZF.parms[0]);
ISOFS_I(inode)->i_format_parm[1] =
isonum_711(&rr->u.ZF.parms[1]);
inode->i_size = isonum_733(rr->u.ZF.real_size);
}
} else {
printk(KERN_WARNING
"isofs: Unknown ZF compression "
"algorithm: %c%c\n",
rr->u.ZF.algorithm[0],
rr->u.ZF.algorithm[1]);
}
break;
}
#endif
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret == 1)
ret = 0;
out:
kfree(rs.buffer);
return ret;
eio:
ret = -EIO;
goto out;
}
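/*
 * Copy the components of one SL record into the output buffer,
 * expanding the component flag bits into ".", ".." and "/" and adding
 * separators between components. Returns the updated output position,
 * or NULL if the result would overrun plimit.
 */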
static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
{
int slen;
int rootflag;
struct SL_component *oldslp;
struct SL_component *slp;
slen = rr->len - 5;
slp = &rr->u.SL.link;
while (slen > 1) {
rootflag = 0;
switch (slp->flags & ~1) {
case 0:
if (slp->len > plimit - rpnt)
return NULL;
memcpy(rpnt, slp->text, slp->len);
rpnt += slp->len;
break;
case 2:
if (rpnt >= plimit)
return NULL;
*rpnt++ = '.';
break;
case 4:
if (2 > plimit - rpnt)
return NULL;
*rpnt++ = '.';
*rpnt++ = '.';
break;
case 8:
if (rpnt >= plimit)
return NULL;
rootflag = 1;
*rpnt++ = '/';
break;
default:
printk("Symlink component flag not implemented (%d)\n",
slp->flags);
}
slen -= slp->len + 2;
oldslp = slp;
slp = (struct SL_component *)((char *)slp + slp->len + 2);
if (slen < 2) {
/*
* If there is another SL record, and this component
* record isn't continued, then add a slash.
*/
if ((!rootflag) && (rr->u.SL.flags & 1) &&
!(oldslp->flags & 1)) {
if (rpnt >= plimit)
return NULL;
*rpnt++ = '/';
}
break;
}
/*
* If this component record isn't continued, then append a '/'.
*/
if (!rootflag && !(oldslp->flags & 1)) {
if (rpnt >= plimit)
return NULL;
*rpnt++ = '/';
}
}
return rpnt;
}
int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
int relocated)
{
int flags = relocated ? RR_RELOC_DE : 0;
int result = parse_rock_ridge_inode_internal(de, inode, flags);
/*
* if rockridge flag was reset and we didn't look for attributes
* behind eventual XA attributes, have a look there
*/
if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
&& (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
result = parse_rock_ridge_inode_internal(de, inode,
flags | RR_REGARD_XA);
}
return result;
}
/*
 * read_folio() for symlinks: reads the symlink contents into the folio
 * and either marks it uptodate and returns 0, or returns an error (-EIO)
 */
static int rock_ridge_symlink_read_folio(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct iso_inode_info *ei = ISOFS_I(inode);
struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
char *link = page_address(page);
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
struct buffer_head *bh;
char *rpnt = link;
unsigned char *pnt;
struct iso_directory_record *raw_de;
unsigned long block, offset;
int sig;
struct rock_ridge *rr;
struct rock_state rs;
int ret;
if (!sbi->s_rock)
goto error;
init_rock_state(&rs, inode);
block = ei->i_iget5_block;
bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
offset = ei->i_iget5_offset;
pnt = (unsigned char *)bh->b_data + offset;
raw_de = (struct iso_directory_record *)pnt;
/*
* If we go past the end of the buffer, there is some sort of error.
*/
if (offset + *pnt > bufsize)
goto out_bad_span;
/*
* Now test for possible Rock Ridge extensions which will override
* some of these numbers in the inode structure.
*/
setup_rock_ridge(raw_de, inode, &rs);
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto out;
rs.chr += rr->len;
rs.len -= rr->len;
if (rs.len < 0)
goto out; /* corrupted isofs */
switch (sig) {
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] & RR_SL) == 0)
goto out;
break;
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('S', 'L'):
rpnt = get_symlink_chunk(rpnt, rr,
link + (PAGE_SIZE - 1));
if (rpnt == NULL)
goto out;
break;
case SIG('C', 'E'):
/* This tells us if there is a continuation record */
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret < 0)
goto fail;
if (rpnt == link)
goto fail;
brelse(bh);
*rpnt = '\0';
SetPageUptodate(page);
unlock_page(page);
return 0;
/* error exits */
out:
kfree(rs.buffer);
goto fail;
out_noread:
printk("unable to read i-node block");
goto fail;
out_bad_span:
printk("symlink spans iso9660 blocks\n");
fail:
brelse(bh);
error:
SetPageError(page);
unlock_page(page);
return -EIO;
}
const struct address_space_operations isofs_symlink_aops = {
.read_folio = rock_ridge_symlink_read_folio
};
| linux-master | fs/isofs/rock.c |
// SPDX-License-Identifier: GPL-2.0
/*
* fs/isofs/export.c
*
* (C) 2004 Paul Serice - The new inode scheme requires switching
* from iget() to iget5_locked() which means
* the NFS export operations have to be hand
* coded because the default routines rely on
* iget().
*
* The following files are helpful:
*
* Documentation/filesystems/nfs/exporting.rst
* fs/exportfs/expfs.c.
*/
#include "isofs.h"
static struct dentry *
isofs_export_iget(struct super_block *sb,
unsigned long block,
unsigned long offset,
__u32 generation)
{
struct inode *inode;
if (block == 0)
return ERR_PTR(-ESTALE);
inode = isofs_iget(sb, block, offset);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
return d_obtain_alias(inode);
}
/* This function is surprisingly simple. The trick is understanding
* that "child" is always a directory. So, to find its parent, you
* simply need to find its ".." entry, normalize its block and offset,
* and return the underlying inode. See the comments for
* isofs_normalize_block_and_offset(). */
static struct dentry *isofs_export_get_parent(struct dentry *child)
{
unsigned long parent_block = 0;
unsigned long parent_offset = 0;
struct inode *child_inode = d_inode(child);
struct iso_inode_info *e_child_inode = ISOFS_I(child_inode);
struct iso_directory_record *de = NULL;
struct buffer_head * bh = NULL;
struct dentry *rv = NULL;
/* "child" must always be a directory. */
if (!S_ISDIR(child_inode->i_mode)) {
printk(KERN_ERR "isofs: isofs_export_get_parent(): "
"child is not a directory!\n");
rv = ERR_PTR(-EACCES);
goto out;
}
/* It is an invariant that the directory offset is zero. If
* it is not zero, it means the directory failed to be
* normalized for some reason. */
if (e_child_inode->i_iget5_offset != 0) {
printk(KERN_ERR "isofs: isofs_export_get_parent(): "
"child directory not normalized!\n");
rv = ERR_PTR(-EACCES);
goto out;
}
/* The child inode has been normalized such that its
* i_iget5_block value points to the "." entry. Fortunately,
* the ".." entry is located in the same block. */
parent_block = e_child_inode->i_iget5_block;
/* Get the block in question. */
bh = sb_bread(child_inode->i_sb, parent_block);
if (bh == NULL) {
rv = ERR_PTR(-EACCES);
goto out;
}
/* This is the "." entry. */
de = (struct iso_directory_record*)bh->b_data;
/* The ".." entry is always the second entry. */
parent_offset = (unsigned long)isonum_711(de->length);
de = (struct iso_directory_record*)(bh->b_data + parent_offset);
/* Verify it is in fact the ".." entry. */
if ((isonum_711(de->name_len) != 1) || (de->name[0] != 1)) {
printk(KERN_ERR "isofs: Unable to find the \"..\" "
"directory for NFS.\n");
rv = ERR_PTR(-EACCES);
goto out;
}
/* Normalize */
isofs_normalize_block_and_offset(de, &parent_block, &parent_offset);
rv = d_obtain_alias(isofs_iget(child_inode->i_sb, parent_block,
parent_offset));
out:
if (bh)
brelse(bh);
return rv;
}
static int
isofs_export_encode_fh(struct inode *inode,
__u32 *fh32,
int *max_len,
struct inode *parent)
{
struct iso_inode_info * ei = ISOFS_I(inode);
int len = *max_len;
int type = 1;
__u16 *fh16 = (__u16*)fh32;
/*
* WARNING: max_len is 5 for NFSv2. Because of this
* limitation, we use the lower 16 bits of fh32[1] to hold the
* offset of the inode and the upper 16 bits of fh32[1] to
* hold the offset of the parent.
*/
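/*
 * Worked example (hypothetical values, little-endian layout): an inode
 * at block 0x1234, offset 0x10, generation 1, whose parent is at block
 * 0x1000, offset 0, generation 1, packs as
 * fh32[] = { 0x1234, 0x0010, 1, 0x1000, 1 } with len 5, type 2.
 */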
if (parent && (len < 5)) {
*max_len = 5;
return FILEID_INVALID;
} else if (len < 3) {
*max_len = 3;
return FILEID_INVALID;
}
len = 3;
fh32[0] = ei->i_iget5_block;
fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
fh16[3] = 0; /* avoid leaking uninitialized data */
fh32[2] = inode->i_generation;
if (parent) {
struct iso_inode_info *eparent;
eparent = ISOFS_I(parent);
fh32[3] = eparent->i_iget5_block;
fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */
fh32[4] = parent->i_generation;
len = 5;
type = 2;
}
*max_len = len;
return type;
}
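/*
 * In-memory view of the NFS file handle; the field order must match
 * the packing performed by isofs_export_encode_fh() above.
 */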
struct isofs_fid {
u32 block;
u16 offset;
u16 parent_offset;
u32 generation;
u32 parent_block;
u32 parent_generation;
};
static struct dentry *isofs_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
struct isofs_fid *ifid = (struct isofs_fid *)fid;
if (fh_len < 3 || fh_type > 2)
return NULL;
return isofs_export_iget(sb, ifid->block, ifid->offset,
ifid->generation);
}
static struct dentry *isofs_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
struct isofs_fid *ifid = (struct isofs_fid *)fid;
if (fh_len < 2 || fh_type != 2)
return NULL;
return isofs_export_iget(sb,
fh_len > 2 ? ifid->parent_block : 0,
ifid->parent_offset,
fh_len > 4 ? ifid->parent_generation : 0);
}
const struct export_operations isofs_export_ops = {
.encode_fh = isofs_export_encode_fh,
.fh_to_dentry = isofs_fh_to_dentry,
.fh_to_parent = isofs_fh_to_parent,
.get_parent = isofs_export_get_parent,
};
| linux-master | fs/isofs/export.c |
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
*
* Created by David Woodhouse <[email protected]>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/jffs2.h>
#include <linux/pagemap.h>
#include <linux/mtd/super.h>
#include <linux/ctype.h>
#include <linux/namei.h>
#include <linux/seq_file.h>
#include <linux/exportfs.h>
#include "compr.h"
#include "nodelist.h"
static void jffs2_put_super(struct super_block *);
static struct kmem_cache *jffs2_inode_cachep;
static struct inode *jffs2_alloc_inode(struct super_block *sb)
{
struct jffs2_inode_info *f;
f = alloc_inode_sb(sb, jffs2_inode_cachep, GFP_KERNEL);
if (!f)
return NULL;
return &f->vfs_inode;
}
static void jffs2_free_inode(struct inode *inode)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
kfree(f->target);
kmem_cache_free(jffs2_inode_cachep, f);
}
static void jffs2_i_init_once(void *foo)
{
struct jffs2_inode_info *f = foo;
mutex_init(&f->sem);
inode_init_once(&f->vfs_inode);
}
static const char *jffs2_compr_name(unsigned int compr)
{
switch (compr) {
case JFFS2_COMPR_MODE_NONE:
return "none";
#ifdef CONFIG_JFFS2_LZO
case JFFS2_COMPR_MODE_FORCELZO:
return "lzo";
#endif
#ifdef CONFIG_JFFS2_ZLIB
case JFFS2_COMPR_MODE_FORCEZLIB:
return "zlib";
#endif
default:
/* should never happen; programmer error */
WARN_ON(1);
return "";
}
}
static int jffs2_show_options(struct seq_file *s, struct dentry *root)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(root->d_sb);
struct jffs2_mount_opts *opts = &c->mount_opts;
if (opts->override_compr)
seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
if (opts->set_rp_size)
seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
return 0;
}
static int jffs2_sync_fs(struct super_block *sb, int wait)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
if (jffs2_is_writebuffered(c))
cancel_delayed_work_sync(&c->wbuf_dwork);
#endif
mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
mutex_unlock(&c->alloc_sem);
return 0;
}
static struct inode *jffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
uint32_t generation)
{
/* We don't care about i_generation. We'll destroy the flash
before we start re-using inode numbers anyway. And even
if that wasn't true, we'd have other problems...*/
return jffs2_iget(sb, ino);
}
static struct dentry *jffs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
jffs2_nfs_get_inode);
}
static struct dentry *jffs2_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_parent(sb, fid, fh_len, fh_type,
jffs2_nfs_get_inode);
}
static struct dentry *jffs2_get_parent(struct dentry *child)
{
struct jffs2_inode_info *f;
uint32_t pino;
BUG_ON(!d_is_dir(child));
f = JFFS2_INODE_INFO(d_inode(child));
pino = f->inocache->pino_nlink;
JFFS2_DEBUG("Parent of directory ino #%u is #%u\n",
f->inocache->ino, pino);
return d_obtain_alias(jffs2_iget(child->d_sb, pino));
}
static const struct export_operations jffs2_export_ops = {
.get_parent = jffs2_get_parent,
.fh_to_dentry = jffs2_fh_to_dentry,
.fh_to_parent = jffs2_fh_to_parent,
};
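/*
 * Note: generic_fh_to_dentry()/generic_fh_to_parent() decode the common
 * FILEID_INO32_GEN{,_PARENT} handle layouts (inode number plus
 * generation) and pass the inode number to jffs2_nfs_get_inode(), so no
 * JFFS2-specific encode_fh is needed here.
 */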
/*
* JFFS2 mount options.
*
* Opt_source: The source device
* Opt_override_compr: override default compressor
* Opt_rp_size: size of reserved pool in KiB
*/
enum {
Opt_override_compr,
Opt_rp_size,
};
static const struct constant_table jffs2_param_compr[] = {
{"none", JFFS2_COMPR_MODE_NONE },
#ifdef CONFIG_JFFS2_LZO
{"lzo", JFFS2_COMPR_MODE_FORCELZO },
#endif
#ifdef CONFIG_JFFS2_ZLIB
{"zlib", JFFS2_COMPR_MODE_FORCEZLIB },
#endif
{}
};
static const struct fs_parameter_spec jffs2_fs_parameters[] = {
fsparam_enum ("compr", Opt_override_compr, jffs2_param_compr),
fsparam_u32 ("rp_size", Opt_rp_size),
{}
};
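/*
 * Example mount invocations these parameters accept (device name is
 * hypothetical):
 *
 *   mount -t jffs2 -o compr=zlib mtd2 /mnt/flash
 *   mount -t jffs2 -o rp_size=64 mtd2 /mnt/flash    (64 KiB reserved)
 *
 * "compr" takes one of the names in jffs2_param_compr above; "rp_size"
 * is given in KiB and scaled by 1024 in jffs2_parse_param() below.
 */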
static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct fs_parse_result result;
struct jffs2_sb_info *c = fc->s_fs_info;
int opt;
opt = fs_parse(fc, jffs2_fs_parameters, param, &result);
if (opt < 0)
return opt;
switch (opt) {
case Opt_override_compr:
c->mount_opts.compr = result.uint_32;
c->mount_opts.override_compr = true;
break;
case Opt_rp_size:
if (result.uint_32 > UINT_MAX / 1024)
return invalf(fc, "jffs2: rp_size unrepresentable");
c->mount_opts.rp_size = result.uint_32 * 1024;
c->mount_opts.set_rp_size = true;
break;
default:
return -EINVAL;
}
return 0;
}
static inline void jffs2_update_mount_opts(struct fs_context *fc)
{
struct jffs2_sb_info *new_c = fc->s_fs_info;
struct jffs2_sb_info *c = JFFS2_SB_INFO(fc->root->d_sb);
mutex_lock(&c->alloc_sem);
if (new_c->mount_opts.override_compr) {
c->mount_opts.override_compr = new_c->mount_opts.override_compr;
c->mount_opts.compr = new_c->mount_opts.compr;
}
if (new_c->mount_opts.set_rp_size) {
c->mount_opts.set_rp_size = new_c->mount_opts.set_rp_size;
c->mount_opts.rp_size = new_c->mount_opts.rp_size;
}
mutex_unlock(&c->alloc_sem);
}
static int jffs2_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
sync_filesystem(sb);
jffs2_update_mount_opts(fc);
return jffs2_do_remount_fs(sb, fc);
}
static const struct super_operations jffs2_super_operations =
{
.alloc_inode = jffs2_alloc_inode,
.free_inode = jffs2_free_inode,
.put_super = jffs2_put_super,
.statfs = jffs2_statfs,
.evict_inode = jffs2_evict_inode,
.dirty_inode = jffs2_dirty_inode,
.show_options = jffs2_show_options,
.sync_fs = jffs2_sync_fs,
};
/*
* fill in the superblock
*/
static int jffs2_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct jffs2_sb_info *c = sb->s_fs_info;
jffs2_dbg(1, "jffs2_get_sb_mtd():"
" New superblock for device %d (\"%s\")\n",
sb->s_mtd->index, sb->s_mtd->name);
c->mtd = sb->s_mtd;
c->os_priv = sb;
if (c->mount_opts.rp_size > c->mtd->size)
return invalf(fc, "jffs2: Too large reserve pool specified, max is %llu KB",
c->mtd->size / 1024);
/* Initialize the JFFS2 superblock locks; the rest of the
* initialization is done later in jffs2_do_fill_super() */
mutex_init(&c->alloc_sem);
mutex_init(&c->erase_free_sem);
init_waitqueue_head(&c->erase_wait);
init_waitqueue_head(&c->inocache_wq);
spin_lock_init(&c->erase_completion_lock);
spin_lock_init(&c->inocache_lock);
sb->s_op = &jffs2_super_operations;
sb->s_export_op = &jffs2_export_ops;
sb->s_flags = sb->s_flags | SB_NOATIME;
sb->s_xattr = jffs2_xattr_handlers;
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
sb->s_flags |= SB_POSIXACL;
#endif
return jffs2_do_fill_super(sb, fc);
}
static int jffs2_get_tree(struct fs_context *fc)
{
return get_tree_mtd(fc, jffs2_fill_super);
}
static void jffs2_free_fc(struct fs_context *fc)
{
kfree(fc->s_fs_info);
}
static const struct fs_context_operations jffs2_context_ops = {
.free = jffs2_free_fc,
.parse_param = jffs2_parse_param,
.get_tree = jffs2_get_tree,
.reconfigure = jffs2_reconfigure,
};
static int jffs2_init_fs_context(struct fs_context *fc)
{
struct jffs2_sb_info *ctx;
ctx = kzalloc(sizeof(struct jffs2_sb_info), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
fc->s_fs_info = ctx;
fc->ops = &jffs2_context_ops;
return 0;
}
static void jffs2_put_super(struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
jffs2_dbg(2, "%s()\n", __func__);
mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
mutex_unlock(&c->alloc_sem);
jffs2_sum_exit(c);
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
kvfree(c->blocks);
jffs2_flash_cleanup(c);
kfree(c->inocache_list);
jffs2_clear_xattr_subsystem(c);
mtd_sync(c->mtd);
jffs2_dbg(1, "%s(): returning\n", __func__);
}
static void jffs2_kill_sb(struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
if (c && !sb_rdonly(sb))
jffs2_stop_garbage_collect_thread(c);
kill_mtd_super(sb);
kfree(c);
}
static struct file_system_type jffs2_fs_type = {
.owner = THIS_MODULE,
.name = "jffs2",
.init_fs_context = jffs2_init_fs_context,
.parameters = jffs2_fs_parameters,
.kill_sb = jffs2_kill_sb,
};
MODULE_ALIAS_FS("jffs2");
static int __init init_jffs2_fs(void)
{
int ret;
/* Paranoia checks for on-medium structures. If we ask GCC
to pack them with __attribute__((packed)) then it _also_
assumes that they're not aligned -- so it emits crappy
code on some architectures. Ideally we want an attribute
which means just 'no padding', without the alignment
thing. But GCC doesn't have that -- we have to just
hope the structs are the right sizes, instead. */
BUILD_BUG_ON(sizeof(struct jffs2_unknown_node) != 12);
BUILD_BUG_ON(sizeof(struct jffs2_raw_dirent) != 40);
BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
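/*
 * BUILD_BUG_ON() is the kernel's compile-time assertion: if any of the
 * size checks above is wrong, the build fails. A rough C11 equivalent
 * (sketch only, not part of the build) would be:
 *
 *   _Static_assert(sizeof(struct jffs2_unknown_node) == 12,
 *                  "on-medium node header has unexpected padding");
 */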
pr_info("version 2.2."
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
" (NAND)"
#endif
#ifdef CONFIG_JFFS2_SUMMARY
" (SUMMARY) "
#endif
" © 2001-2006 Red Hat, Inc.\n");
jffs2_inode_cachep = kmem_cache_create("jffs2_i",
sizeof(struct jffs2_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
jffs2_i_init_once);
if (!jffs2_inode_cachep) {
pr_err("error: Failed to initialise inode cache\n");
return -ENOMEM;
}
ret = jffs2_compressors_init();
if (ret) {
pr_err("error: Failed to initialise compressors\n");
goto out;
}
ret = jffs2_create_slab_caches();
if (ret) {
pr_err("error: Failed to initialise slab caches\n");
goto out_compressors;
}
ret = register_filesystem(&jffs2_fs_type);
if (ret) {
pr_err("error: Failed to register filesystem\n");
goto out_slab;
}
return 0;
out_slab:
jffs2_destroy_slab_caches();
out_compressors:
jffs2_compressors_exit();
out:
kmem_cache_destroy(jffs2_inode_cachep);
return ret;
}
static void __exit exit_jffs2_fs(void)
{
unregister_filesystem(&jffs2_fs_type);
jffs2_destroy_slab_caches();
jffs2_compressors_exit();
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(jffs2_inode_cachep);
}
module_init(init_jffs2_fs);
module_exit(exit_jffs2_fs);
MODULE_DESCRIPTION("The Journalling Flash File System, v2");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for
// the sake of this tag. It's Free Software.
| linux-master | fs/jffs2/super.c |
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <[email protected]>
* Copyright © 2004 Ferenc Havasi <[email protected]>,
* University of Szeged, Hungary
*
* Created by Arjan van de Ven <[email protected]>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "compr.h"
static DEFINE_SPINLOCK(jffs2_compressor_list_lock);
/* Available compressors are on this list */
static LIST_HEAD(jffs2_compressor_list);
/* Actual compression mode */
static int jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
/* Statistics for blocks stored without compression */
static uint32_t none_stat_compr_blocks, none_stat_decompr_blocks,
none_stat_compr_size;
/*
* Return 1 to use this compression
*/
static int jffs2_is_best_compression(struct jffs2_compressor *this,
struct jffs2_compressor *best, uint32_t size, uint32_t bestsize)
{
switch (jffs2_compression_mode) {
case JFFS2_COMPR_MODE_SIZE:
if (bestsize > size)
return 1;
return 0;
case JFFS2_COMPR_MODE_FAVOURLZO:
if ((this->compr == JFFS2_COMPR_LZO) && (bestsize > size))
return 1;
if ((best->compr != JFFS2_COMPR_LZO) && (bestsize > size))
return 1;
if ((this->compr == JFFS2_COMPR_LZO) && (bestsize > (size * FAVOUR_LZO_PERCENT / 100)))
return 1;
if ((bestsize * FAVOUR_LZO_PERCENT / 100) > size)
return 1;
return 0;
}
/* Shouldn't happen */
return 0;
}
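/*
 * Worked example for the FAVOURLZO case, assuming FAVOUR_LZO_PERCENT is
 * 80 (its value in compr.h): with best = zlib at bestsize = 1000 bytes
 * and this = lzo at size = 1150 bytes, size * 80 / 100 = 920 and
 * bestsize (1000) > 920, so LZO wins even though its output is ~15%
 * larger, trading some space for LZO's faster decompression.
 */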
/*
* jffs2_selected_compress:
* @compr: Explicit compression type to use (e.g. JFFS2_COMPR_ZLIB).
* If 0, just take the first available compression mode.
* @data_in: Pointer to uncompressed data
* @cpage_out: Pointer to returned pointer to buffer for compressed data
* @datalen: On entry, holds the amount of data available for compression.
* On exit, expected to hold the amount of data actually compressed.
* @cdatalen: On entry, holds the amount of space available for compressed
* data. On exit, expected to hold the actual size of the compressed
* data.
*
* Returns: the compression type used. Zero is used to show that the data
* could not be compressed; probably because we couldn't find the requested
* compression mode.
*/
static int jffs2_selected_compress(u8 compr, unsigned char *data_in,
unsigned char **cpage_out, u32 *datalen, u32 *cdatalen)
{
struct jffs2_compressor *this;
int err, ret = JFFS2_COMPR_NONE;
uint32_t orig_slen, orig_dlen;
char *output_buf;
output_buf = kmalloc(*cdatalen, GFP_KERNEL);
if (!output_buf) {
pr_warn("No memory for compressor allocation. Compression failed.\n");
return ret;
}
orig_slen = *datalen;
orig_dlen = *cdatalen;
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
/* Skip decompress-only and disabled modules */
if (!this->compress || this->disabled)
continue;
/* Skip if not the desired compression type */
if (compr && (compr != this->compr))
continue;
/*
* Either compression type was unspecified, or we found our
* compressor; either way, we're good to go.
*/
this->usecount++;
spin_unlock(&jffs2_compressor_list_lock);
*datalen = orig_slen;
*cdatalen = orig_dlen;
err = this->compress(data_in, output_buf, datalen, cdatalen);
spin_lock(&jffs2_compressor_list_lock);
this->usecount--;
if (!err) {
/* Success */
ret = this->compr;
this->stat_compr_blocks++;
this->stat_compr_orig_size += *datalen;
this->stat_compr_new_size += *cdatalen;
break;
}
}
spin_unlock(&jffs2_compressor_list_lock);
if (ret == JFFS2_COMPR_NONE)
kfree(output_buf);
else
*cpage_out = output_buf;
return ret;
}
/* jffs2_compress:
* @data_in: Pointer to uncompressed data
* @cpage_out: Pointer to returned pointer to buffer for compressed data
* @datalen: On entry, holds the amount of data available for compression.
* On exit, expected to hold the amount of data actually compressed.
* @cdatalen: On entry, holds the amount of space available for compressed
* data. On exit, expected to hold the actual size of the compressed
* data.
*
* Returns: Lower byte to be stored with data indicating compression type used.
* Zero is used to show that the data could not be compressed; the
* compressed version was actually larger than the original.
* Upper byte will be used later. (soon)
*
* If the cdata buffer isn't large enough to hold all the uncompressed data,
* jffs2_compress should compress as much as will fit, and should set
* *datalen accordingly to show the amount of data that was compressed.
*/
uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
unsigned char *data_in, unsigned char **cpage_out,
uint32_t *datalen, uint32_t *cdatalen)
{
int ret = JFFS2_COMPR_NONE;
int mode, compr_ret;
struct jffs2_compressor *this, *best = NULL;
unsigned char *output_buf = NULL, *tmp_buf;
uint32_t orig_slen, orig_dlen;
uint32_t best_slen = 0, best_dlen = 0;
if (c->mount_opts.override_compr)
mode = c->mount_opts.compr;
else
mode = jffs2_compression_mode;
switch (mode) {
case JFFS2_COMPR_MODE_NONE:
break;
case JFFS2_COMPR_MODE_PRIORITY:
ret = jffs2_selected_compress(0, data_in, cpage_out, datalen,
cdatalen);
break;
case JFFS2_COMPR_MODE_SIZE:
case JFFS2_COMPR_MODE_FAVOURLZO:
orig_slen = *datalen;
orig_dlen = *cdatalen;
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
/* Skip decompress-only backwards-compatibility and disabled modules */
if (!this->compress || this->disabled)
continue;
/* Free the output buffer if it is too small; it is reallocated below */
if (this->compr_buf_size < orig_slen && this->compr_buf) {
spin_unlock(&jffs2_compressor_list_lock);
kfree(this->compr_buf);
spin_lock(&jffs2_compressor_list_lock);
this->compr_buf_size = 0;
this->compr_buf = NULL;
}
if (!this->compr_buf) {
spin_unlock(&jffs2_compressor_list_lock);
tmp_buf = kmalloc(orig_slen, GFP_KERNEL);
spin_lock(&jffs2_compressor_list_lock);
if (!tmp_buf) {
pr_warn("No memory for compressor allocation. (%d bytes)\n",
orig_slen);
continue;
}
this->compr_buf = tmp_buf;
this->compr_buf_size = orig_slen;
}
this->usecount++;
spin_unlock(&jffs2_compressor_list_lock);
*datalen = orig_slen;
*cdatalen = orig_dlen;
compr_ret = this->compress(data_in, this->compr_buf, datalen, cdatalen);
spin_lock(&jffs2_compressor_list_lock);
this->usecount--;
if (!compr_ret) {
if (((!best_dlen) || jffs2_is_best_compression(this, best, *cdatalen, best_dlen))
&& (*cdatalen < *datalen)) {
best_dlen = *cdatalen;
best_slen = *datalen;
best = this;
}
}
}
if (best_dlen) {
*cdatalen = best_dlen;
*datalen = best_slen;
output_buf = best->compr_buf;
best->compr_buf = NULL;
best->compr_buf_size = 0;
best->stat_compr_blocks++;
best->stat_compr_orig_size += best_slen;
best->stat_compr_new_size += best_dlen;
ret = best->compr;
*cpage_out = output_buf;
}
spin_unlock(&jffs2_compressor_list_lock);
break;
case JFFS2_COMPR_MODE_FORCELZO:
ret = jffs2_selected_compress(JFFS2_COMPR_LZO, data_in,
cpage_out, datalen, cdatalen);
break;
case JFFS2_COMPR_MODE_FORCEZLIB:
ret = jffs2_selected_compress(JFFS2_COMPR_ZLIB, data_in,
cpage_out, datalen, cdatalen);
break;
default:
pr_err("unknown compression mode\n");
}
if (ret == JFFS2_COMPR_NONE) {
*cpage_out = data_in;
*datalen = *cdatalen;
none_stat_compr_blocks++;
none_stat_compr_size += *datalen;
}
return ret;
}
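/*
 * Hypothetical caller sketch (the real caller lives in write.c): the
 * returned type is stored in the node header, and jffs2_free_comprbuf()
 * below only frees the buffer when compression actually produced a new
 * one (on JFFS2_COMPR_NONE, *cpage_out aliases data_in):
 *
 *   comprtype = jffs2_compress(c, f, buf, &cpage, &datalen, &cdatalen);
 *   ... write cdatalen bytes from cpage, tagged with comprtype ...
 *   jffs2_free_comprbuf(cpage, buf);
 */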
int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
uint16_t comprtype, unsigned char *cdata_in,
unsigned char *data_out, uint32_t cdatalen, uint32_t datalen)
{
struct jffs2_compressor *this;
int ret;
/* Older code had a bug where it would write non-zero 'usercompr'
fields. Deal with it. */
if ((comprtype & 0xff) <= JFFS2_COMPR_ZLIB)
comprtype &= 0xff;
switch (comprtype & 0xff) {
case JFFS2_COMPR_NONE:
/* This should be special-cased elsewhere, but we might as well deal with it */
memcpy(data_out, cdata_in, datalen);
none_stat_decompr_blocks++;
break;
case JFFS2_COMPR_ZERO:
memset(data_out, 0, datalen);
break;
default:
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
if (comprtype == this->compr) {
this->usecount++;
spin_unlock(&jffs2_compressor_list_lock);
ret = this->decompress(cdata_in, data_out, cdatalen, datalen);
spin_lock(&jffs2_compressor_list_lock);
if (ret) {
pr_warn("Decompressor \"%s\" returned %d\n",
this->name, ret);
}
else {
this->stat_decompr_blocks++;
}
this->usecount--;
spin_unlock(&jffs2_compressor_list_lock);
return ret;
}
}
pr_warn("compression type 0x%02x not available\n", comprtype);
spin_unlock(&jffs2_compressor_list_lock);
return -EIO;
}
return 0;
}
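/*
 * Example of the 'usercompr' workaround above (assuming
 * JFFS2_COMPR_ZLIB == 6, as in <linux/jffs2.h>): an old node tagged
 * 0x0106 carries a stray non-zero upper byte; since its low byte 0x06
 * is <= JFFS2_COMPR_ZLIB, the value is masked down to 0x06 and the
 * payload is decompressed as plain zlib.
 */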
int jffs2_register_compressor(struct jffs2_compressor *comp)
{
struct jffs2_compressor *this;
if (!comp->name) {
pr_warn("NULL compressor name at registering JFFS2 compressor. Failed.\n");
return -1;
}
comp->compr_buf_size = 0;
comp->compr_buf = NULL;
comp->usecount = 0;
comp->stat_compr_orig_size = 0;
comp->stat_compr_new_size = 0;
comp->stat_compr_blocks = 0;
comp->stat_decompr_blocks = 0;
jffs2_dbg(1, "Registering JFFS2 compressor \"%s\"\n", comp->name);
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
if (this->priority < comp->priority) {
list_add(&comp->list, this->list.prev);
goto out;
}
}
list_add_tail(&comp->list, &jffs2_compressor_list);
out:
D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
})
spin_unlock(&jffs2_compressor_list_lock);
return 0;
}
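/*
 * The insertion above keeps jffs2_compressor_list sorted by descending
 * priority: registering compressors with priorities 2, 3, 1
 * (hypothetical values) yields the list 3 -> 2 -> 1, so
 * JFFS2_COMPR_MODE_PRIORITY can simply take the first compressor that
 * succeeds.
 */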
int jffs2_unregister_compressor(struct jffs2_compressor *comp)
{
D2(struct jffs2_compressor *this);
jffs2_dbg(1, "Unregistering JFFS2 compressor \"%s\"\n", comp->name);
spin_lock(&jffs2_compressor_list_lock);
if (comp->usecount) {
spin_unlock(&jffs2_compressor_list_lock);
pr_warn("Compressor module is in use. Unregister failed.\n");
return -1;
}
list_del(&comp->list);
D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
})
spin_unlock(&jffs2_compressor_list_lock);
return 0;
}
void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
{
if (orig != comprbuf)
kfree(comprbuf);
}
int __init jffs2_compressors_init(void)
{
int ret = 0;
/* Registering compressors */
ret = jffs2_zlib_init();
if (ret)
goto exit;
ret = jffs2_rtime_init();
if (ret)
goto exit_zlib;
ret = jffs2_rubinmips_init();
if (ret)
goto exit_rtime;
ret = jffs2_dynrubin_init();
if (ret)
goto exit_rubinmips;
ret = jffs2_lzo_init();
if (ret)
goto exit_dynrubin;
/* Setting default compression mode */
#if defined(CONFIG_JFFS2_CMODE_NONE)
jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
jffs2_dbg(1, "default compression mode: none\n");
#elif defined(CONFIG_JFFS2_CMODE_SIZE)
jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
jffs2_dbg(1, "default compression mode: size\n");
#elif defined(CONFIG_JFFS2_CMODE_FAVOURLZO)
jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO;
jffs2_dbg(1, "default compression mode: favourlzo\n");
#else
jffs2_dbg(1, "default compression mode: priority\n");
#endif
return 0;
exit_dynrubin:
jffs2_dynrubin_exit();
exit_rubinmips:
jffs2_rubinmips_exit();
exit_rtime:
jffs2_rtime_exit();
exit_zlib:
jffs2_zlib_exit();
exit:
return ret;
}
int jffs2_compressors_exit(void)
{
/* Unregistering compressors */
jffs2_lzo_exit();
jffs2_dynrubin_exit();
jffs2_rubinmips_exit();
jffs2_rtime_exit();
jffs2_zlib_exit();
return 0;
}
| linux-master | fs/jffs2/compr.c |
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2006 NEC Corporation
*
* Created by KaiGai Kohei <[email protected]>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/jffs2.h>
#include <linux/xattr.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"
static int jffs2_trusted_getxattr(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
return do_jffs2_getxattr(inode, JFFS2_XPREFIX_TRUSTED,
name, buffer, size);
}
static int jffs2_trusted_setxattr(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
const char *name, const void *buffer,
size_t size, int flags)
{
return do_jffs2_setxattr(inode, JFFS2_XPREFIX_TRUSTED,
name, buffer, size, flags);
}
static bool jffs2_trusted_listxattr(struct dentry *dentry)
{
return capable(CAP_SYS_ADMIN);
}
const struct xattr_handler jffs2_trusted_xattr_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.list = jffs2_trusted_listxattr,
.set = jffs2_trusted_setxattr,
.get = jffs2_trusted_getxattr
};
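/*
 * Illustrative usage from userspace (path is hypothetical; both
 * operations require CAP_SYS_ADMIN, the same capability that gates
 * listing above):
 *
 *   setfattr -n trusted.backup -v yes /mnt/jffs2/file
 *   getfattr -n trusted.backup /mnt/jffs2/file
 */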
| linux-master | fs/jffs2/xattr_trusted.c |