fs/ntfs3: Restyle comments to better align with kernel-doc

Capitalize comments and end with period for better reading.

Also, function comments are now a little more kernel-doc style. This way
we can easily convert them to kernel-doc style if we want. Note that they
are not yet fully in that style: for example, function comments start
with /* whereas in kernel-doc style they start with /**.

Use imperative mood in function descriptions.

Change words like ntfs -> NTFS, linux -> Linux.

Use "we" not "I" when commenting code.

Signed-off-by: Kari Argillander <kari.argillander@gmail.com>
Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
This commit is contained in:
Kari Argillander 2021-08-03 14:57:09 +03:00 committed by Konstantin Komarov
parent b8155e95de
commit e8b8e97f91
No known key found for this signature in database
GPG Key ID: A9B0331F832407B6
21 changed files with 1987 additions and 2070 deletions

View File

@ -3,7 +3,7 @@
*
* Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
*
* TODO: merge attr_set_size/attr_data_get_block/attr_allocate_frame?
* TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
*/
#include <linux/blkdev.h>
@ -20,7 +20,7 @@
/*
* You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to manage
* preallocate algorithm
* preallocate algorithm.
*/
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
@ -35,10 +35,6 @@
// 16G
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
/*
* get_pre_allocated
*
*/
static inline u64 get_pre_allocated(u64 size)
{
u32 clump;
@ -65,7 +61,7 @@ static inline u64 get_pre_allocated(u64 size)
/*
* attr_must_be_resident
*
* returns true if attribute must be resident
* Return: True if attribute must be resident.
*/
static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
enum ATTR_TYPE type)
@ -90,9 +86,7 @@ static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
}
/*
* attr_load_runs
*
* load all runs stored in 'attr'
* attr_load_runs - Load all runs stored in @attr.
*/
int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
struct runs_tree *run, const CLST *vcn)
@ -121,9 +115,7 @@ int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
}
/*
* int run_deallocate_ex
*
* Deallocate clusters
* run_deallocate_ex - Deallocate clusters.
*/
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
CLST vcn, CLST len, CLST *done, bool trim)
@ -163,7 +155,7 @@ failed:
vcn_next = vcn + clen;
if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
vcn != vcn_next) {
// save memory - don't load entire run
/* Save memory - don't load entire run. */
goto failed;
}
}
@ -176,9 +168,7 @@ out:
}
/*
* attr_allocate_clusters
*
* find free space, mark it as used and store in 'run'
* attr_allocate_clusters - Find free space, mark it as used and store in @run.
*/
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
@ -207,7 +197,7 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
if (new_lcn && vcn == vcn0)
*new_lcn = lcn;
/* Add new fragment into run storage */
/* Add new fragment into run storage. */
if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
wnd_set_free(wnd, lcn, flen);
@ -228,7 +218,7 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
}
out:
/* undo */
/* Undo. */
run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
run_truncate(run, vcn0);
@ -236,8 +226,10 @@ out:
}
/*
* if page is not NULL - it is already contains resident data
* and locked (called from ni_write_frame)
* attr_make_nonresident
*
 * If page is not NULL - it already contains resident data
* and locked (called from ni_write_frame()).
*/
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
@ -275,7 +267,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
run_init(run);
/* make a copy of original attribute */
/* Make a copy of original attribute. */
attr_s = kmemdup(attr, asize, GFP_NOFS);
if (!attr_s) {
err = -ENOMEM;
@ -283,7 +275,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
}
if (!len) {
/* empty resident -> empty nonresident */
/* Empty resident -> Empty nonresident. */
alen = 0;
} else {
const char *data = resident_data(attr);
@ -294,7 +286,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
goto out1;
if (!rsize) {
/* empty resident -> non empty nonresident */
/* Empty resident -> Non empty nonresident. */
} else if (!is_data) {
err = ntfs_sb_write_run(sbi, run, 0, data, rsize);
if (err)
@ -319,7 +311,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
}
}
/* remove original attribute */
/* Remove original attribute. */
used -= asize;
memmove(attr, Add2Ptr(attr, asize), used - aoff);
rec->used = cpu_to_le32(used);
@ -342,7 +334,7 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
if (is_data)
ni->ni_flags &= ~NI_FLAG_RESIDENT;
/* Resident attribute becomes non resident */
/* Resident attribute becomes non resident. */
return 0;
out3:
@ -352,20 +344,18 @@ out3:
rec->used = cpu_to_le32(used + asize);
mi->dirty = true;
out2:
/* undo: do not trim new allocated clusters */
/* Undo: do not trim new allocated clusters. */
run_deallocate(sbi, run, false);
run_close(run);
out1:
kfree(attr_s);
/*reinsert le*/
/* Reinsert le. */
out:
return err;
}
/*
* attr_set_size_res
*
* helper for attr_set_size
* attr_set_size_res - Helper for attr_set_size().
*/
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
@ -407,14 +397,13 @@ static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
}
/*
* attr_set_size
* attr_set_size - Change the size of attribute.
*
* change the size of attribute
* Extend:
* - sparse/compressed: no allocated clusters
* - normal: append allocated and preallocated new clusters
* - Sparse/compressed: No allocated clusters.
* - Normal: Append allocated and preallocated new clusters.
* Shrink:
* - no deallocate if keep_prealloc is set
* - No deallocate if @keep_prealloc is set.
*/
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
const __le16 *name, u8 name_len, struct runs_tree *run,
@ -451,7 +440,7 @@ again:
if (err || !attr_b->non_res)
goto out;
/* layout of records may be changed, so do a full search */
/* Layout of records may be changed, so do a full search. */
goto again;
}
@ -530,10 +519,10 @@ next_le:
add_alloc_in_same_attr_seg:
lcn = 0;
if (is_mft) {
/* mft allocates clusters from mftzone */
/* MFT allocates clusters from MFT zone. */
pre_alloc = 0;
} else if (is_ext) {
/* no preallocate for sparse/compress */
/* No preallocate for sparse/compress. */
pre_alloc = 0;
} else if (pre_alloc == -1) {
pre_alloc = 0;
@ -544,7 +533,7 @@ add_alloc_in_same_attr_seg:
pre_alloc = new_alen2 - new_alen;
}
/* Get the last lcn to allocate from */
/* Get the last LCN to allocate from. */
if (old_alen &&
!run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
lcn = SPARSE_LCN;
@ -575,7 +564,7 @@ add_alloc_in_same_attr_seg:
}
alen = to_allocate;
} else {
/* ~3 bytes per fragment */
/* ~3 bytes per fragment. */
err = attr_allocate_clusters(
sbi, run, vcn, lcn, to_allocate, &pre_alloc,
is_mft ? ALLOCATE_MFT : 0, &alen,
@ -607,12 +596,12 @@ pack_runs:
mi_b->dirty = true;
if (next_svcn >= vcn && !to_allocate) {
/* Normal way. update attribute and exit */
/* Normal way. Update attribute and exit. */
attr_b->nres.data_size = cpu_to_le64(new_size);
goto ok;
}
/* at least two mft to avoid recursive loop*/
/* At least two MFT to avoid recursive loop. */
if (is_mft && next_svcn == vcn &&
((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
new_size = new_alloc_tmp;
@ -637,7 +626,7 @@ pack_runs:
if (next_svcn < vcn)
goto pack_runs;
/* layout of records is changed */
/* Layout of records is changed. */
goto again;
}
@ -645,15 +634,15 @@ pack_runs:
err = ni_create_attr_list(ni);
if (err)
goto out;
/* layout of records is changed */
/* Layout of records is changed. */
}
if (next_svcn >= vcn) {
/* this is mft data, repeat */
/* This is MFT data, repeat. */
goto again;
}
/* insert new attribute segment */
/* Insert new attribute segment. */
err = ni_insert_nonresident(ni, type, name, name_len, run,
next_svcn, vcn - next_svcn,
attr_b->flags, &attr, &mi);
@ -667,8 +656,10 @@ pack_runs:
evcn = le64_to_cpu(attr->nres.evcn);
le_b = NULL;
/* layout of records maybe changed */
/* find base attribute to update*/
/*
* Layout of records maybe changed.
* Find base attribute to update.
*/
attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
NULL, &mi_b);
if (!attr_b) {
@ -704,11 +695,11 @@ pack_runs:
u16 le_sz = le16_to_cpu(le->size);
/*
* NOTE: list entries for one attribute are always
* NOTE: List entries for one attribute are always
* the same size. We deal with last entry (vcn==0)
* and it is not first in entries array
* (list entry for std attribute always first)
* So it is safe to step back
* (list entry for std attribute always first).
* So it is safe to step back.
*/
mi_remove_attr(mi, attr);
@ -793,7 +784,7 @@ out:
if (!err && attr_b && ret)
*ret = attr_b;
/* update inode_set_bytes*/
/* Update inode_set_bytes. */
if (!err && ((type == ATTR_DATA && !name_len) ||
(type == ATTR_ALLOC && name == I30_NAME))) {
bool dirty = false;
@ -843,7 +834,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
up_read(&ni->file.run_lock);
if (ok && (*lcn != SPARSE_LCN || !new)) {
/* normal way */
/* Normal way. */
return 0;
}
@ -909,7 +900,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
if (!ok) {
ok = run_lookup_entry(run, vcn, lcn, len, NULL);
if (ok && (*lcn != SPARSE_LCN || !new)) {
/* normal way */
/* Normal way. */
err = 0;
goto ok;
}
@ -932,7 +923,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
goto out;
}
/* Get the last lcn to allocate from */
/* Get the last LCN to allocate from. */
hint = 0;
if (vcn > evcn1) {
@ -970,20 +961,20 @@ repack:
mi_b->dirty = true;
mark_inode_dirty(&ni->vfs_inode);
/* stored [vcn : next_svcn) from [vcn : end) */
/* Stored [vcn : next_svcn) from [vcn : end). */
next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
if (end <= evcn1) {
if (next_svcn == evcn1) {
/* Normal way. update attribute and exit */
/* Normal way. Update attribute and exit. */
goto ok;
}
/* add new segment [next_svcn : evcn1 - next_svcn )*/
/* Add new segment [next_svcn : evcn1 - next_svcn). */
if (!ni->attr_list.size) {
err = ni_create_attr_list(ni);
if (err)
goto out;
/* layout of records is changed */
/* Layout of records is changed. */
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
0, NULL, &mi_b);
@ -1001,7 +992,7 @@ repack:
svcn = evcn1;
/* Estimate next attribute */
/* Estimate next attribute. */
attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
if (attr) {
@ -1012,7 +1003,7 @@ repack:
if (end < next_svcn)
end = next_svcn;
while (end > evcn) {
/* remove segment [svcn : evcn)*/
/* Remove segment [svcn : evcn). */
mi_remove_attr(mi, attr);
if (!al_remove_le(ni, le)) {
@ -1021,7 +1012,7 @@ repack:
}
if (evcn + 1 >= alloc) {
/* last attribute segment */
/* Last attribute segment. */
evcn1 = evcn + 1;
goto ins_ext;
}
@ -1125,7 +1116,7 @@ int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
return -EINVAL;
if (attr->non_res) {
/*return special error code to check this case*/
/* Return special error code to check this case. */
return E_NTFS_NONRESIDENT;
}
@ -1148,9 +1139,7 @@ int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
}
/*
* attr_load_runs_vcn
*
* load runs with vcn
* attr_load_runs_vcn - Load runs with VCN.
*/
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
const __le16 *name, u8 name_len, struct runs_tree *run,
@ -1180,7 +1169,7 @@ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
}
/*
* load runs for given range [from to)
 * attr_load_runs_range - Load runs for given range [from to).
*/
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
const __le16 *name, u8 name_len, struct runs_tree *run,
@ -1199,7 +1188,7 @@ int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
vcn);
if (err)
return err;
clen = 0; /*next run_lookup_entry(vcn) must be success*/
clen = 0; /* Next run_lookup_entry(vcn) must be success. */
}
}
@ -1210,7 +1199,7 @@ int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
/*
* attr_wof_frame_info
*
* read header of xpress/lzx file to get info about frame
* Read header of Xpress/LZX file to get info about frame.
*/
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
struct runs_tree *run, u64 frame, u64 frames,
@ -1227,20 +1216,20 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
__le64 *off64;
if (ni->vfs_inode.i_size < 0x100000000ull) {
/* file starts with array of 32 bit offsets */
/* File starts with array of 32 bit offsets. */
bytes_per_off = sizeof(__le32);
vbo[1] = frame << 2;
*vbo_data = frames << 2;
} else {
/* file starts with array of 64 bit offsets */
/* File starts with array of 64 bit offsets. */
bytes_per_off = sizeof(__le64);
vbo[1] = frame << 3;
*vbo_data = frames << 3;
}
/*
* read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts
* read 4/8 bytes at [vbo] == offset where compressed frame ends
* Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
* Read 4/8 bytes at [vbo] == offset where compressed frame ends.
*/
if (!attr->non_res) {
if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
@ -1329,7 +1318,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
off[0] = le64_to_cpu(*off64);
}
} else {
/* two values in one page*/
/* Two values in one page. */
if (bytes_per_off == sizeof(__le32)) {
off32 = Add2Ptr(addr, voff);
off[0] = le32_to_cpu(off32[-1]);
@ -1355,9 +1344,7 @@ out:
#endif
/*
* attr_is_frame_compressed
*
* This function is used to detect compressed frame
* attr_is_frame_compressed - Used to detect compressed frame.
*/
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
CLST frame, CLST *clst_data)
@ -1391,14 +1378,14 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
}
if (lcn == SPARSE_LCN) {
/* sparsed frame */
/* Sparsed frame. */
return 0;
}
if (clen >= clst_frame) {
/*
* The frame is not compressed 'cause
* it does not contain any sparse clusters
* it does not contain any sparse clusters.
*/
*clst_data = clst_frame;
return 0;
@ -1409,8 +1396,8 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
*clst_data = clen;
/*
* The frame is compressed if *clst_data + slen >= clst_frame
* Check next fragments
* The frame is compressed if *clst_data + slen >= clst_frame.
* Check next fragments.
*/
while ((vcn += clen) < alen) {
vcn_next = vcn;
@ -1433,8 +1420,8 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
} else {
if (slen) {
/*
* data_clusters + sparse_clusters =
* not enough for frame
* Data_clusters + sparse_clusters =
* not enough for frame.
*/
return -EINVAL;
}
@ -1445,11 +1432,11 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
if (!slen) {
/*
* There is no sparsed clusters in this frame
* So it is not compressed
* so it is not compressed.
*/
*clst_data = clst_frame;
} else {
/*frame is compressed*/
/* Frame is compressed. */
}
break;
}
@ -1459,10 +1446,9 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
}
/*
* attr_allocate_frame
* attr_allocate_frame - Allocate/free clusters for @frame.
*
* allocate/free clusters for 'frame'
* assumed: down_write(&ni->file.run_lock);
* Assumed: down_write(&ni->file.run_lock);
*/
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
u64 new_valid)
@ -1538,10 +1524,10 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
goto out;
}
end = vcn + clst_data;
/* run contains updated range [vcn + len : end) */
/* Run contains updated range [vcn + len : end). */
} else {
CLST alen, hint = 0;
/* Get the last lcn to allocate from */
/* Get the last LCN to allocate from. */
if (vcn + clst_data &&
!run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
NULL)) {
@ -1555,7 +1541,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
goto out;
end = vcn + len;
/* run contains updated range [vcn + clst_data : end) */
/* Run contains updated range [vcn + clst_data : end). */
}
total_size += (u64)len << sbi->cluster_bits;
@ -1571,20 +1557,20 @@ repack:
mi_b->dirty = true;
mark_inode_dirty(&ni->vfs_inode);
/* stored [vcn : next_svcn) from [vcn : end) */
/* Stored [vcn : next_svcn) from [vcn : end). */
next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
if (end <= evcn1) {
if (next_svcn == evcn1) {
/* Normal way. update attribute and exit */
/* Normal way. Update attribute and exit. */
goto ok;
}
/* add new segment [next_svcn : evcn1 - next_svcn )*/
/* Add new segment [next_svcn : evcn1 - next_svcn). */
if (!ni->attr_list.size) {
err = ni_create_attr_list(ni);
if (err)
goto out;
/* layout of records is changed */
/* Layout of records is changed. */
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
0, NULL, &mi_b);
@ -1602,7 +1588,7 @@ repack:
svcn = evcn1;
/* Estimate next attribute */
/* Estimate next attribute. */
attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
if (attr) {
@ -1613,7 +1599,7 @@ repack:
if (end < next_svcn)
end = next_svcn;
while (end > evcn) {
/* remove segment [svcn : evcn)*/
/* Remove segment [svcn : evcn). */
mi_remove_attr(mi, attr);
if (!al_remove_le(ni, le)) {
@ -1622,7 +1608,7 @@ repack:
}
if (evcn + 1 >= alloc) {
/* last attribute segment */
/* Last attribute segment. */
evcn1 = evcn + 1;
goto ins_ext;
}
@ -1684,7 +1670,9 @@ out:
return err;
}
/* Collapse range in file */
/*
* attr_collapse_range - Collapse range in file.
*/
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
int err = 0;
@ -1725,7 +1713,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
}
if ((vbo & mask) || (bytes & mask)) {
/* allow to collapse only cluster aligned ranges */
/* Allow to collapse only cluster aligned ranges. */
return -EINVAL;
}
@ -1737,7 +1725,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
if (vbo + bytes >= data_size) {
u64 new_valid = min(ni->i_valid, vbo);
/* Simple truncate file at 'vbo' */
/* Simple truncate file at 'vbo'. */
truncate_setsize(&ni->vfs_inode, vbo);
err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
&new_valid, true, NULL);
@ -1749,7 +1737,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
}
/*
* Enumerate all attribute segments and collapse
* Enumerate all attribute segments and collapse.
*/
alen = alloc_size >> sbi->cluster_bits;
vcn = vbo >> sbi->cluster_bits;
@ -1782,7 +1770,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
for (;;) {
if (svcn >= end) {
/* shift vcn */
/* Shift VCN. */
attr->nres.svcn = cpu_to_le64(svcn - len);
attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
if (le) {
@ -1793,7 +1781,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
} else if (svcn < vcn || end < evcn1) {
CLST vcn1, eat, next_svcn;
/* collapse a part of this attribute segment */
/* Collapse a part of this attribute segment. */
err = attr_load_runs(attr, ni, run, &svcn);
if (err)
goto out;
@ -1811,7 +1799,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
}
if (svcn >= vcn) {
/* shift vcn */
/* Shift VCN. */
attr->nres.svcn = cpu_to_le64(vcn);
if (le) {
le->vcn = attr->nres.svcn;
@ -1832,7 +1820,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
if (err)
goto out;
/* layout of records maybe changed */
/* Layout of records maybe changed. */
attr_b = NULL;
le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
&next_svcn);
@ -1842,18 +1830,18 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
}
}
/* free all allocated memory */
/* Free all allocated memory. */
run_truncate(run, 0);
} else {
u16 le_sz;
u16 roff = le16_to_cpu(attr->nres.run_off);
/*run==1 means unpack and deallocate*/
/* run==1 means unpack and deallocate. */
run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
evcn1 - 1, svcn, Add2Ptr(attr, roff),
le32_to_cpu(attr->size) - roff);
/* delete this attribute segment */
/* Delete this attribute segment. */
mi_remove_attr(mi, attr);
if (!le)
break;
@ -1868,13 +1856,13 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
break;
if (!svcn) {
/* Load next record that contains this attribute */
/* Load next record that contains this attribute. */
if (ni_load_mi(ni, le, &mi)) {
err = -EINVAL;
goto out;
}
/* Look for required attribute */
/* Look for required attribute. */
attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
0, &le->id);
if (!attr) {
@ -1925,7 +1913,7 @@ next_attr:
attr_b->nres.total_size = cpu_to_le64(total_size);
mi_b->dirty = true;
/*update inode size*/
/* Update inode size. */
ni->i_valid = valid_size;
ni->vfs_inode.i_size = data_size;
inode_set_bytes(&ni->vfs_inode, total_size);
@ -1940,7 +1928,11 @@ out:
return err;
}
/* not for normal files */
/*
* attr_punch_hole
*
* Not for normal files.
*/
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
int err = 0;
@ -1981,7 +1973,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
total_size = le64_to_cpu(attr_b->nres.total_size);
if (vbo >= alloc_size) {
// NOTE: it is allowed
// NOTE: It is allowed.
return 0;
}
@ -2004,7 +1996,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
down_write(&ni->file.run_lock);
/*
* Enumerate all attribute segments and punch hole where necessary
* Enumerate all attribute segments and punch hole where necessary.
*/
alen = alloc_size >> sbi->cluster_bits;
vcn = vbo >> sbi->cluster_bits;
@ -2050,7 +2042,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
goto out;
if (dealloc2 == dealloc) {
/* looks like the required range is already sparsed */
/* Looks like the required range is already sparsed. */
} else {
if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
false)) {
@ -2062,7 +2054,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
if (err)
goto out;
}
/* free all allocated memory */
/* Free all allocated memory. */
run_truncate(run, 0);
if (evcn1 >= alen)
@ -2082,7 +2074,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
attr_b->nres.total_size = cpu_to_le64(total_size);
mi_b->dirty = true;
/*update inode size*/
/* Update inode size. */
inode_set_bytes(&ni->vfs_inode, total_size);
ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
mark_inode_dirty(&ni->vfs_inode);

View File

@ -14,7 +14,11 @@
#include "ntfs.h"
#include "ntfs_fs.h"
/* Returns true if le is valid */
/*
* al_is_valid_le
*
* Return: True if @le is valid.
*/
static inline bool al_is_valid_le(const struct ntfs_inode *ni,
struct ATTR_LIST_ENTRY *le)
{
@ -101,8 +105,9 @@ out:
/*
* al_enumerate
*
* Returns the next list 'le'
* if 'le' is NULL then returns the first 'le'
* Return:
* * The next list le.
* * If @le is NULL then return the first le.
*/
struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
struct ATTR_LIST_ENTRY *le)
@ -115,22 +120,22 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
} else {
sz = le16_to_cpu(le->size);
if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
/* Impossible 'cause we should not return such 'le' */
/* Impossible 'cause we should not return such le. */
return NULL;
}
le = Add2Ptr(le, sz);
}
/* Check boundary */
/* Check boundary. */
off = PtrOffset(ni->attr_list.le, le);
if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
// The regular end of list
/* The regular end of list. */
return NULL;
}
sz = le16_to_cpu(le->size);
/* Check 'le' for errors */
/* Check le for errors. */
if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
off + sz > ni->attr_list.size ||
sz < le->name_off + le->name_len * sizeof(short)) {
@ -143,8 +148,9 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
/*
* al_find_le
*
* finds the first 'le' in the list which matches type, name and vcn
* Returns NULL if not found
* Find the first le in the list which matches type, name and VCN.
*
* Return: NULL if not found.
*/
struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
struct ATTR_LIST_ENTRY *le,
@ -159,8 +165,9 @@ struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
/*
* al_find_ex
*
* finds the first 'le' in the list which matches type, name and vcn
* Returns NULL if not found
* Find the first le in the list which matches type, name and VCN.
*
* Return: NULL if not found.
*/
struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
struct ATTR_LIST_ENTRY *le,
@ -174,7 +181,7 @@ struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
u64 le_vcn;
int diff = le32_to_cpu(le->type) - type_in;
/* List entries are sorted by type, name and vcn */
/* List entries are sorted by type, name and VCN. */
if (diff < 0)
continue;
@ -187,7 +194,7 @@ struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
le_vcn = le64_to_cpu(le->vcn);
if (!le_vcn) {
/*
* compare entry names only for entry with vcn == 0
* Compare entry names only for entry with vcn == 0.
*/
diff = ntfs_cmp_names(le_name(le), name_len, name,
name_len, ni->mi.sbi->upcase,
@ -217,7 +224,7 @@ struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
/*
* al_find_le_to_insert
*
* finds the first list entry which matches type, name and vcn
* Find the first list entry which matches type, name and VCN.
*/
static struct ATTR_LIST_ENTRY *al_find_le_to_insert(struct ntfs_inode *ni,
enum ATTR_TYPE type,
@ -227,7 +234,7 @@ static struct ATTR_LIST_ENTRY *al_find_le_to_insert(struct ntfs_inode *ni,
struct ATTR_LIST_ENTRY *le = NULL, *prev;
u32 type_in = le32_to_cpu(type);
/* List entries are sorted by type, name, vcn */
/* List entries are sorted by type, name and VCN. */
while ((le = al_enumerate(ni, prev = le))) {
int diff = le32_to_cpu(le->type) - type_in;
@ -239,7 +246,7 @@ static struct ATTR_LIST_ENTRY *al_find_le_to_insert(struct ntfs_inode *ni,
if (!le->vcn) {
/*
* compare entry names only for entry with vcn == 0
* Compare entry names only for entry with vcn == 0.
*/
diff = ntfs_cmp_names(le_name(le), le->name_len, name,
name_len, ni->mi.sbi->upcase,
@ -261,7 +268,7 @@ static struct ATTR_LIST_ENTRY *al_find_le_to_insert(struct ntfs_inode *ni,
/*
* al_add_le
*
* adds an "attribute list entry" to the list.
* Add an "attribute list entry" to the list.
*/
int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref,
@ -335,9 +342,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
}
/*
* al_remove_le
*
* removes 'le' from attribute list
* al_remove_le - Remove @le from attribute list.
*/
bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le)
{
@ -361,9 +366,7 @@ bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le)
}
/*
* al_delete_le
*
* deletes from the list the first 'le' which matches its parameters.
* al_delete_le - Delete first le from the list which matches its parameters.
*/
bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
const __le16 *name, size_t name_len,
@ -374,7 +377,7 @@ bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
size_t off;
typeof(ni->attr_list) *al = &ni->attr_list;
/* Scan forward to the first 'le' that matches the input */
/* Scan forward to the first le that matches the input. */
le = al_find_ex(ni, NULL, type, name, name_len, &vcn);
if (!le)
return false;
@ -405,9 +408,9 @@ next:
goto next;
}
/* Save on stack the size of 'le' */
/* Save on stack the size of 'le'. */
size = le16_to_cpu(le->size);
/* Delete 'le'. */
/* Delete the le. */
memmove(le, Add2Ptr(le, size), al->size - (off + size));
al->size -= size;
@ -416,9 +419,6 @@ next:
return true;
}
/*
* al_update
*/
int al_update(struct ntfs_inode *ni)
{
int err;
@ -429,8 +429,8 @@ int al_update(struct ntfs_inode *ni)
return 0;
/*
* attribute list increased on demand in al_add_le
* attribute list decreased here
* Attribute list increased on demand in al_add_le.
* Attribute list decreased here.
*/
err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, al->size, NULL,
false, &attr);

View File

@ -4,6 +4,7 @@
* Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
*
*/
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
@ -32,7 +33,7 @@ static const u8 zero_mask[] = { 0xFF, 0xFE, 0xFC, 0xF8, 0xF0,
/*
* are_bits_clear
*
* Returns true if all bits [bit, bit+nbits) are zeros "0"
* Return: True if all bits [bit, bit+nbits) are zeros "0".
*/
bool are_bits_clear(const ulong *lmap, size_t bit, size_t nbits)
{
@ -74,14 +75,13 @@ bool are_bits_clear(const ulong *lmap, size_t bit, size_t nbits)
if (pos && (*map & fill_mask[pos]))
return false;
// All bits are zero
return true;
}
/*
* are_bits_set
*
* Returns true if all bits [bit, bit+nbits) are ones "1"
* Return: True if all bits [bit, bit+nbits) are ones "1".
*/
bool are_bits_set(const ulong *lmap, size_t bit, size_t nbits)
{
@ -130,6 +130,5 @@ bool are_bits_set(const ulong *lmap, size_t bit, size_t nbits)
return false;
}
// All bits are ones
return true;
}

View File

@ -6,7 +6,7 @@
* This code builds two trees of free clusters extents.
* Trees are sorted by start of extent and by length of extent.
* NTFS_MAX_WND_EXTENTS defines the maximum number of elements in trees.
* In extreme case code reads on-disk bitmap to find free clusters
* In extreme case code reads on-disk bitmap to find free clusters.
*
*/
@ -29,12 +29,10 @@ struct rb_node_key {
size_t key;
};
/*
* Tree is sorted by start (key)
*/
/* Tree is sorted by start (key). */
struct e_node {
struct rb_node_key start; /* Tree sorted by start */
struct rb_node_key count; /* Tree sorted by len*/
struct rb_node_key start; /* Tree sorted by start. */
struct rb_node_key count; /* Tree sorted by len. */
};
static int wnd_rescan(struct wnd_bitmap *wnd);
@ -62,9 +60,12 @@ static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
}
/*
* b_pos + b_len - biggest fragment
* Scan range [wpos wbits) window 'buf'
* Returns -1 if not found
* wnd_scan
*
* b_pos + b_len - biggest fragment.
* Scan range [wpos wbits) window @buf.
*
* Return: -1 if not found.
*/
static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
size_t to_alloc, size_t *prev_tail, size_t *b_pos,
@ -96,7 +97,7 @@ static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
}
/*
 * Now we have a fragment [wpos, wend) starting with 0.
* Now we have a fragment [wpos, wend) staring with 0.
*/
end = wpos + to_alloc - *prev_tail;
free_bits = find_next_bit(buf, min(end, wend), wpos);
@ -125,9 +126,7 @@ static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
}
/*
* wnd_close
*
* Frees all resources
 * wnd_close - Free all resources.
*/
void wnd_close(struct wnd_bitmap *wnd)
{
@ -170,9 +169,7 @@ static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
}
/*
* rb_insert_count
*
* Helper function to insert special kind of 'count' tree
* rb_insert_count - Helper function to insert special kind of 'count' tree.
*/
static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
{
@ -205,9 +202,7 @@ static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
}
/*
* inline bool rb_insert_start
*
* Helper function to insert special kind of 'start' tree
 * rb_insert_start - Helper function to insert special kind of 'start' tree.
*/
static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
{
@ -237,10 +232,8 @@ static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
}
/*
* wnd_add_free_ext
*
* adds a new extent of free space
* build = 1 when building tree
* wnd_add_free_ext - Adds a new extent of free space.
* @build: 1 when building tree.
*/
static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
bool build)
@ -250,14 +243,14 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
struct rb_node *n;
if (build) {
/* Use extent_min to filter too short extents */
/* Use extent_min to filter too short extents. */
if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
len <= wnd->extent_min) {
wnd->uptodated = -1;
return;
}
} else {
/* Try to find extent before 'bit' */
/* Try to find extent before 'bit'. */
n = rb_lookup(&wnd->start_tree, bit);
if (!n) {
@ -266,7 +259,7 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
e = rb_entry(n, struct e_node, start.node);
n = rb_next(n);
if (e->start.key + e->count.key == bit) {
/* Remove left */
/* Remove left. */
bit = e->start.key;
len += e->count.key;
rb_erase(&e->start.node, &wnd->start_tree);
@ -284,7 +277,7 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
if (e->start.key > end_in)
break;
/* Remove right */
/* Remove right. */
n = rb_next(n);
len += next_end - end_in;
end_in = next_end;
@ -299,7 +292,7 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
}
if (wnd->uptodated != 1) {
/* Check bits before 'bit' */
/* Check bits before 'bit'. */
ib = wnd->zone_bit == wnd->zone_end ||
bit < wnd->zone_end
? 0
@ -310,7 +303,7 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
len += 1;
}
/* Check bits after 'end_in' */
/* Check bits after 'end_in'. */
ib = wnd->zone_bit == wnd->zone_end ||
end_in > wnd->zone_bit
? wnd->nbits
@ -322,29 +315,29 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
}
}
}
/* Insert new fragment */
/* Insert new fragment. */
if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
if (e0)
kmem_cache_free(ntfs_enode_cachep, e0);
wnd->uptodated = -1;
/* Compare with smallest fragment */
/* Compare with smallest fragment. */
n = rb_last(&wnd->count_tree);
e = rb_entry(n, struct e_node, count.node);
if (len <= e->count.key)
goto out; /* Do not insert small fragments */
goto out; /* Do not insert small fragments. */
if (build) {
struct e_node *e2;
n = rb_prev(n);
e2 = rb_entry(n, struct e_node, count.node);
/* smallest fragment will be 'e2->count.key' */
/* Smallest fragment will be 'e2->count.key'. */
wnd->extent_min = e2->count.key;
}
/* Replace smallest fragment by new one */
/* Replace smallest fragment by new one. */
rb_erase(&e->start.node, &wnd->start_tree);
rb_erase(&e->count.node, &wnd->count_tree);
wnd->count -= 1;
@ -371,9 +364,7 @@ out:;
}
/*
* wnd_remove_free_ext
*
* removes a run from the cached free space
* wnd_remove_free_ext - Remove a run from the cached free space.
*/
static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
{
@ -382,7 +373,7 @@ static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
size_t end_in = bit + len;
size_t end3, end, new_key, new_len, max_new_len;
/* Try to find extent before 'bit' */
/* Try to find extent before 'bit'. */
n = rb_lookup(&wnd->start_tree, bit);
if (!n)
@ -394,11 +385,11 @@ static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
new_key = new_len = 0;
len = e->count.key;
/* Range [bit,end_in) must be inside 'e' or outside 'e' and 'n' */
/* Range [bit,end_in) must be inside 'e' or outside 'e' and 'n'. */
if (e->start.key > bit)
;
else if (end_in <= end) {
/* Range [bit,end_in) inside 'e' */
/* Range [bit,end_in) inside 'e'. */
new_key = end_in;
new_len = end - end_in;
len = bit - e->start.key;
@ -478,13 +469,13 @@ static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
wnd->uptodated = -1;
/* Get minimal extent */
/* Get minimal extent. */
e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
count.node);
if (e->count.key > new_len)
goto out;
/* Replace minimum */
/* Replace minimum. */
rb_erase(&e->start.node, &wnd->start_tree);
rb_erase(&e->count.node, &wnd->count_tree);
wnd->count -= 1;
@ -508,9 +499,7 @@ out:
}
/*
* wnd_rescan
*
* Scan all bitmap. used while initialization.
* wnd_rescan - Scan all bitmap. Used during initialization.
*/
static int wnd_rescan(struct wnd_bitmap *wnd)
{
@ -541,7 +530,7 @@ static int wnd_rescan(struct wnd_bitmap *wnd)
if (wnd->inited) {
if (!wnd->free_bits[iw]) {
/* all ones */
/* All ones. */
if (prev_tail) {
wnd_add_free_ext(wnd,
vbo * 8 - prev_tail,
@ -551,7 +540,7 @@ static int wnd_rescan(struct wnd_bitmap *wnd)
goto next_wnd;
}
if (wbits == wnd->free_bits[iw]) {
/* all zeroes */
/* All zeroes. */
prev_tail += wbits;
wnd->total_zeroes += wbits;
goto next_wnd;
@ -604,14 +593,14 @@ static int wnd_rescan(struct wnd_bitmap *wnd)
wpos = used;
if (wpos >= wbits) {
/* No free blocks */
/* No free blocks. */
prev_tail = 0;
break;
}
frb = find_next_bit(buf, wbits, wpos);
if (frb >= wbits) {
/* keep last free block */
/* Keep last free block. */
prev_tail += frb - wpos;
break;
}
@ -619,9 +608,9 @@ static int wnd_rescan(struct wnd_bitmap *wnd)
wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
frb + prev_tail - wpos, true);
/* Skip free block and first '1' */
/* Skip free block and first '1'. */
wpos = frb + 1;
/* Reset previous tail */
/* Reset previous tail. */
prev_tail = 0;
} while (wpos < wbits);
@ -638,15 +627,15 @@ next_wnd:
}
}
/* Add last block */
/* Add last block. */
if (prev_tail)
wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);
/*
* Before init cycle wnd->uptodated was 0
* Before init cycle wnd->uptodated was 0.
* If any errors or limits occurs while initialization then
* wnd->uptodated will be -1
* If 'uptodated' is still 0 then Tree is really updated
* wnd->uptodated will be -1.
* If 'uptodated' is still 0 then Tree is really updated.
*/
if (!wnd->uptodated)
wnd->uptodated = 1;
@ -662,9 +651,6 @@ out:
return err;
}
/*
* wnd_init
*/
int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
{
int err;
@ -697,9 +683,7 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
}
/*
* wnd_map
*
* call sb_bread for requested window
* wnd_map - Call sb_bread for requested window.
*/
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
{
@ -728,9 +712,7 @@ static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
}
/*
* wnd_set_free
*
* Marks the bits range from bit to bit + bits as free
* wnd_set_free - Mark the bits range from bit to bit + bits as free.
*/
int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
@ -783,9 +765,7 @@ int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
}
/*
* wnd_set_used
*
* Marks the bits range from bit to bit + bits as used
* wnd_set_used - Mark the bits range from bit to bit + bits as used.
*/
int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
@ -839,7 +819,7 @@ int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
/*
* wnd_is_free_hlp
*
* Returns true if all clusters [bit, bit+bits) are free (bitmap only)
* Return: True if all clusters [bit, bit+bits) are free (bitmap only).
*/
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
@ -882,7 +862,7 @@ static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
/*
* wnd_is_free
*
* Returns true if all clusters [bit, bit+bits) are free
* Return: True if all clusters [bit, bit+bits) are free.
*/
bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
@ -914,7 +894,7 @@ use_wnd:
/*
* wnd_is_used
*
* Returns true if all clusters [bit, bit+bits) are used
* Return: True if all clusters [bit, bit+bits) are used.
*/
bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
@ -973,11 +953,11 @@ out:
}
/*
* wnd_find
* wnd_find - Look for free space.
*
* - flags - BITMAP_FIND_XXX flags
*
* looks for free space
* Returns 0 if not found
* Return: 0 if not found.
*/
size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
size_t flags, size_t *allocated)
@ -994,7 +974,7 @@ size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
bool fbits_valid;
struct buffer_head *bh;
/* fast checking for available free space */
/* Fast checking for available free space. */
if (flags & BITMAP_FIND_FULL) {
size_t zeroes = wnd_zeroes(wnd);
@ -1020,7 +1000,7 @@ size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
if (RB_EMPTY_ROOT(&wnd->start_tree)) {
if (wnd->uptodated == 1) {
/* extents tree is updated -> no free space */
/* Extents tree is updated -> No free space. */
goto no_space;
}
goto scan_bitmap;
@ -1030,7 +1010,7 @@ size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
if (!hint)
goto allocate_biggest;
/* Use hint: enumerate extents by start >= hint */
/* Use hint: Enumerate extents by start >= hint. */
pr = NULL;
cr = wnd->start_tree.rb_node;
@ -1059,7 +1039,7 @@ size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
goto allocate_biggest;
if (e->start.key + e->count.key > hint) {
/* We have found extension with 'hint' inside */
/* We have found extension with 'hint' inside. */
size_t len = e->start.key + e->count.key - hint;
if (len >= to_alloc && hint + to_alloc <= max_alloc) {
@ -1080,7 +1060,7 @@ size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
}
allocate_biggest:
/* Allocate from biggest free extent */
/* Allocate from biggest free extent. */
e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
if (e->count.key != wnd->extent_max)
wnd->extent_max = e->count.key;
@ -1090,14 +1070,14 @@ allocate_biggest:
;
} else if (flags & BITMAP_FIND_FULL) {
if (e->count.key < to_alloc0) {
/* Biggest free block is less then requested */
/* Biggest free block is less than requested. */
goto no_space;
}
to_alloc = e->count.key;
} else if (-1 != wnd->uptodated) {
to_alloc = e->count.key;
} else {
/* Check if we can use more bits */
/* Check if we can use more bits. */
size_t op, max_check;
struct rb_root start_tree;
@ -1118,7 +1098,7 @@ allocate_biggest:
to_alloc = op - e->start.key;
}
/* Prepare to return */
/* Prepare to return. */
fnd = e->start.key;
if (e->start.key + to_alloc > max_alloc)
to_alloc = max_alloc - e->start.key;
@ -1126,7 +1106,7 @@ allocate_biggest:
}
if (wnd->uptodated == 1) {
/* extents tree is updated -> no free space */
/* Extents tree is updated -> No free space. */
goto no_space;
}
@ -1140,7 +1120,7 @@ scan_bitmap:
/* At most two ranges [hint, max_alloc) + [0, hint) */
Again:
/* TODO: optimize request for case nbits > wbits */
/* TODO: Optimize request for case nbits > wbits. */
iw = hint >> log2_bits;
wbits = sb->s_blocksize * 8;
wpos = hint & (wbits - 1);
@ -1155,7 +1135,7 @@ Again:
nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
}
/* Enumerate all windows */
/* Enumerate all windows. */
for (; iw < nwnd; iw++) {
wbit = iw << log2_bits;
@ -1165,7 +1145,7 @@ Again:
b_len = prev_tail;
}
/* Skip full used window */
/* Skip full used window. */
prev_tail = 0;
wpos = 0;
continue;
@ -1189,25 +1169,25 @@ Again:
zbit = max(wnd->zone_bit, wbit);
zend = min(wnd->zone_end, ebit);
/* Here we have a window [wbit, ebit) and zone [zbit, zend) */
/* Here we have a window [wbit, ebit) and zone [zbit, zend). */
if (zend <= zbit) {
/* Zone does not overlap window */
/* Zone does not overlap window. */
} else {
wzbit = zbit - wbit;
wzend = zend - wbit;
/* Zone overlaps window */
/* Zone overlaps window. */
if (wnd->free_bits[iw] == wzend - wzbit) {
prev_tail = 0;
wpos = 0;
continue;
}
/* Scan two ranges window: [wbit, zbit) and [zend, ebit) */
/* Scan two ranges window: [wbit, zbit) and [zend, ebit). */
bh = wnd_map(wnd, iw);
if (IS_ERR(bh)) {
/* TODO: error */
/* TODO: Error */
prev_tail = 0;
wpos = 0;
continue;
@ -1215,9 +1195,9 @@ Again:
buf = (ulong *)bh->b_data;
/* Scan range [wbit, zbit) */
/* Scan range [wbit, zbit). */
if (wpos < wzbit) {
/* Scan range [wpos, zbit) */
/* Scan range [wpos, zbit). */
fnd = wnd_scan(buf, wbit, wpos, wzbit,
to_alloc, &prev_tail,
&b_pos, &b_len);
@ -1229,7 +1209,7 @@ Again:
prev_tail = 0;
/* Scan range [zend, ebit) */
/* Scan range [zend, ebit). */
if (wzend < wbits) {
fnd = wnd_scan(buf, wbit,
max(wzend, wpos), wbits,
@ -1247,24 +1227,24 @@ Again:
}
}
/* Current window does not overlap zone */
/* Current window does not overlap zone. */
if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
/* window is empty */
/* Window is empty. */
if (prev_tail + wbits >= to_alloc) {
fnd = wbit + wpos - prev_tail;
goto found;
}
/* Increase 'prev_tail' and process next window */
/* Increase 'prev_tail' and process next window. */
prev_tail += wbits;
wpos = 0;
continue;
}
/* read window */
/* Read window */
bh = wnd_map(wnd, iw);
if (IS_ERR(bh)) {
// TODO: error
// TODO: Error.
prev_tail = 0;
wpos = 0;
continue;
@ -1272,7 +1252,7 @@ Again:
buf = (ulong *)bh->b_data;
/* Scan range [wpos, eBits) */
/* Scan range [wpos, eBits). */
fnd = wnd_scan(buf, wbit, wpos, wbits, to_alloc, &prev_tail,
&b_pos, &b_len);
put_bh(bh);
@ -1281,15 +1261,15 @@ Again:
}
if (b_len < prev_tail) {
/* The last fragment */
/* The last fragment. */
b_len = prev_tail;
b_pos = max_alloc - prev_tail;
}
if (hint) {
/*
* We have scanned range [hint max_alloc)
* Prepare to scan range [0 hint + to_alloc)
* We have scanned range [hint max_alloc).
* Prepare to scan range [0 hint + to_alloc).
*/
size_t nextmax = hint + to_alloc;
@ -1312,7 +1292,7 @@ Again:
found:
if (flags & BITMAP_FIND_MARK_AS_USED) {
/* TODO optimize remove extent (pass 'e'?) */
/* TODO: Optimize remove extent (pass 'e'?). */
if (wnd_set_used(wnd, fnd, to_alloc))
goto no_space;
} else if (wnd->extent_max != MINUS_ONE_T &&
@ -1328,9 +1308,7 @@ no_space:
}
/*
* wnd_extend
*
* Extend bitmap ($MFT bitmap)
* wnd_extend - Extend bitmap ($MFT bitmap).
*/
int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
{
@ -1347,7 +1325,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
if (new_bits <= old_bits)
return -EINVAL;
/* align to 8 byte boundary */
/* Align to 8 byte boundary. */
new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
new_last = new_bits & (wbits - 1);
if (!new_last)
@ -1367,7 +1345,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
wnd->free_bits = new_free;
}
/* Zero bits [old_bits,new_bits) */
/* Zero bits [old_bits,new_bits). */
bits = new_bits - old_bits;
b0 = old_bits & (wbits - 1);
@ -1403,7 +1381,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
/*err = sync_dirty_buffer(bh);*/
/* err = sync_dirty_buffer(bh); */
b0 = 0;
bits -= op;
@ -1418,9 +1396,6 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
return 0;
}
/*
* wnd_zone_set
*/
void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
{
size_t zlen;
@ -1502,7 +1477,7 @@ int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
put_bh(bh);
}
/* Process the last fragment */
/* Process the last fragment. */
if (len >= minlen) {
err = ntfs_discard(sbi, lcn, len);
if (err)

View File

@ -3,7 +3,8 @@
*
* Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
*
* useful functions for debugging
* Useful functions for debugging.
*
*/
// clang-format off
@ -33,7 +34,7 @@ void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
#endif
/*
* Logging macros ( thanks Joe Perches <joe@perches.com> for implementation )
* Logging macros. Thanks Joe Perches <joe@perches.com> for implementation.
*/
#define ntfs_err(sb, fmt, ...) ntfs_printk(sb, KERN_ERR fmt, ##__VA_ARGS__)

View File

@ -3,9 +3,10 @@
*
* Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
*
* directory handling functions for ntfs-based filesystems
* Directory handling functions for NTFS-based filesystems.
*
*/
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
@ -16,9 +17,7 @@
#include "ntfs.h"
#include "ntfs_fs.h"
/*
* Convert little endian utf16 to nls string
*/
/* Convert little endian UTF-16 to NLS string. */
int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
u8 *buf, int buf_len)
{
@ -30,7 +29,7 @@ int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
static_assert(sizeof(wchar_t) == sizeof(__le16));
if (!nls) {
/* utf16 -> utf8 */
/* UTF-16 -> UTF-8 */
ret = utf16s_to_utf8s((wchar_t *)uni->name, uni->len,
UTF16_LITTLE_ENDIAN, buf, buf_len);
buf[ret] = '\0';
@ -89,8 +88,9 @@ int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
// clang-format on
/*
* modified version of put_utf16 from fs/nls/nls_base.c
* is sparse warnings free
* put_utf16 - Modified version of put_utf16 from fs/nls/nls_base.c
*
* Function is sparse warnings free.
*/
static inline void put_utf16(wchar_t *s, unsigned int c,
enum utf16_endian endian)
@ -112,8 +112,10 @@ static inline void put_utf16(wchar_t *s, unsigned int c,
}
/*
* modified version of 'utf8s_to_utf16s' allows to
* detect -ENAMETOOLONG without writing out of expected maximum
* _utf8s_to_utf16s
*
* Modified version of 'utf8s_to_utf16s' that allows detecting
* -ENAMETOOLONG without writing out of expected maximum.
*/
static int _utf8s_to_utf16s(const u8 *s, int inlen, enum utf16_endian endian,
wchar_t *pwcs, int maxout)
@ -165,17 +167,18 @@ static int _utf8s_to_utf16s(const u8 *s, int inlen, enum utf16_endian endian,
}
/*
* Convert input string to utf16
*
* name, name_len - input name
* uni, max_ulen - destination memory
* endian - endian of target utf16 string
* ntfs_nls_to_utf16 - Convert input string to UTF-16.
* @name: Input name.
* @name_len: Input name length.
* @uni: Destination memory.
* @max_ulen: Destination memory maximum length.
* @endian: Endian of target UTF-16 string.
*
* This function is called:
* - to create ntfs name
* - to create NTFS name
* - to create symlink
*
* returns utf16 string length or error (if negative)
* Return: UTF-16 string length or error (if negative).
*/
int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
struct cpu_str *uni, u32 max_ulen,
@ -230,7 +233,9 @@ int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
return ret;
}
/* helper function */
/*
* dir_search_u - Helper function.
*/
struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
struct ntfs_fnd *fnd)
{
@ -295,7 +300,7 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
if (ino == MFT_REC_ROOT)
return 0;
/* Skip meta files ( unless option to show metafiles is set ) */
/* Skip meta files unless option to show metafiles is set. */
if (!sbi->options.showmeta && ntfs_is_meta_file(sbi, ino))
return 0;
@ -316,9 +321,7 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
}
/*
* ntfs_read_hdr
*
* helper function 'ntfs_readdir'
* ntfs_read_hdr - Helper function for ntfs_readdir().
*/
static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
const struct INDEX_HDR *hdr, u64 vbo, u64 pos,
@ -342,7 +345,7 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
if (de_is_last(e))
return 0;
/* Skip already enumerated*/
/* Skip already enumerated. */
if (vbo + off < pos)
continue;
@ -359,11 +362,11 @@ static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
}
/*
* file_operations::iterate_shared
* ntfs_readdir - file_operations::iterate_shared
*
* Use non sorted enumeration.
* We have an example of broken volume where sorted enumeration
* counts each name twice
* counts each name twice.
*/
static int ntfs_readdir(struct file *file, struct dir_context *ctx)
{
@ -382,7 +385,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
struct indx_node *node = NULL;
u8 index_bits = ni->dir.index_bits;
/* name is a buffer of PATH_MAX length */
/* Name is a buffer of PATH_MAX length. */
static_assert(NTFS_NAME_LEN * 4 < PATH_MAX);
eod = i_size + sbi->record_size;
@ -393,16 +396,16 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
if (!dir_emit_dots(file, ctx))
return 0;
/* allocate PATH_MAX bytes */
/* Allocate PATH_MAX bytes. */
name = __getname();
if (!name)
return -ENOMEM;
if (!ni->mi_loaded && ni->attr_list.size) {
/*
* directory inode is locked for read
* load all subrecords to avoid 'write' access to 'ni' during
* directory reading
* Directory inode is locked for read.
* Load all subrecords to avoid 'write' access to 'ni' during
* directory reading.
*/
ni_lock(ni);
if (!ni->mi_loaded && ni->attr_list.size) {

View File

@ -3,8 +3,10 @@
*
* Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
*
* regular file handling primitives for ntfs-based filesystems
* Regular file handling primitives for NTFS-based filesystems.
*
*/
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
@ -62,7 +64,7 @@ static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
case FITRIM:
return ntfs_ioctl_fitrim(sbi, arg);
}
return -ENOTTY; /* Inappropriate ioctl for device */
return -ENOTTY; /* Inappropriate ioctl for device. */
}
#ifdef CONFIG_COMPAT
@ -74,7 +76,7 @@ static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
#endif
/*
* inode_operations::getattr
* ntfs_getattr - inode_operations::getattr
*/
int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
struct kstat *stat, u32 request_mask, u32 flags)
@ -170,7 +172,7 @@ static int ntfs_extend_initialized_size(struct file *file,
zero_user_segment(page, zerofrom, PAGE_SIZE);
/* this function in any case puts page*/
/* This function in any case puts page. */
err = pagecache_write_end(file, mapping, pos, len, len, page,
fsdata);
if (err < 0)
@ -195,9 +197,7 @@ out:
}
/*
* ntfs_zero_range
*
* Helper function for punch_hole.
* ntfs_zero_range - Helper function for punch_hole.
* It zeroes a range [vbo, vbo_to).
*/
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
@ -356,7 +356,7 @@ void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
}
/*
* file_operations::mmap
* ntfs_file_mmap - file_operations::mmap
*/
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
@ -387,7 +387,7 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
from + vma->vm_end - vma->vm_start);
if (is_sparsed(ni)) {
/* allocate clusters for rw map */
/* Allocate clusters for rw map. */
struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
CLST lcn, len;
CLST vcn = from >> sbi->cluster_bits;
@ -436,7 +436,7 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
if (end <= inode->i_size && !extend_init)
return 0;
/*mark rw ntfs as dirty. it will be cleared at umount*/
/* Mark rw ntfs as dirty. It will be cleared at umount. */
ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);
if (end > inode->i_size) {
@ -530,6 +530,8 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
}
/*
* ntfs_fallocate
*
* Preallocate space for a file. This implements ntfs's fallocate file
* operation, which gets called from sys_fallocate system call. User
* space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
@ -547,11 +549,11 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
loff_t i_size;
int err;
/* No support for dir */
/* No support for dir. */
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
/* Return error if mode is not supported */
/* Return error if mode is not supported. */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_COLLAPSE_RANGE)) {
ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
@ -565,7 +567,7 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
i_size = inode->i_size;
if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
/* should never be here, see ntfs_file_open*/
/* Should never be here, see ntfs_file_open. */
err = -EOPNOTSUPP;
goto out;
}
@ -646,7 +648,7 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
/*
* Write data that will be shifted to preserve them
* when discarding page cache below
* when discarding page cache below.
*/
err = filemap_write_and_wait_range(inode->i_mapping, end,
LLONG_MAX);
@ -663,7 +665,7 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
ni_unlock(ni);
} else {
/*
* normal file: allocate clusters, do not change 'valid' size
* Normal file: Allocate clusters, do not change 'valid' size.
*/
err = ntfs_set_size(inode, max(end, i_size));
if (err)
@ -677,10 +679,10 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
bool new;
/*
* allocate but not zero new clusters (see below comments)
* this breaks security (one can read unused on-disk areas)
* zeroing these clusters may be too long
* may be we should check here for root rights?
* Allocate but do not zero new clusters. (see below comments)
* This breaks security: One can read unused on-disk areas.
* Zeroing these clusters may be too long.
* Maybe we should check here for root rights?
*/
for (; vcn < cend; vcn += clen) {
err = attr_data_get_block(ni, vcn, cend - vcn,
@ -691,15 +693,15 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
continue;
/*
* Unwritten area
* NTFS is not able to store several unwritten areas
* Activate 'ntfs_sparse_cluster' to zero new allocated clusters
* Unwritten area.
* NTFS is not able to store several unwritten areas.
* Activate 'ntfs_sparse_cluster' to zero new allocated clusters.
*
* Dangerous in case:
* 1G of sparsed clusters + 1 cluster of data =>
* valid_size == 1G + 1 cluster
* fallocate(1G) will zero 1G and this can be very long
* xfstest 016/086 will fail without 'ntfs_sparse_cluster'
* xfstest 016/086 will fail without 'ntfs_sparse_cluster'.
*/
ntfs_sparse_cluster(inode, NULL, vcn,
min(vcn_v - vcn, clen));
@ -708,7 +710,7 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
if (mode & FALLOC_FL_KEEP_SIZE) {
ni_lock(ni);
/*true - keep preallocated*/
/* True - Keep preallocated. */
err = attr_set_size(ni, ATTR_DATA, NULL, 0,
&ni->file.run, i_size, &ni->i_valid,
true, NULL);
@ -730,7 +732,7 @@ out:
}
/*
* inode_operations::setattr
* ntfs3_setattr - inode_operations::setattr
*/
int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
struct iattr *attr)
@ -744,9 +746,9 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
int err;
if (sbi->options.no_acs_rules) {
/* "no access rules" - force any changes of time etc. */
/* "No access rules" - Force any changes of time etc. */
attr->ia_valid |= ATTR_FORCE;
/* and disable for editing some attributes */
/* And disable for editing some attributes. */
attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
ia_valid = attr->ia_valid;
}
@ -759,7 +761,7 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
loff_t oldsize = inode->i_size;
if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
/* should never be here, see ntfs_file_open*/
/* Should never be here, see ntfs_file_open(). */
err = -EOPNOTSUPP;
goto out;
}
@ -783,7 +785,7 @@ int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if (err)
goto out;
/* linux 'w' -> windows 'ro' */
/* Linux 'w' -> Windows 'ro'. */
if (0222 & inode->i_mode)
ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
else
@ -834,7 +836,11 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
return err;
}
/* returns array of locked pages */
/*
* ntfs_get_frame_pages
*
* Return: Array of locked pages.
*/
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
struct page **pages, u32 pages_per_frame,
bool *frame_uptodate)
@ -867,7 +873,9 @@ static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
return 0;
}
/*helper for ntfs_file_write_iter (compressed files)*/
/*
* ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
*/
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
int err;
@ -913,7 +921,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
if (err)
goto out;
/* zero range [valid : pos) */
/* Zero range [valid : pos). */
while (valid < pos) {
CLST lcn, clen;
@ -932,7 +940,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
continue;
}
/* Load full frame */
/* Load full frame. */
err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
pages, pages_per_frame,
&frame_uptodate);
@ -978,7 +986,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
ni->i_valid = valid = frame_vbo + frame_size;
}
/* copy user data [pos : pos + count) */
/* Copy user data [pos : pos + count). */
while (count) {
size_t copied, bytes;
@ -996,7 +1004,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
/* Load full frame */
/* Load full frame. */
err = ntfs_get_frame_pages(mapping, index, pages,
pages_per_frame, &frame_uptodate);
if (err)
@ -1025,7 +1033,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
ip = off >> PAGE_SHIFT;
off = offset_in_page(pos);
/* copy user data to pages */
/* Copy user data to pages. */
for (;;) {
size_t cp, tail = PAGE_SIZE - off;
@ -1091,7 +1099,7 @@ out:
}
/*
* file_operations::write_iter
* ntfs_file_write_iter - file_operations::write_iter
*/
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
@ -1127,7 +1135,7 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
/* should never be here, see ntfs_file_open*/
/* Should never be here, see ntfs_file_open(). */
ret = -EOPNOTSUPP;
goto out;
}
@ -1149,7 +1157,7 @@ out:
}
/*
* file_operations::open
* ntfs_file_open - file_operations::open
*/
int ntfs_file_open(struct inode *inode, struct file *file)
{
@ -1160,7 +1168,7 @@ int ntfs_file_open(struct inode *inode, struct file *file)
return -EOPNOTSUPP;
}
/* Decompress "external compressed" file if opened for rw */
/* Decompress "external compressed" file if opened for rw. */
if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
(file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
@ -1180,7 +1188,7 @@ int ntfs_file_open(struct inode *inode, struct file *file)
}
/*
* file_operations::release
* ntfs_file_release - file_operations::release
*/
static int ntfs_file_release(struct inode *inode, struct file *file)
{
@ -1188,7 +1196,7 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
struct ntfs_sb_info *sbi = ni->mi.sbi;
int err = 0;
/* if we are the last writer on the inode, drop the block reservation */
/* If we are last writer on the inode, drop the block reservation. */
if (sbi->options.prealloc && ((file->f_mode & FMODE_WRITE) &&
atomic_read(&inode->i_writecount) == 1)) {
ni_lock(ni);
@ -1203,7 +1211,9 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
return err;
}
/* file_operations::fiemap */
/*
* ntfs_fiemap - file_operations::fiemap
*/
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -101,9 +101,7 @@ const __le16 WOF_NAME[17] = {
// clang-format on
/*
* ntfs_fix_pre_write
*
* inserts fixups into 'rhdr' before writing to disk
* ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
*/
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
{
@ -117,7 +115,7 @@ bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
return false;
}
/* Get fixup pointer */
/* Get fixup pointer. */
fixup = Add2Ptr(rhdr, fo);
if (*fixup >= 0x7FFF)
@ -138,10 +136,9 @@ bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
}
/*
* ntfs_fix_post_read
* ntfs_fix_post_read - Remove fixups after reading from disk.
*
* remove fixups after reading from disk
* Returns < 0 if error, 0 if ok, 1 if need to update fixups
* Return: < 0 if error, 0 if ok, 1 if need to update fixups.
*/
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
bool simple)
@ -154,26 +151,26 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
: le16_to_cpu(rhdr->fix_num);
/* Check errors */
/* Check errors. */
if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
fn * SECTOR_SIZE > bytes) {
return -EINVAL; /* native chkntfs returns ok! */
return -EINVAL; /* Native chkntfs returns ok! */
}
/* Get fixup pointer */
/* Get fixup pointer. */
fixup = Add2Ptr(rhdr, fo);
sample = *fixup;
ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
ret = 0;
while (fn--) {
/* Test current word */
/* Test current word. */
if (*ptr != sample) {
/* Fixup does not match! Is it serious error? */
ret = -E_NTFS_FIXUP;
}
/* Replace fixup */
/* Replace fixup. */
*ptr = *++fixup;
ptr += SECTOR_SIZE / sizeof(short);
}
@ -182,9 +179,7 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
}
/*
* ntfs_extend_init
*
* loads $Extend file
* ntfs_extend_init - Load $Extend file.
*/
int ntfs_extend_init(struct ntfs_sb_info *sbi)
{
@ -209,7 +204,7 @@ int ntfs_extend_init(struct ntfs_sb_info *sbi)
goto out;
}
/* if ntfs_iget5 reads from disk it never returns bad inode */
/* If ntfs_iget5() reads from disk it never returns bad inode. */
if (!S_ISDIR(inode->i_mode)) {
err = -EINVAL;
goto out;
@ -261,7 +256,7 @@ int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
struct MFT_REF ref;
struct inode *inode;
/* Check for 4GB */
/* Check for 4GB. */
if (ni->vfs_inode.i_size >= 0x100000000ull) {
ntfs_err(sb, "\x24LogFile is too big");
err = -EINVAL;
@ -280,7 +275,7 @@ int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
inode = NULL;
if (!inode) {
/* Try to use mft copy */
/* Try to use MFT copy. */
u64 t64 = sbi->mft.lbo;
sbi->mft.lbo = sbi->mft.lbo2;
@ -298,7 +293,7 @@ int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
sbi->mft.ni = ntfs_i(inode);
/* LogFile should not contains attribute list */
/* LogFile should not contain an attribute list. */
err = ni_load_all_mi(sbi->mft.ni);
if (!err)
err = log_replay(ni, &initialized);
@ -317,7 +312,7 @@ int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
if (sb_rdonly(sb) || !initialized)
goto out;
/* fill LogFile by '-1' if it is initialized */
/* Fill LogFile by '-1' if it is initialized. */
err = ntfs_bio_fill_1(sbi, &ni->file.run);
out:
@ -329,7 +324,7 @@ out:
/*
* ntfs_query_def
*
* returns current ATTR_DEF_ENTRY for given attribute type
* Return: Current ATTR_DEF_ENTRY for given attribute type.
*/
const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
enum ATTR_TYPE type)
@ -356,9 +351,7 @@ const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
}
/*
* ntfs_look_for_free_space
*
* looks for a free space in bitmap
* ntfs_look_for_free_space - Look for a free space in bitmap.
*/
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
CLST *new_lcn, CLST *new_len,
@ -406,7 +399,7 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
/*
* 'Cause cluster 0 is always used this value means that we should use
* cached value of 'next_free_lcn' to improve performance
* cached value of 'next_free_lcn' to improve performance.
*/
if (!lcn)
lcn = sbi->used.next_free_lcn;
@ -420,18 +413,18 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
goto ok;
}
/* Try to use clusters from MftZone */
/* Try to use clusters from MftZone. */
zlen = wnd_zone_len(wnd);
zeroes = wnd_zeroes(wnd);
/* Check too big request */
/* Check too big request. */
if (len > zeroes + zlen)
goto no_space;
if (zlen <= NTFS_MIN_MFT_ZONE)
goto no_space;
/* How many clusters to cat from zone */
/* How many clusters to cut from zone. */
zlcn = wnd_zone_bit(wnd);
zlen2 = zlen >> 1;
ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
@ -445,7 +438,7 @@ int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
wnd_zone_set(wnd, zlcn, new_zlen);
/* allocate continues clusters */
/* Allocate contiguous clusters. */
*new_len =
wnd_find(wnd, len, 0,
BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &a_lcn);
@ -467,7 +460,7 @@ ok:
if (opt & ALLOCATE_MFT)
goto out;
/* Set hint for next requests */
/* Set hint for next requests. */
sbi->used.next_free_lcn = *new_lcn + *new_len;
out:
@ -476,10 +469,9 @@ out:
}
/*
* ntfs_extend_mft
* ntfs_extend_mft - Allocate additional MFT records.
*
* allocates additional MFT records
* sbi->mft.bitmap is locked for write
* sbi->mft.bitmap is locked for write.
*
* NOTE: recursive:
* ntfs_look_free_mft ->
@ -490,8 +482,9 @@ out:
* ni_ins_attr_ext ->
* ntfs_look_free_mft ->
* ntfs_extend_mft
* To avoid recursive always allocate space for two new mft records
* see attrib.c: "at least two mft to avoid recursive loop"
*
* To avoid recursive always allocate space for two new MFT records
* see attrib.c: "at least two MFT to avoid recursive loop".
*/
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
{
@ -505,7 +498,7 @@ static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
/* Step 1: Resize $MFT::DATA */
/* Step 1: Resize $MFT::DATA. */
down_write(&ni->file.run_lock);
err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
new_mft_bytes, NULL, false, &attr);
@ -519,13 +512,13 @@ static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
ni->mi.dirty = true;
/* Step 2: Resize $MFT::BITMAP */
/* Step 2: Resize $MFT::BITMAP. */
new_bitmap_bytes = bitmap_size(new_mft_total);
err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
/* Refresh Mft Zone if necessary */
/* Refresh MFT Zone if necessary. */
down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
ntfs_refresh_zone(sbi);
@ -549,9 +542,7 @@ out:
}
/*
* ntfs_look_free_mft
*
* looks for a free MFT record
* ntfs_look_free_mft - Look for a free MFT record.
*/
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
struct ntfs_inode *ni, struct mft_inode **mi)
@ -572,7 +563,7 @@ int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
zlen = wnd_zone_len(wnd);
/* Always reserve space for MFT */
/* Always reserve space for MFT. */
if (zlen) {
if (mft) {
zbit = wnd_zone_bit(wnd);
@ -582,7 +573,7 @@ int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
goto found;
}
/* No MFT zone. find the nearest to '0' free MFT */
/* No MFT zone. Find the nearest to '0' free MFT. */
if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
/* Resize MFT */
mft_total = wnd->nbits;
@ -601,10 +592,10 @@ int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
/*
* Look for free record reserved area [11-16) ==
* [MFT_REC_RESERVED, MFT_REC_FREE ) MFT bitmap always
* marks it as used
* marks it as used.
*/
if (!sbi->mft.reserved_bitmap) {
/* Once per session create internal bitmap for 5 bits */
/* Once per session create internal bitmap for 5 bits. */
sbi->mft.reserved_bitmap = 0xFF;
ref.high = 0;
@ -671,7 +662,7 @@ reserve_mft:
while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
zlen -= 1;
/* [zbit, zbit + zlen) will be used for Mft itself */
/* [zbit, zbit + zlen) will be used for MFT itself. */
from = sbi->mft.used;
if (from < zbit)
from = zbit;
@ -692,7 +683,7 @@ reserve_mft:
found:
if (!mft) {
/* The request to get record for general purpose */
/* The request to get record for general purpose. */
if (sbi->mft.next_free < MFT_REC_USER)
sbi->mft.next_free = MFT_REC_USER;
@ -717,7 +708,7 @@ found:
goto out;
}
/* We have found a record that are not reserved for next MFT */
/* We have found a record that is not reserved for next MFT. */
if (*rno >= MFT_REC_FREE)
wnd_set_used(wnd, *rno, 1);
else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
@ -731,9 +722,7 @@ out:
}
/*
* ntfs_mark_rec_free
*
* marks record as free
* ntfs_mark_rec_free - Mark record as free.
*/
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
{
@ -762,10 +751,9 @@ out:
}
/*
* ntfs_clear_mft_tail
* ntfs_clear_mft_tail - Format empty records [from, to).
*
* formats empty records [from, to)
* sbi->mft.bitmap is locked for write
* sbi->mft.bitmap is locked for write.
*/
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
{
@ -804,12 +792,11 @@ out:
}
/*
* ntfs_refresh_zone
* ntfs_refresh_zone - Refresh MFT zone.
*
* refreshes Mft zone
* sbi->used.bitmap is locked for rw
* sbi->mft.bitmap is locked for write
* sbi->mft.ni->file.run_lock for write
* sbi->used.bitmap is locked for rw.
* sbi->mft.bitmap is locked for write.
* sbi->mft.ni->file.run_lock for write.
*/
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
{
@ -818,14 +805,14 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
struct wnd_bitmap *wnd = &sbi->used.bitmap;
struct ntfs_inode *ni = sbi->mft.ni;
/* Do not change anything unless we have non empty Mft zone */
/* Do not change anything unless we have non empty MFT zone. */
if (wnd_zone_len(wnd))
return 0;
/*
* Compute the mft zone at two steps
* It would be nice if we are able to allocate
* 1/8 of total clusters for MFT but not more then 512 MB
 * Compute the MFT zone in two steps.
 * It would be nice if we are able to allocate 1/8 of
 * total clusters for MFT but not more than 512 MB.
*/
zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
zone_max = wnd->nbits >> 3;
@ -838,29 +825,27 @@ int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
lcn = SPARSE_LCN;
/* We should always find Last Lcn for MFT */
/* We should always find Last Lcn for MFT. */
if (lcn == SPARSE_LCN)
return -EINVAL;
lcn_s = lcn + 1;
/* Try to allocate clusters after last MFT run */
/* Try to allocate clusters after last MFT run. */
zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
if (!zlen) {
ntfs_notice(sbi->sb, "MftZone: unavailable");
return 0;
}
/* Truncate too large zone */
/* Truncate too large zone. */
wnd_zone_set(wnd, lcn_s, zlen);
return 0;
}
/*
* ntfs_update_mftmirr
*
* updates $MFTMirr data
* ntfs_update_mftmirr - Update $MFTMirr data.
*/
int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
{
@ -923,9 +908,9 @@ out:
/*
* ntfs_set_state
*
* mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
* umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
* ntfs error: ntfs_set_state(NTFS_DIRTY_ERROR)
* Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
* Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
* NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
*/
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
{
@ -936,14 +921,14 @@ int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
struct ntfs_inode *ni;
/*
* do not change state if fs was real_dirty
* do not change state if fs already dirty(clear)
* do not change any thing if mounted read only
* Do not change state if fs was real_dirty.
* Do not change state if fs already dirty(clear).
* Do not change any thing if mounted read only.
*/
if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
return 0;
/* Check cached value */
/* Check cached value. */
if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
(sbi->volume.flags & VOLUME_FLAG_DIRTY))
return 0;
@ -978,7 +963,7 @@ int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
info->flags &= ~VOLUME_FLAG_DIRTY;
break;
}
/* cache current volume flags*/
/* Cache current volume flags. */
sbi->volume.flags = info->flags;
mi->dirty = true;
err = 0;
@ -989,7 +974,7 @@ out:
return err;
mark_inode_dirty(&ni->vfs_inode);
/*verify(!ntfs_update_mftmirr()); */
/* verify(!ntfs_update_mftmirr()); */
/*
* if we used wait=1, sync_inode_metadata waits for the io for the
@ -1005,9 +990,7 @@ out:
}
/*
* security_hash
*
* calculates a hash of security descriptor
 * security_hash - Calculate a hash of security descriptor.
*/
static inline __le32 security_hash(const void *sd, size_t bytes)
{
@ -1193,13 +1176,13 @@ int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
struct buffer_head *bh;
if (!run) {
/* first reading of $Volume + $MFTMirr + LogFile goes here*/
/* First reading of $Volume + $MFTMirr + $LogFile goes here. */
if (vbo > MFT_REC_VOL * sbi->record_size) {
err = -ENOENT;
goto out;
}
/* use absolute boot's 'MFTCluster' to read record */
/* Use absolute boot's 'MFTCluster' to read record. */
lbo = vbo + sbi->mft.lbo;
len = sbi->record_size;
} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
@ -1290,7 +1273,11 @@ out:
return err;
}
/* Returns < 0 if error, 0 if ok, '-E_NTFS_FIXUP' if need to update fixups */
/*
* ntfs_read_bh
*
* Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
*/
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
struct ntfs_buffers *nb)
@ -1487,7 +1474,9 @@ static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
return bio;
}
/* read/write pages from/to disk*/
/*
* ntfs_bio_pages - Read/write pages from/to disk.
*/
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
u32 op)
@ -1509,7 +1498,7 @@ int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
blk_start_plug(&plug);
/* align vbo and bytes to be 512 bytes aligned */
/* Align vbo and bytes to be 512 bytes aligned. */
lbo = (vbo + bytes + 511) & ~511ull;
vbo = vbo & ~511ull;
bytes = lbo - vbo;
@ -1588,9 +1577,10 @@ out:
}
/*
* Helper for ntfs_loadlog_and_replay
* fill on-disk logfile range by (-1)
* this means empty logfile
* ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
*
* Fill on-disk logfile range by (-1)
* this means empty logfile.
*/
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
{
@ -1622,7 +1612,7 @@ int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
}
/*
* TODO: try blkdev_issue_write_same
* TODO: Try blkdev_issue_write_same.
*/
blk_start_plug(&plug);
do {
@ -1719,8 +1709,8 @@ out:
/*
* O:BAG:BAD:(A;OICI;FA;;;WD)
* owner S-1-5-32-544 (Administrators)
* group S-1-5-32-544 (Administrators)
* Owner S-1-5-32-544 (Administrators)
* Group S-1-5-32-544 (Administrators)
* ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
*/
const u8 s_default_security[] __aligned(8) = {
@ -1741,7 +1731,9 @@ static inline u32 sid_length(const struct SID *sid)
}
/*
* Thanks Mark Harmstone for idea
* is_acl_valid
*
 * Thanks to Mark Harmstone for the idea.
*/
static bool is_acl_valid(const struct ACL *acl, u32 len)
{
@ -1857,9 +1849,7 @@ bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
}
/*
* ntfs_security_init
*
* loads and parse $Secure
* ntfs_security_init - Load and parse $Secure.
*/
int ntfs_security_init(struct ntfs_sb_info *sbi)
{
@ -1940,9 +1930,9 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
sds_size = inode->i_size;
/* Find the last valid Id */
/* Find the last valid Id. */
sbi->security.next_id = SECURITY_ID_FIRST;
/* Always write new security at the end of bucket */
/* Always write new security at the end of bucket. */
sbi->security.next_off =
ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
@ -1975,9 +1965,7 @@ out:
}
/*
* ntfs_get_security_by_id
*
* reads security descriptor by id
* ntfs_get_security_by_id - Read security descriptor by id.
*/
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
struct SECURITY_DESCRIPTOR_RELATIVE **sd,
@ -2010,7 +1998,7 @@ int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
goto out;
}
/* Try to find this SECURITY descriptor in SII indexes */
/* Try to find this SECURITY descriptor in SII indexes. */
err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
if (err)
@ -2026,9 +2014,7 @@ int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
}
if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
/*
* looks like too big security. 0x10000 - is arbitrary big number
*/
/* Looks like too big security. 0x10000 - is arbitrary big number. */
err = -EFBIG;
goto out;
}
@ -2071,9 +2057,7 @@ out:
}
/*
* ntfs_insert_security
*
* inserts security descriptor into $Secure::SDS
* ntfs_insert_security - Insert security descriptor into $Secure::SDS.
*
* SECURITY Descriptor Stream data is organized into chunks of 256K bytes
* and it contains a mirror copy of each security descriptor. When writing
@ -2114,7 +2098,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
*inserted = false;
*security_id = SECURITY_ID_INVALID;
/* Allocate a temporal buffer*/
/* Allocate a temporal buffer. */
d_security = kzalloc(aligned_sec_size, GFP_NOFS);
if (!d_security)
return -ENOMEM;
@ -2140,8 +2124,8 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
}
/*
* Check if such security already exists
* use "SDH" and hash -> to get the offset in "SDS"
* Check if such security already exists.
* Use "SDH" and hash -> to get the offset in "SDS".
*/
err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
@ -2161,7 +2145,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
d_security->key.hash == hash_key.hash &&
!memcmp(d_security + 1, sd, size_sd)) {
*security_id = d_security->key.sec_id;
/*such security already exists*/
/* Such security already exists. */
err = 0;
goto out;
}
@ -2176,17 +2160,17 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
break;
}
/* Zero unused space */
/* Zero unused space. */
next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
left = SecurityDescriptorsBlockSize - next;
/* Zero gap until SecurityDescriptorsBlockSize */
/* Zero gap until SecurityDescriptorsBlockSize. */
if (left < new_sec_size) {
/* zero "left" bytes from sbi->security.next_off */
/* Zero "left" bytes from sbi->security.next_off. */
sbi->security.next_off += SecurityDescriptorsBlockSize + left;
}
/* Zero tail of previous security */
/* Zero tail of previous security. */
//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
/*
@ -2199,14 +2183,14 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
* zero "tozero" bytes from sbi->security.next_off - tozero
*/
/* format new security descriptor */
/* Format new security descriptor. */
d_security->key.hash = hash_key.hash;
d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
d_security->off = cpu_to_le64(sbi->security.next_off);
d_security->size = cpu_to_le32(new_sec_size);
memcpy(d_security + 1, sd, size_sd);
/* Write main SDS bucket */
/* Write main SDS bucket. */
err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
d_security, aligned_sec_size);
@ -2224,13 +2208,13 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
goto out;
}
/* Write copy SDS bucket */
/* Write copy SDS bucket. */
err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
aligned_sec_size);
if (err)
goto out;
/* Fill SII entry */
/* Fill SII entry. */
sii_e.de.view.data_off =
cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
@ -2246,7 +2230,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
if (err)
goto out;
/* Fill SDH entry */
/* Fill SDH entry. */
sdh_e.de.view.data_off =
cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
@ -2271,7 +2255,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
if (inserted)
*inserted = true;
/* Update Id and offset for next descriptor */
/* Update Id and offset for next descriptor. */
sbi->security.next_id += 1;
sbi->security.next_off += aligned_sec_size;
@ -2285,9 +2269,7 @@ out:
}
/*
* ntfs_reparse_init
*
* loads and parse $Extend/$Reparse
* ntfs_reparse_init - Load and parse $Extend/$Reparse.
*/
int ntfs_reparse_init(struct ntfs_sb_info *sbi)
{
@ -2325,9 +2307,7 @@ out:
}
/*
* ntfs_objid_init
*
* loads and parse $Extend/$ObjId
* ntfs_objid_init - Load and parse $Extend/$ObjId.
*/
int ntfs_objid_init(struct ntfs_sb_info *sbi)
{
@ -2449,14 +2429,14 @@ int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
goto out;
}
/* 1 - forces to ignore rkey.ReparseTag when comparing keys */
/* 1 - forces to ignore rkey.ReparseTag when comparing keys. */
err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
(struct NTFS_DE **)&re, fnd);
if (err)
goto out;
if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
/* Impossible. Looks like volume corrupt?*/
/* Impossible. Looks like volume corrupt? */
goto out;
}
@ -2528,9 +2508,7 @@ out:
}
/*
* run_deallocate
*
* deallocate clusters
* run_deallocate - Deallocate clusters.
*/
int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
{

File diff suppressed because it is too large Load Diff

View File

@ -20,9 +20,7 @@
#include "ntfs_fs.h"
/*
* ntfs_read_mft
*
* reads record and parses MFT
 * ntfs_read_mft - Read record and parse MFT.
*/
static struct inode *ntfs_read_mft(struct inode *inode,
const struct cpu_str *name,
@ -91,7 +89,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
}
if (le32_to_cpu(rec->total) != sbi->record_size) {
// bad inode?
// Bad inode?
err = -EINVAL;
goto out;
}
@ -99,17 +97,17 @@ static struct inode *ntfs_read_mft(struct inode *inode,
if (!is_rec_base(rec))
goto Ok;
/* record should contain $I30 root */
/* Record should contain $I30 root. */
is_dir = rec->flags & RECORD_FLAG_DIR;
inode->i_generation = le16_to_cpu(rec->seq);
/* Enumerate all struct Attributes MFT */
/* Enumerate all struct Attributes MFT. */
le = NULL;
attr = NULL;
/*
* to reduce tab pressure use goto instead of
* To reduce tab pressure use goto instead of
* while( (attr = ni_enum_attr_ex(ni, attr, &le, NULL) ))
*/
next_attr:
@ -120,7 +118,7 @@ next_attr:
goto end_enum;
if (le && le->vcn) {
/* This is non primary attribute segment. Ignore if not MFT */
/* This is non primary attribute segment. Ignore if not MFT. */
if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
goto next_attr;
@ -190,7 +188,7 @@ next_attr:
case ATTR_DATA:
if (is_dir) {
/* ignore data attribute in dir record */
/* Ignore data attribute in dir record. */
goto next_attr;
}
@ -204,7 +202,7 @@ next_attr:
(ino != MFT_REC_SECURE || !attr->non_res ||
attr->name_len != ARRAY_SIZE(SDS_NAME) ||
memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
/* file contains stream attribute. ignore it */
/* File contains stream attribute. Ignore it. */
goto next_attr;
}
@ -327,10 +325,10 @@ next_attr:
t32 = le16_to_cpu(attr->nres.run_off);
}
/* Looks like normal symlink */
/* Looks like normal symlink. */
ni->i_valid = inode->i_size;
/* Clear directory bit */
/* Clear directory bit. */
if (ni->ni_flags & NI_FLAG_DIR) {
indx_clear(&ni->dir);
memset(&ni->dir, 0, sizeof(ni->dir));
@ -342,7 +340,7 @@ next_attr:
is_dir = false;
if (attr->non_res) {
run = &ni->file.run;
goto attr_unpack_run; // double break
goto attr_unpack_run; // Double break.
}
break;
@ -388,7 +386,7 @@ end_enum:
goto out;
if (!is_match && name) {
/* reuse rec as buffer for ascii name */
/* Reuse rec as buffer for ascii name. */
err = -ENOENT;
goto out;
}
@ -407,9 +405,9 @@ end_enum:
ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
/*
* dot and dot-dot should be included in count but was not
 * Dot and dot-dot should be included in count but were not
* included in enumeration.
* Usually a hard links to directories are disabled
 * Usually hard links to directories are disabled.
*/
inode->i_op = &ntfs_dir_inode_operations;
inode->i_fop = &ntfs_dir_operations;
@ -433,7 +431,7 @@ end_enum:
init_special_inode(inode, mode, inode->i_rdev);
} else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
/* Records in $Extend are not a files or general directories */
/* Records in $Extend are not a files or general directories. */
} else {
err = -EINVAL;
goto out;
@ -449,7 +447,7 @@ end_enum:
inode->i_mode = mode;
if (!(ni->ni_flags & NI_FLAG_EA)) {
/* if no xattr then no security (stored in xattr) */
/* If no xattr then no security (stored in xattr). */
inode->i_flags |= S_NOSEC;
}
@ -469,7 +467,11 @@ out:
return ERR_PTR(err);
}
/* returns 1 if match */
/*
* ntfs_test_inode
*
* Return: 1 if match.
*/
static int ntfs_test_inode(struct inode *inode, void *data)
{
struct MFT_REF *ref = data;
@ -499,7 +501,7 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
if (inode->i_state & I_NEW)
inode = ntfs_read_mft(inode, name, ref);
else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
/* inode overlaps? */
/* Inode overlaps? */
make_bad_inode(inode);
}
@ -530,18 +532,18 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
CLST vcn, lcn, len;
bool new;
/*clear previous state*/
/* Clear previous state. */
clear_buffer_new(bh);
clear_buffer_uptodate(bh);
/* direct write uses 'create=0'*/
/* Direct write uses 'create=0'. */
if (!create && vbo >= ni->i_valid) {
/* out of valid */
/* Out of valid. */
return 0;
}
if (vbo >= inode->i_size) {
/* out of size */
/* Out of size. */
return 0;
}
@ -593,7 +595,7 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
valid = ni->i_valid;
if (ctx == GET_BLOCK_DIRECT_IO_W) {
/*ntfs_direct_IO will update ni->i_valid */
/* ntfs_direct_IO will update ni->i_valid. */
if (vbo >= valid)
set_buffer_new(bh);
} else if (create) {
@ -609,17 +611,17 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
mark_inode_dirty(inode);
}
} else if (vbo >= valid) {
/* read out of valid data*/
/* should never be here 'cause already checked */
/* Read out of valid data. */
/* Should never be here 'cause already checked. */
clear_buffer_mapped(bh);
} else if (vbo + bytes <= valid) {
/* normal read */
/* Normal read. */
} else if (vbo + block_size <= valid) {
/* normal short read */
/* Normal short read. */
bytes = block_size;
} else {
/*
* read across valid size: vbo < valid && valid < vbo + block_size
* Read across valid size: vbo < valid && valid < vbo + block_size
*/
bytes = block_size;
@ -700,7 +702,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
return err;
}
/* normal + sparse files */
/* Normal + sparse files. */
return mpage_readpage(page, ntfs_get_block);
}
@ -713,12 +715,12 @@ static void ntfs_readahead(struct readahead_control *rac)
loff_t pos;
if (is_resident(ni)) {
/* no readahead for resident */
/* No readahead for resident. */
return;
}
if (is_compressed(ni)) {
/* no readahead for compressed */
/* No readahead for compressed. */
return;
}
@ -727,7 +729,7 @@ static void ntfs_readahead(struct readahead_control *rac)
if (valid < i_size_read(inode) && pos <= valid &&
valid < pos + readahead_length(rac)) {
/* range cross 'valid'. read it page by page */
/* Range cross 'valid'. Read it page by page. */
return;
}
@ -761,7 +763,7 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t ret;
if (is_resident(ni)) {
/*switch to buffered write*/
/* Switch to buffered write. */
ret = 0;
goto out;
}
@ -781,7 +783,7 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
mark_inode_dirty(inode);
}
} else if (vbo < valid && valid < end) {
/* fix page */
/* Fix page. */
iov_iter_revert(iter, end - valid);
iov_iter_zero(end - valid, iter);
}
@ -797,7 +799,7 @@ int ntfs_set_size(struct inode *inode, u64 new_size)
struct ntfs_inode *ni = ntfs_i(inode);
int err;
/* Check for maximum file size */
/* Check for maximum file size. */
if (is_sparsed(ni) || is_compressed(ni)) {
if (new_size > sbi->maxbytes_sparse) {
err = -EFBIG;
@ -848,7 +850,7 @@ static int ntfs_writepages(struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
/* redirect call to 'ntfs_writepage' for resident files*/
/* Redirect call to 'ntfs_writepage' for resident files. */
get_block_t *get_block = is_resident(ni) ? NULL : &ntfs_get_block;
return mpage_writepages(mapping, wbc, get_block);
@ -901,7 +903,9 @@ out:
return err;
}
/* address_space_operations::write_end */
/*
 * ntfs_write_end - address_space_operations::write_end.
*/
static int ntfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, u32 len, u32 copied, struct page *page,
void *fsdata)
@ -919,7 +923,7 @@ static int ntfs_write_end(struct file *file, struct address_space *mapping,
ni_unlock(ni);
if (!err) {
dirty = true;
/* clear any buffers in page*/
/* Clear any buffers in page. */
if (page_has_buffers(page)) {
struct buffer_head *head, *bh;
@ -948,7 +952,7 @@ static int ntfs_write_end(struct file *file, struct address_space *mapping,
}
if (valid != ni->i_valid) {
/* ni->i_valid is changed in ntfs_get_block_vbo */
/* ni->i_valid is changed in ntfs_get_block_vbo. */
dirty = true;
}
@ -1009,10 +1013,11 @@ int ntfs_sync_inode(struct inode *inode)
}
/*
* helper function for ntfs_flush_inodes. This writes both the inode
* and the file data blocks, waiting for in flight data blocks before
* the start of the call. It does not wait for any io started
* during the call
* writeback_inode - Helper function for ntfs_flush_inodes().
*
* This writes both the inode and the file data blocks, waiting
* for in flight data blocks before the start of the call. It
* does not wait for any io started during the call.
*/
static int writeback_inode(struct inode *inode)
{
@ -1024,12 +1029,14 @@ static int writeback_inode(struct inode *inode)
}
/*
* write data and metadata corresponding to i1 and i2. The io is
* ntfs_flush_inodes
*
* Write data and metadata corresponding to i1 and i2. The io is
* started but we do not wait for any of it to finish.
*
* filemap_flush is used for the block device, so if there is a dirty
* filemap_flush() is used for the block device, so if there is a dirty
* page for a block already in flight, we will not wait and start the
* io over again
* io over again.
*/
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
struct inode *i2)
@ -1049,7 +1056,7 @@ int inode_write_data(struct inode *inode, const void *data, size_t bytes)
{
pgoff_t idx;
/* Write non resident data */
/* Write non resident data. */
for (idx = 0; bytes; idx++) {
size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
struct page *page = ntfs_map_page(inode->i_mapping, idx);
@ -1076,12 +1083,14 @@ int inode_write_data(struct inode *inode, const void *data, size_t bytes)
}
/*
* number of bytes to for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
* for unicode string of 'uni_len' length
* ntfs_reparse_bytes
*
 * Number of bytes needed for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
* for unicode string of @uni_len length.
*/
static inline u32 ntfs_reparse_bytes(u32 uni_len)
{
/* header + unicode string + decorated unicode string */
/* Header + unicode string + decorated unicode string. */
return sizeof(short) * (2 * uni_len + 4) +
offsetof(struct REPARSE_DATA_BUFFER,
SymbolicLinkReparseBuffer.PathBuffer);
@ -1103,14 +1112,14 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
rs = &rp->SymbolicLinkReparseBuffer;
rp_name = rs->PathBuffer;
/* Convert link name to utf16 */
/* Convert link name to UTF-16. */
err = ntfs_nls_to_utf16(sbi, symname, size,
(struct cpu_str *)(rp_name - 1), 2 * size,
UTF16_LITTLE_ENDIAN);
if (err < 0)
goto out;
/* err = the length of unicode name of symlink */
/* err = the length of unicode name of symlink. */
*nsize = ntfs_reparse_bytes(err);
if (*nsize > sbi->reparse.max_size) {
@ -1118,7 +1127,7 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
goto out;
}
/* translate linux '/' into windows '\' */
/* Translate Linux '/' into Windows '\'. */
for (i = 0; i < err; i++) {
if (rp_name[i] == cpu_to_le16('/'))
rp_name[i] = cpu_to_le16('\\');
@ -1129,20 +1138,21 @@ ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
SymbolicLinkReparseBuffer));
/* PrintName + SubstituteName */
/* PrintName + SubstituteName. */
rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
rs->PrintNameLength = rs->SubstituteNameOffset;
/*
* TODO: use relative path if possible to allow windows to parse this path
* 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE)
* TODO: Use relative path if possible to allow Windows to
* parse this path.
 * 0 - absolute path, 1 - relative path (SYMLINK_FLAG_RELATIVE).
*/
rs->Flags = 0;
memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
/* decorate SubstituteName */
/* Decorate SubstituteName. */
rp_name += err;
rp_name[0] = cpu_to_le16('\\');
rp_name[1] = cpu_to_le16('?');
@ -1204,13 +1214,13 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
fa = FILE_ATTRIBUTE_REPARSE_POINT;
/*
* linux: there are dir/file/symlink and so on
* NTFS: symlinks are "dir + reparse" or "file + reparse"
 * Linux: there are dir/file/symlink and so on.
* NTFS: symlinks are "dir + reparse" or "file + reparse".
* It is good idea to create:
* dir + reparse if 'symname' points to directory
* or
* file + reparse if 'symname' points to file
* Unfortunately kern_path hangs if symname contains 'dir'
* Unfortunately kern_path hangs if symname contains 'dir'.
*/
/*
@ -1229,14 +1239,14 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
*/
} else if (S_ISREG(mode)) {
if (sbi->options.sparse) {
/* sparsed regular file, cause option 'sparse' */
/* Sparsed regular file, cause option 'sparse'. */
fa = FILE_ATTRIBUTE_SPARSE_FILE |
FILE_ATTRIBUTE_ARCHIVE;
} else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
/* compressed regular file, if parent is compressed */
/* Compressed regular file, if parent is compressed. */
fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
} else {
/* regular file, default attributes */
/* Regular file, default attributes. */
fa = FILE_ATTRIBUTE_ARCHIVE;
}
} else {
@ -1246,17 +1256,17 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
if (!(mode & 0222))
fa |= FILE_ATTRIBUTE_READONLY;
/* allocate PATH_MAX bytes */
/* Allocate PATH_MAX bytes. */
new_de = __getname();
if (!new_de) {
err = -ENOMEM;
goto out1;
}
/*mark rw ntfs as dirty. it will be cleared at umount*/
/* Mark rw ntfs as dirty. It will be cleared at umount. */
ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
/* Step 1: allocate and fill new mft record */
/* Step 1: allocate and fill new mft record. */
err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
if (err)
goto out2;
@ -1277,7 +1287,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
rec->hard_links = cpu_to_le16(1);
attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
/* Get default security id */
/* Get default security id. */
sd = s_default_security;
sd_size = sizeof(s_default_security);
@ -1293,7 +1303,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
}
}
/* Insert standard info */
/* Insert standard info. */
std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
if (security_id == SECURITY_ID_INVALID) {
@ -1319,7 +1329,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
attr = Add2Ptr(attr, asize);
/* Insert file name */
/* Insert file name. */
err = fill_name_de(sbi, new_de, name, uni);
if (err)
goto out4;
@ -1348,7 +1358,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
attr = Add2Ptr(attr, asize);
if (security_id == SECURITY_ID_INVALID) {
/* Insert security attribute */
/* Insert security attribute. */
asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);
attr->type = ATTR_SECURE;
@ -1363,8 +1373,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
if (fa & FILE_ATTRIBUTE_DIRECTORY) {
/*
* regular directory or symlink to directory
* Create root attribute
* Regular directory or symlink to directory.
* Create root attribute.
*/
dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
@ -1394,12 +1404,12 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
e->flags = NTFS_IE_LAST;
} else if (S_ISLNK(mode)) {
/*
* symlink to file
* Create empty resident data attribute
* Symlink to file.
* Create empty resident data attribute.
*/
asize = SIZEOF_RESIDENT;
/* insert empty ATTR_DATA */
/* Insert empty ATTR_DATA. */
attr->type = ATTR_DATA;
attr->size = cpu_to_le32(SIZEOF_RESIDENT);
attr->id = cpu_to_le16(aid++);
@ -1407,13 +1417,13 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
attr->res.data_off = SIZEOF_RESIDENT_LE;
} else {
/*
* regular file or node
* Regular file or node.
*/
attr->type = ATTR_DATA;
attr->id = cpu_to_le16(aid++);
if (S_ISREG(mode)) {
/* Create empty non resident data attribute */
/* Create empty non resident data attribute. */
attr->non_res = 1;
attr->nres.evcn = cpu_to_le64(-1ll);
if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
@ -1437,7 +1447,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
}
attr->nres.run_off = attr->name_off;
} else {
/* Create empty resident data attribute */
/* Create empty resident data attribute. */
attr->size = cpu_to_le32(SIZEOF_RESIDENT);
attr->name_off = SIZEOF_RESIDENT_LE;
if (fa & FILE_ATTRIBUTE_SPARSE_FILE)
@ -1465,13 +1475,13 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
}
/*
* Insert ATTR_REPARSE
* Insert ATTR_REPARSE.
*/
attr = Add2Ptr(attr, asize);
attr->type = ATTR_REPARSE;
attr->id = cpu_to_le16(aid++);
/* resident or non resident? */
/* Resident or non resident? */
asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
t16 = PtrOffset(rec, attr);
@ -1479,7 +1489,7 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
CLST alen;
CLST clst = bytes_to_cluster(sbi, nsize);
/* bytes per runs */
/* Bytes per runs. */
t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
attr->non_res = 1;
@ -1534,12 +1544,12 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
rec->next_attr_id = cpu_to_le16(aid);
/* Step 2: Add new name in index */
/* Step 2: Add new name in index. */
err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd);
if (err)
goto out6;
/* Update current directory record */
/* Update current directory record. */
mark_inode_dirty(dir);
inode->i_generation = le16_to_cpu(rec->seq);
@ -1577,26 +1587,29 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
inode->i_flags |= S_NOSEC;
}
/* Write non resident data */
/* Write non resident data. */
if (nsize) {
err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize);
if (err)
goto out7;
}
/* call 'd_instantiate' after inode->i_op is set but before finish_open */
/*
* Call 'd_instantiate' after inode->i_op is set
* but before finish_open.
*/
d_instantiate(dentry, inode);
ntfs_save_wsl_perm(inode);
mark_inode_dirty(inode);
mark_inode_dirty(dir);
/* normal exit */
/* Normal exit. */
goto out2;
out7:
/* undo 'indx_insert_entry' */
/* Undo 'indx_insert_entry'. */
indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
le16_to_cpu(new_de->key_size), sbi);
out6:
@ -1649,15 +1662,15 @@ int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
if (!dir_root)
return -EINVAL;
/* allocate PATH_MAX bytes */
/* Allocate PATH_MAX bytes. */
new_de = __getname();
if (!new_de)
return -ENOMEM;
/*mark rw ntfs as dirty. it will be cleared at umount*/
/* Mark rw ntfs as dirty. It will be cleared at umount. */
ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);
// Insert file name
/* Insert file name. */
err = fill_name_de(sbi, new_de, name, NULL);
if (err)
goto out;
@ -1731,23 +1744,23 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
goto out1;
}
/* allocate PATH_MAX bytes */
/* Allocate PATH_MAX bytes. */
uni = __getname();
if (!uni) {
err = -ENOMEM;
goto out1;
}
/* Convert input string to unicode */
/* Convert input string to unicode. */
err = ntfs_nls_to_utf16(sbi, name->name, name->len, uni, NTFS_NAME_LEN,
UTF16_HOST_ENDIAN);
if (err < 0)
goto out2;
/*mark rw ntfs as dirty. it will be cleared at umount*/
/* Mark rw ntfs as dirty. It will be cleared at umount. */
ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
/* find name in record */
/* Find name in record. */
mi_get_ref(&dir_ni->mi, &ref);
le = NULL;
@ -1764,14 +1777,14 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
if (err)
goto out3;
/* Then remove name from mft */
/* Then remove name from MFT. */
ni_remove_attr_le(ni, attr_from_name(fname), le);
le16_add_cpu(&ni->mi.mrec->hard_links, -1);
ni->mi.dirty = true;
if (name_type != FILE_NAME_POSIX) {
/* Now we should delete name by type */
/* Now we should delete name by type. */
fname = ni_fname_type(ni, name_type, &le);
if (fname) {
err = indx_delete_entry(indx, dir_ni, fname,
@ -1837,13 +1850,13 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
struct le_str *uni;
struct ATTRIB *attr;
/* Reparse data present. Try to parse it */
/* Reparse data present. Try to parse it. */
static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
*buffer = 0;
/* Read into temporal buffer */
/* Read into temporal buffer. */
if (i_size > sbi->reparse.max_size || i_size <= sizeof(u32)) {
err = -EINVAL;
goto out;
@ -1875,10 +1888,10 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
err = -EINVAL;
/* Microsoft Tag */
/* Microsoft Tag. */
switch (rp->ReparseTag) {
case IO_REPARSE_TAG_MOUNT_POINT:
/* Mount points and junctions */
/* Mount points and junctions. */
/* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
MountPointReparseBuffer.PathBuffer))
@ -1940,20 +1953,20 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
goto out;
}
/* Users tag */
/* Users tag. */
uni = Add2Ptr(rp, sizeof(struct REPARSE_POINT) - 2);
nlen = le16_to_cpu(rp->ReparseDataLength) -
sizeof(struct REPARSE_POINT);
}
/* Convert nlen from bytes to UNICODE chars */
/* Convert nlen from bytes to UNICODE chars. */
nlen >>= 1;
/* Check that name is available */
/* Check that name is available. */
if (!nlen || &uni->name[nlen] > (__le16 *)Add2Ptr(rp, i_size))
goto out;
/* If name is already zero terminated then truncate it now */
/* If name is already zero terminated then truncate it now. */
if (!uni->name[nlen - 1])
nlen -= 1;
uni->len = nlen;
@ -1963,13 +1976,13 @@ static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
if (err < 0)
goto out;
/* translate windows '\' into linux '/' */
/* Translate Windows '\' into Linux '/'. */
for (i = 0; i < err; i++) {
if (buffer[i] == '\\')
buffer[i] = '/';
}
/* Always set last zero */
/* Always set last zero. */
buffer[err] = 0;
out:
kfree(to_free);

View File

@ -4,6 +4,7 @@
* Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
*
*/
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
@ -14,7 +15,7 @@
#include "ntfs_fs.h"
// clang-format off
/* src buffer is zero */
/* Src buffer is zero. */
#define LZNT_ERROR_ALL_ZEROS 1
#define LZNT_CHUNK_SIZE 0x1000
// clang-format on
@ -72,7 +73,7 @@ static size_t longest_match_std(const u8 *src, struct lznt *ctx)
hash[1] + 3, ctx->max_len - 3);
}
/* Compare two matches and select the best one */
/* Compare two matches and select the best one. */
if (len1 < len2) {
ctx->best_match = hash[1];
len1 = len2;
@ -129,10 +130,10 @@ static inline size_t parse_pair(u16 pair, size_t *offset, size_t index)
/*
* compress_chunk
*
* returns one of the three values:
* 0 - ok, 'cmpr' contains 'cmpr_chunk_size' bytes of compressed data
* 1 - input buffer is full zero
* -2 - the compressed buffer is too small to hold the compressed data
* Return:
* * 0 - Ok, @cmpr contains @cmpr_chunk_size bytes of compressed data.
* * 1 - Input buffer is full zero.
* * -2 - The compressed buffer is too small to hold the compressed data.
*/
static inline int compress_chunk(size_t (*match)(const u8 *, struct lznt *),
const u8 *unc, const u8 *unc_end, u8 *cmpr,
@ -145,7 +146,7 @@ static inline int compress_chunk(size_t (*match)(const u8 *, struct lznt *),
u8 *cp = cmpr + 3;
u8 *cp2 = cmpr + 2;
u8 not_zero = 0;
/* Control byte of 8-bit values: ( 0 - means byte as is, 1 - short pair ) */
/* Control byte of 8-bit values: ( 0 - means byte as is, 1 - short pair ). */
u8 ohdr = 0;
u8 *last;
u16 t16;
@ -165,7 +166,7 @@ static inline int compress_chunk(size_t (*match)(const u8 *, struct lznt *),
while (unc + s_max_off[idx] < up)
ctx->max_len = s_max_len[++idx];
// Find match
/* Find match. */
max_len = up + 3 <= unc_end ? (*match)(up, ctx) : 0;
if (!max_len) {
@ -211,7 +212,7 @@ NotCompressed:
return -2;
/*
* Copy non cmpr data
* Copy non cmpr data.
* 0x3FFF == ((LZNT_CHUNK_SIZE + 2 - 3) | 0x3000)
*/
cmpr[0] = 0xff;
@ -233,38 +234,38 @@ static inline ssize_t decompress_chunk(u8 *unc, u8 *unc_end, const u8 *cmpr,
u16 pair;
size_t offset, length;
/* Do decompression until pointers are inside range */
/* Do decompression until pointers are inside range. */
while (up < unc_end && cmpr < cmpr_end) {
/* Correct index */
while (unc + s_max_off[index] < up)
index += 1;
/* Check the current flag for zero */
/* Check the current flag for zero. */
if (!(ch & (1 << bit))) {
/* Just copy byte */
/* Just copy byte. */
*up++ = *cmpr++;
goto next;
}
/* Check for boundary */
/* Check for boundary. */
if (cmpr + 1 >= cmpr_end)
return -EINVAL;
/* Read a short from little endian stream */
/* Read a short from little endian stream. */
pair = cmpr[1];
pair <<= 8;
pair |= cmpr[0];
cmpr += 2;
/* Translate packed information into offset and length */
/* Translate packed information into offset and length. */
length = parse_pair(pair, &offset, index);
/* Check offset for boundary */
/* Check offset for boundary. */
if (unc + offset > up)
return -EINVAL;
/* Truncate the length if necessary */
/* Truncate the length if necessary. */
if (up + length >= unc_end)
length = unc_end - up;
@ -273,7 +274,7 @@ static inline ssize_t decompress_chunk(u8 *unc, u8 *unc_end, const u8 *cmpr,
*up = *(up - offset);
next:
/* Advance flag bit value */
/* Advance flag bit value. */
bit = (bit + 1) & 7;
if (!bit) {
@ -284,13 +285,14 @@ next:
}
}
/* return the size of uncompressed data */
/* Return the size of uncompressed data. */
return up - unc;
}
/*
* 0 - standard compression
* !0 - best compression, requires a lot of cpu
* get_lznt_ctx
* @level: 0 - Standard compression.
* !0 - Best compression, requires a lot of cpu.
*/
struct lznt *get_lznt_ctx(int level)
{
@ -303,11 +305,11 @@ struct lznt *get_lznt_ctx(int level)
}
/*
* compress_lznt
* compress_lznt - Compresses @unc into @cmpr
*
* Compresses "unc" into "cmpr"
* +x - ok, 'cmpr' contains 'final_compressed_size' bytes of compressed data
* 0 - input buffer is full zero
* Return:
* * +x - Ok, @cmpr contains 'final_compressed_size' bytes of compressed data.
* * 0 - Input buffer is full zero.
*/
size_t compress_lznt(const void *unc, size_t unc_size, void *cmpr,
size_t cmpr_size, struct lznt *ctx)
@ -327,7 +329,7 @@ size_t compress_lznt(const void *unc, size_t unc_size, void *cmpr,
match = &longest_match_best;
}
/* compression cycle */
/* Compression cycle. */
for (; unc_chunk < unc_end; unc_chunk += LZNT_CHUNK_SIZE) {
cmpr_size = 0;
err = compress_chunk(match, unc_chunk, unc_end, p, end,
@ -348,9 +350,7 @@ size_t compress_lznt(const void *unc, size_t unc_size, void *cmpr,
}
/*
* decompress_lznt
*
* decompresses "cmpr" into "unc"
* decompress_lznt - Decompress @cmpr into @unc.
*/
ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
size_t unc_size)
@ -364,24 +364,24 @@ ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
if (cmpr_size < sizeof(short))
return -EINVAL;
/* read chunk header */
/* Read chunk header. */
chunk_hdr = cmpr_chunk[1];
chunk_hdr <<= 8;
chunk_hdr |= cmpr_chunk[0];
/* loop through decompressing chunks */
/* Loop through decompressing chunks. */
for (;;) {
size_t chunk_size_saved;
size_t unc_use;
size_t cmpr_use = 3 + (chunk_hdr & (LZNT_CHUNK_SIZE - 1));
/* Check that the chunk actually fits the supplied buffer */
/* Check that the chunk actually fits the supplied buffer. */
if (cmpr_chunk + cmpr_use > cmpr_end)
return -EINVAL;
/* First make sure the chunk contains compressed data */
/* First make sure the chunk contains compressed data. */
if (chunk_hdr & 0x8000) {
/* Decompress a chunk and return if we get an error */
/* Decompress a chunk and return if we get an error. */
ssize_t err =
decompress_chunk(unc_chunk, unc_end,
cmpr_chunk + sizeof(chunk_hdr),
@ -390,7 +390,7 @@ ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
return err;
unc_use = err;
} else {
/* This chunk does not contain compressed data */
/* This chunk does not contain compressed data. */
unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end
? unc_end - unc_chunk
: LZNT_CHUNK_SIZE;
@ -404,21 +404,21 @@ ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
unc_use);
}
/* Advance pointers */
/* Advance pointers. */
cmpr_chunk += cmpr_use;
unc_chunk += unc_use;
/* Check for the end of unc buffer */
/* Check for the end of unc buffer. */
if (unc_chunk >= unc_end)
break;
/* Proceed the next chunk */
/* Proceed the next chunk. */
if (cmpr_chunk > cmpr_end - 2)
break;
chunk_size_saved = LZNT_CHUNK_SIZE;
/* read chunk header */
/* Read chunk header. */
chunk_hdr = cmpr_chunk[1];
chunk_hdr <<= 8;
chunk_hdr |= cmpr_chunk[0];
@ -426,12 +426,12 @@ ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
if (!chunk_hdr)
break;
/* Check the size of unc buffer */
/* Check the size of unc buffer. */
if (unc_use < chunk_size_saved) {
size_t t1 = chunk_size_saved - unc_use;
u8 *t2 = unc_chunk + t1;
/* 'Zero' memory */
/* 'Zero' memory. */
if (t2 >= unc_end)
break;
@ -440,13 +440,13 @@ ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
}
}
/* Check compression boundary */
/* Check compression boundary. */
if (cmpr_chunk > cmpr_end)
return -EINVAL;
/*
* The unc size is just a difference between current
* pointer and original one
* pointer and original one.
*/
return PtrOffset(unc, unc_chunk);
}

View File

@ -17,9 +17,7 @@
#include "ntfs_fs.h"
/*
* fill_name_de
*
* formats NTFS_DE in 'buf'
* fill_name_de - Format NTFS_DE in @buf.
*/
int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
const struct cpu_str *uni)
@ -46,7 +44,7 @@ int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
fname->name_len = uni->len;
} else {
/* Convert input string to unicode */
/* Convert input string to unicode. */
err = ntfs_nls_to_utf16(sbi, name->name, name->len,
(struct cpu_str *)&fname->name_len,
NTFS_NAME_LEN, UTF16_LITTLE_ENDIAN);
@ -66,9 +64,7 @@ int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
}
/*
* ntfs_lookup
*
* inode_operations::lookup
* ntfs_lookup - inode_operations::lookup
*/
static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
u32 flags)
@ -98,9 +94,7 @@ static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
}
/*
* ntfs_create
*
* inode_operations::create
* ntfs_create - inode_operations::create
*/
static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl)
@ -140,9 +134,7 @@ static int ntfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
}
/*
* ntfs_link
*
* inode_operations::link
* ntfs_link - inode_operations::link
*/
static int ntfs_link(struct dentry *ode, struct inode *dir, struct dentry *de)
{
@ -182,9 +174,7 @@ static int ntfs_link(struct dentry *ode, struct inode *dir, struct dentry *de)
}
/*
* ntfs_unlink
*
* inode_operations::unlink
* ntfs_unlink - inode_operations::unlink
*/
static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
{
@ -201,9 +191,7 @@ static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
}
/*
* ntfs_symlink
*
* inode_operations::symlink
* ntfs_symlink - inode_operations::symlink
*/
static int ntfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
struct dentry *dentry, const char *symname)
@ -223,9 +211,7 @@ static int ntfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
}
/*
* ntfs_mkdir
*
* inode_operations::mkdir
 * ntfs_mkdir - inode_operations::mkdir
*/
static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
struct dentry *dentry, umode_t mode)
@ -244,9 +230,7 @@ static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
}
/*
* ntfs_rmdir
*
* inode_operations::rm_dir
* ntfs_rmdir - inode_operations::rm_dir
*/
static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
{
@ -263,9 +247,7 @@ static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
}
/*
* ntfs_rename
*
* inode_operations::rename
* ntfs_rename - inode_operations::rename
*/
static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
@ -304,7 +286,7 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
old_dentry->d_name.len);
if (is_same && old_dir == new_dir) {
/* Nothing to do */
/* Nothing to do. */
err = 0;
goto out;
}
@ -315,7 +297,7 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
}
if (new_inode) {
/*target name exists. unlink it*/
/* Target name exists. Unlink it. */
dget(new_dentry);
ni_lock_dir(new_dir_ni);
err = ntfs_unlink_inode(new_dir, new_dentry);
@ -325,7 +307,7 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
goto out;
}
/* allocate PATH_MAX bytes */
/* Allocate PATH_MAX bytes. */
old_de = __getname();
if (!old_de) {
err = -ENOMEM;
@ -352,7 +334,7 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
mi_get_ref(&old_dir_ni->mi, &old_name->home);
/*get pointer to file_name in mft*/
/* Get pointer to file_name in MFT. */
fname = ni_fname_name(old_ni, (struct cpu_str *)&old_name->name_len,
&old_name->home, &le);
if (!fname) {
@ -360,19 +342,19 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
goto out2;
}
/* Copy fname info from record into new fname */
/* Copy fname info from record into new fname. */
new_name = (struct ATTR_FILE_NAME *)(new_de + 1);
memcpy(&new_name->dup, &fname->dup, sizeof(fname->dup));
name_type = paired_name(fname->type);
/* remove first name from directory */
/* Remove first name from directory. */
err = indx_delete_entry(&old_dir_ni->dir, old_dir_ni, old_de + 1,
le16_to_cpu(old_de->key_size), sbi);
if (err)
goto out3;
/* remove first name from mft */
/* Remove first name from MFT. */
err = ni_remove_attr_le(old_ni, attr_from_name(fname), le);
if (err)
goto out4;
@ -381,17 +363,17 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
old_ni->mi.dirty = true;
if (name_type != FILE_NAME_POSIX) {
/* get paired name */
/* Get paired name. */
fname = ni_fname_type(old_ni, name_type, &le);
if (fname) {
/* remove second name from directory */
/* Remove second name from directory. */
err = indx_delete_entry(&old_dir_ni->dir, old_dir_ni,
fname, fname_full_size(fname),
sbi);
if (err)
goto out5;
/* remove second name from mft */
/* Remove second name from MFT. */
err = ni_remove_attr_le(old_ni, attr_from_name(fname),
le);
if (err)
@ -402,13 +384,13 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
}
}
/* Add new name */
/* Add new name. */
mi_get_ref(&old_ni->mi, &new_de->ref);
mi_get_ref(&ntfs_i(new_dir)->mi, &new_name->home);
new_de_key_size = le16_to_cpu(new_de->key_size);
/* insert new name in mft */
/* Insert new name in MFT. */
err = ni_insert_resident(old_ni, new_de_key_size, ATTR_NAME, NULL, 0,
&attr, NULL);
if (err)
@ -421,7 +403,7 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
le16_add_cpu(&old_ni->mi.mrec->hard_links, 1);
old_ni->mi.dirty = true;
/* insert new name in directory */
/* Insert new name in directory. */
err = indx_insert_entry(&new_dir_ni->dir, new_dir_ni, new_de, sbi,
NULL);
if (err)
@ -449,7 +431,7 @@ static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
}
err = 0;
/* normal way */
/* Normal way. */
goto out2;
out8:

View File

@ -10,33 +10,24 @@
#ifndef _LINUX_NTFS3_NTFS_H
#define _LINUX_NTFS3_NTFS_H
/* TODO:
* - Check 4K mft record and 512 bytes cluster
*/
/* TODO: Check 4K MFT record and 512 bytes cluster. */
/*
* Activate this define to use binary search in indexes
*/
/* Activate this define to use binary search in indexes. */
#define NTFS3_INDEX_BINARY_SEARCH
/*
* Check each run for marked clusters
*/
/* Check each run for marked clusters. */
#define NTFS3_CHECK_FREE_CLST
#define NTFS_NAME_LEN 255
/*
* ntfs.sys used 500 maximum links
* on-disk struct allows up to 0xffff
*/
/* ntfs.sys used 500 maximum links; on-disk struct allows up to 0xffff. */
#define NTFS_LINK_MAX 0x400
//#define NTFS_LINK_MAX 0xffff
/*
* Activate to use 64 bit clusters instead of 32 bits in ntfs.sys
* Logical and virtual cluster number
* If needed, may be redefined to use 64 bit value
* Activate to use 64 bit clusters instead of 32 bits in ntfs.sys.
 * Logical and virtual cluster number. If needed, may be
* redefined to use 64 bit value.
*/
//#define CONFIG_NTFS3_64BIT_CLUSTER
@ -52,10 +43,10 @@ struct GUID {
};
/*
* this struct repeats layout of ATTR_FILE_NAME
* at offset 0x40
* it used to store global constants NAME_MFT/NAME_MIRROR...
* most constant names are shorter than 10
* This struct repeats layout of ATTR_FILE_NAME
* at offset 0x40.
* It used to store global constants NAME_MFT/NAME_MIRROR...
* most constant names are shorter than 10.
*/
struct cpu_str {
u8 len;
@ -178,11 +169,11 @@ extern const __le16 BAD_NAME[4];
extern const __le16 SDS_NAME[4];
extern const __le16 WOF_NAME[17]; /* WofCompressedData */
/* MFT record number structure */
/* MFT record number structure. */
struct MFT_REF {
__le32 low; // The low part of the number
__le16 high; // The high part of the number
__le16 seq; // The sequence number of MFT record
__le32 low; // The low part of the number.
__le16 high; // The high part of the number.
__le16 seq; // The sequence number of MFT record.
};
static_assert(sizeof(__le64) == sizeof(struct MFT_REF));
@ -197,36 +188,36 @@ static inline CLST ino_get(const struct MFT_REF *ref)
}
struct NTFS_BOOT {
u8 jump_code[3]; // 0x00: Jump to boot code
u8 jump_code[3]; // 0x00: Jump to boot code.
u8 system_id[8]; // 0x03: System ID, equals "NTFS "
// NOTE: this member is not aligned(!)
// bytes_per_sector[0] must be 0
// bytes_per_sector[1] must be multiplied by 256
u8 bytes_per_sector[2]; // 0x0B: Bytes per sector
// NOTE: This member is not aligned(!)
// bytes_per_sector[0] must be 0.
// bytes_per_sector[1] must be multiplied by 256.
u8 bytes_per_sector[2]; // 0x0B: Bytes per sector.
u8 sectors_per_clusters;// 0x0D: Sectors per cluster
u8 sectors_per_clusters;// 0x0D: Sectors per cluster.
u8 unused1[7];
u8 media_type; // 0x15: Media type (0xF8 - harddisk)
u8 unused2[2];
__le16 sct_per_track; // 0x18: number of sectors per track
__le16 heads; // 0x1A: number of heads per cylinder
__le32 hidden_sectors; // 0x1C: number of 'hidden' sectors
__le16 sct_per_track; // 0x18: number of sectors per track.
__le16 heads; // 0x1A: number of heads per cylinder.
__le32 hidden_sectors; // 0x1C: number of 'hidden' sectors.
u8 unused3[4];
u8 bios_drive_num; // 0x24: BIOS drive number =0x80
u8 bios_drive_num; // 0x24: BIOS drive number =0x80.
u8 unused4;
u8 signature_ex; // 0x26: Extended BOOT signature =0x80
u8 signature_ex; // 0x26: Extended BOOT signature =0x80.
u8 unused5;
__le64 sectors_per_volume;// 0x28: size of volume in sectors
__le64 mft_clst; // 0x30: first cluster of $MFT
__le64 mft2_clst; // 0x38: first cluster of $MFTMirr
s8 record_size; // 0x40: size of MFT record in clusters(sectors)
__le64 sectors_per_volume;// 0x28: Size of volume in sectors.
__le64 mft_clst; // 0x30: First cluster of $MFT
__le64 mft2_clst; // 0x38: First cluster of $MFTMirr
s8 record_size; // 0x40: Size of MFT record in clusters(sectors).
u8 unused6[3];
s8 index_size; // 0x44: size of INDX record in clusters(sectors)
s8 index_size; // 0x44: Size of INDX record in clusters(sectors).
u8 unused7[3];
__le64 serial_num; // 0x48: Volume serial number
__le32 check_sum; // 0x50: Simple additive checksum of all
// of the u32's which precede the 'check_sum'
// of the u32's which precede the 'check_sum'.
u8 boot_code[0x200 - 0x50 - 2 - 4]; // 0x54:
u8 boot_magic[2]; // 0x1FE: Boot signature =0x55 + 0xAA
@ -247,13 +238,13 @@ enum NTFS_SIGNATURE {
static_assert(sizeof(enum NTFS_SIGNATURE) == 4);
/* MFT Record header structure */
/* MFT Record header structure. */
struct NTFS_RECORD_HEADER {
/* Record magic number, equals 'FILE'/'INDX'/'RSTR'/'RCRD' */
/* Record magic number, equals 'FILE'/'INDX'/'RSTR'/'RCRD'. */
enum NTFS_SIGNATURE sign; // 0x00:
__le16 fix_off; // 0x04:
__le16 fix_num; // 0x06:
__le64 lsn; // 0x08: Log file sequence number
__le64 lsn; // 0x08: Log file sequence number,
};
static_assert(sizeof(struct NTFS_RECORD_HEADER) == 0x10);
@ -263,7 +254,7 @@ static inline int is_baad(const struct NTFS_RECORD_HEADER *hdr)
return hdr->sign == NTFS_BAAD_SIGNATURE;
}
/* Possible bits in struct MFT_REC.flags */
/* Possible bits in struct MFT_REC.flags. */
enum RECORD_FLAG {
RECORD_FLAG_IN_USE = cpu_to_le16(0x0001),
RECORD_FLAG_DIR = cpu_to_le16(0x0002),
@ -271,22 +262,22 @@ enum RECORD_FLAG {
RECORD_FLAG_UNKNOWN = cpu_to_le16(0x0008),
};
/* MFT Record structure */
/* MFT Record structure. */
struct MFT_REC {
struct NTFS_RECORD_HEADER rhdr; // 'FILE'
__le16 seq; // 0x10: Sequence number for this record
__le16 hard_links; // 0x12: The number of hard links to record
__le16 attr_off; // 0x14: Offset to attributes
__le16 flags; // 0x16: See RECORD_FLAG
__le32 used; // 0x18: The size of used part
__le32 total; // 0x1C: Total record size
__le16 seq; // 0x10: Sequence number for this record.
__le16 hard_links; // 0x12: The number of hard links to record.
__le16 attr_off; // 0x14: Offset to attributes.
__le16 flags; // 0x16: See RECORD_FLAG.
__le32 used; // 0x18: The size of used part.
__le32 total; // 0x1C: Total record size.
struct MFT_REF parent_ref; // 0x20: Parent MFT record
__le16 next_attr_id; // 0x28: The next attribute Id
struct MFT_REF parent_ref; // 0x20: Parent MFT record.
__le16 next_attr_id; // 0x28: The next attribute Id.
__le16 res; // 0x2A: High part of mft record?
__le32 mft_record; // 0x2C: Current mft record number
__le16 res; // 0x2A: High part of MFT record?
__le32 mft_record; // 0x2C: Current MFT record number.
__le16 fixups[]; // 0x30:
};
@ -323,16 +314,16 @@ static inline bool clear_rec_inuse(struct MFT_REC *rec)
#define RESIDENT_FLAG_INDEXED 0x01
struct ATTR_RESIDENT {
__le32 data_size; // 0x10: The size of data
__le16 data_off; // 0x14: Offset to data
u8 flags; // 0x16: resident flags ( 1 - indexed )
__le32 data_size; // 0x10: The size of data.
__le16 data_off; // 0x14: Offset to data.
u8 flags; // 0x16: Resident flags ( 1 - indexed ).
u8 res; // 0x17:
}; // sizeof() = 0x18
struct ATTR_NONRESIDENT {
__le64 svcn; // 0x10: Starting VCN of this segment
__le64 evcn; // 0x18: End VCN of this segment
__le16 run_off; // 0x20: Offset to packed runs
__le64 svcn; // 0x10: Starting VCN of this segment.
__le64 evcn; // 0x18: End VCN of this segment.
__le16 run_off; // 0x20: Offset to packed runs.
// Unit of Compression size for this stream, expressed
// as a log of the cluster size.
//
@ -345,13 +336,13 @@ struct ATTR_NONRESIDENT {
// reasonable range of legal values here (1-5?),
// even if the implementation only generates
// a smaller set of values itself.
u8 c_unit; // 0x22
u8 c_unit; // 0x22:
u8 res1[5]; // 0x23:
__le64 alloc_size; // 0x28: The allocated size of attribute in bytes
__le64 alloc_size; // 0x28: The allocated size of attribute in bytes.
// (multiple of cluster size)
__le64 data_size; // 0x30: The size of attribute in bytes <= alloc_size
__le64 valid_size; // 0x38: The size of valid part in bytes <= data_size
__le64 total_size; // 0x40: The sum of the allocated clusters for a file
__le64 data_size; // 0x30: The size of attribute in bytes <= alloc_size.
__le64 valid_size; // 0x38: The size of valid part in bytes <= data_size.
__le64 total_size; // 0x40: The sum of the allocated clusters for a file.
// (present only for the first segment (0 == vcn)
// of compressed attribute)
@ -364,13 +355,13 @@ struct ATTR_NONRESIDENT {
#define ATTR_FLAG_SPARSED cpu_to_le16(0x8000)
struct ATTRIB {
enum ATTR_TYPE type; // 0x00: The type of this attribute
__le32 size; // 0x04: The size of this attribute
u8 non_res; // 0x08: Is this attribute non-resident ?
u8 name_len; // 0x09: This attribute name length
__le16 name_off; // 0x0A: Offset to the attribute name
__le16 flags; // 0x0C: See ATTR_FLAG_XXX
__le16 id; // 0x0E: unique id (per record)
enum ATTR_TYPE type; // 0x00: The type of this attribute.
__le32 size; // 0x04: The size of this attribute.
u8 non_res; // 0x08: Is this attribute non-resident?
u8 name_len; // 0x09: This attribute name length.
__le16 name_off; // 0x0A: Offset to the attribute name.
__le16 flags; // 0x0C: See ATTR_FLAG_XXX.
__le16 id; // 0x0E: Unique id (per record).
union {
struct ATTR_RESIDENT res; // 0x10
@ -378,7 +369,7 @@ struct ATTRIB {
};
};
/* Define attribute sizes */
/* Define attribute sizes. */
#define SIZEOF_RESIDENT 0x18
#define SIZEOF_NONRESIDENT_EX 0x48
#define SIZEOF_NONRESIDENT 0x40
@ -437,7 +428,7 @@ static inline u64 attr_svcn(const struct ATTRIB *attr)
return attr->non_res ? le64_to_cpu(attr->nres.svcn) : 0;
}
/* the size of resident attribute by its resident size */
/* The size of resident attribute by its resident size. */
#define BYTES_PER_RESIDENT(b) (0x18 + (b))
static_assert(sizeof(struct ATTRIB) == 0x48);
@ -475,16 +466,16 @@ static inline void *attr_run(const struct ATTRIB *attr)
return Add2Ptr(attr, le16_to_cpu(attr->nres.run_off));
}
/* Standard information attribute (0x10) */
/* Standard information attribute (0x10). */
struct ATTR_STD_INFO {
__le64 cr_time; // 0x00: File creation file
__le64 m_time; // 0x08: File modification time
__le64 c_time; // 0x10: Last time any attribute was modified
__le64 a_time; // 0x18: File last access time
enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more
__le32 max_ver_num; // 0x24: Maximum Number of Versions
__le32 ver_num; // 0x28: Version Number
__le32 class_id; // 0x2C: Class Id from bidirectional Class Id index
__le64 cr_time; // 0x00: File creation file.
__le64 m_time; // 0x08: File modification time.
__le64 c_time; // 0x10: Last time any attribute was modified.
__le64 a_time; // 0x18: File last access time.
enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more.
__le32 max_ver_num; // 0x24: Maximum Number of Versions.
__le32 ver_num; // 0x28: Version Number.
__le32 class_id; // 0x2C: Class Id from bidirectional Class Id index.
};
static_assert(sizeof(struct ATTR_STD_INFO) == 0x30);
@ -493,17 +484,17 @@ static_assert(sizeof(struct ATTR_STD_INFO) == 0x30);
#define SECURITY_ID_FIRST 0x00000100
struct ATTR_STD_INFO5 {
__le64 cr_time; // 0x00: File creation file
__le64 m_time; // 0x08: File modification time
__le64 c_time; // 0x10: Last time any attribute was modified
__le64 a_time; // 0x18: File last access time
enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more
__le32 max_ver_num; // 0x24: Maximum Number of Versions
__le32 ver_num; // 0x28: Version Number
__le32 class_id; // 0x2C: Class Id from bidirectional Class Id index
__le64 cr_time; // 0x00: File creation file.
__le64 m_time; // 0x08: File modification time.
__le64 c_time; // 0x10: Last time any attribute was modified.
__le64 a_time; // 0x18: File last access time.
enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more.
__le32 max_ver_num; // 0x24: Maximum Number of Versions.
__le32 ver_num; // 0x28: Version Number.
__le32 class_id; // 0x2C: Class Id from bidirectional Class Id index.
__le32 owner_id; // 0x30: Owner Id of the user owning the file.
__le32 security_id; // 0x34: The Security Id is a key in the $SII Index and $SDS
__le32 security_id; // 0x34: The Security Id is a key in the $SII Index and $SDS.
__le64 quota_charge; // 0x38:
__le64 usn; // 0x40: Last Update Sequence Number of the file. This is a direct
// index into the file $UsnJrnl. If zero, the USN Journal is
@ -512,16 +503,16 @@ struct ATTR_STD_INFO5 {
static_assert(sizeof(struct ATTR_STD_INFO5) == 0x48);
/* attribute list entry structure (0x20) */
/* Attribute list entry structure (0x20). */
struct ATTR_LIST_ENTRY {
enum ATTR_TYPE type; // 0x00: The type of attribute
__le16 size; // 0x04: The size of this record
u8 name_len; // 0x06: The length of attribute name
u8 name_off; // 0x07: The offset to attribute name
__le64 vcn; // 0x08: Starting VCN of this attribute
struct MFT_REF ref; // 0x10: MFT record number with attribute
__le16 id; // 0x18: struct ATTRIB ID
__le16 name[3]; // 0x1A: Just to align. To get real name can use bNameOffset
enum ATTR_TYPE type; // 0x00: The type of attribute.
__le16 size; // 0x04: The size of this record.
u8 name_len; // 0x06: The length of attribute name.
u8 name_off; // 0x07: The offset to attribute name.
__le64 vcn; // 0x08: Starting VCN of this attribute.
struct MFT_REF ref; // 0x10: MFT record number with attribute.
__le16 id; // 0x18: struct ATTRIB ID.
__le16 name[3]; // 0x1A: Just to align. To get real name can use bNameOffset.
}; // sizeof(0x20)
@ -533,7 +524,7 @@ static inline u32 le_size(u8 name_len)
name_len * sizeof(short), 8);
}
/* returns 0 if 'attr' has the same type and name */
/* Returns 0 if 'attr' has the same type and name. */
static inline int le_cmp(const struct ATTR_LIST_ENTRY *le,
const struct ATTRIB *attr)
{
@ -549,32 +540,32 @@ static inline __le16 const *le_name(const struct ATTR_LIST_ENTRY *le)
return Add2Ptr(le, le->name_off);
}
/* File name types (the field type in struct ATTR_FILE_NAME ) */
/* File name types (the field type in struct ATTR_FILE_NAME). */
#define FILE_NAME_POSIX 0
#define FILE_NAME_UNICODE 1
#define FILE_NAME_DOS 2
#define FILE_NAME_UNICODE_AND_DOS (FILE_NAME_DOS | FILE_NAME_UNICODE)
/* Filename attribute structure (0x30) */
/* Filename attribute structure (0x30). */
struct NTFS_DUP_INFO {
__le64 cr_time; // 0x00: File creation file
__le64 m_time; // 0x08: File modification time
__le64 c_time; // 0x10: Last time any attribute was modified
__le64 a_time; // 0x18: File last access time
__le64 alloc_size; // 0x20: Data attribute allocated size, multiple of cluster size
__le64 data_size; // 0x28: Data attribute size <= Dataalloc_size
enum FILE_ATTRIBUTE fa; // 0x30: Standard DOS attributes & more
__le16 ea_size; // 0x34: Packed EAs
__le16 reparse; // 0x36: Used by Reparse
__le64 cr_time; // 0x00: File creation file.
__le64 m_time; // 0x08: File modification time.
__le64 c_time; // 0x10: Last time any attribute was modified.
__le64 a_time; // 0x18: File last access time.
__le64 alloc_size; // 0x20: Data attribute allocated size, multiple of cluster size.
__le64 data_size; // 0x28: Data attribute size <= Dataalloc_size.
enum FILE_ATTRIBUTE fa; // 0x30: Standard DOS attributes & more.
__le16 ea_size; // 0x34: Packed EAs.
__le16 reparse; // 0x36: Used by Reparse.
}; // 0x38
struct ATTR_FILE_NAME {
struct MFT_REF home; // 0x00: MFT record for directory
struct NTFS_DUP_INFO dup;// 0x08
u8 name_len; // 0x40: File name length in words
u8 type; // 0x41: File name type
__le16 name[]; // 0x42: File name
struct MFT_REF home; // 0x00: MFT record for directory.
struct NTFS_DUP_INFO dup;// 0x08:
u8 name_len; // 0x40: File name length in words.
u8 type; // 0x41: File name type.
__le16 name[]; // 0x42: File name.
};
static_assert(sizeof(((struct ATTR_FILE_NAME *)NULL)->dup) == 0x38);
@ -589,7 +580,7 @@ static inline struct ATTRIB *attr_from_name(struct ATTR_FILE_NAME *fname)
static inline u16 fname_full_size(const struct ATTR_FILE_NAME *fname)
{
// don't return struct_size(fname, name, fname->name_len);
/* Don't return struct_size(fname, name, fname->name_len); */
return offsetof(struct ATTR_FILE_NAME, name) +
fname->name_len * sizeof(short);
}
@ -603,32 +594,32 @@ static inline u8 paired_name(u8 type)
return FILE_NAME_POSIX;
}
/* Index entry defines ( the field flags in NtfsDirEntry ) */
/* Index entry defines ( the field flags in NtfsDirEntry ). */
#define NTFS_IE_HAS_SUBNODES cpu_to_le16(1)
#define NTFS_IE_LAST cpu_to_le16(2)
/* Directory entry structure */
/* Directory entry structure. */
struct NTFS_DE {
union {
struct MFT_REF ref; // 0x00: MFT record number with this file
struct MFT_REF ref; // 0x00: MFT record number with this file.
struct {
__le16 data_off; // 0x00:
__le16 data_size; // 0x02:
__le32 res; // 0x04: must be 0
__le32 res; // 0x04: Must be 0.
} view;
};
__le16 size; // 0x08: The size of this entry
__le16 key_size; // 0x0A: The size of File name length in bytes + 0x42
__le16 flags; // 0x0C: Entry flags: NTFS_IE_XXX
__le16 size; // 0x08: The size of this entry.
__le16 key_size; // 0x0A: The size of File name length in bytes + 0x42.
__le16 flags; // 0x0C: Entry flags: NTFS_IE_XXX.
__le16 res; // 0x0E:
// Here any indexed attribute can be placed
// Here any indexed attribute can be placed.
// One of them is:
// struct ATTR_FILE_NAME AttrFileName;
//
// The last 8 bytes of this structure contains
// the VBN of subnode
// the VBN of subnode.
// !!! Note !!!
// This field is presented only if (flags & NTFS_IE_HAS_SUBNODES)
// __le64 vbn;
@ -698,11 +689,11 @@ static inline bool de_has_vcn_ex(const struct NTFS_DE *e)
struct INDEX_HDR {
__le32 de_off; // 0x00: The offset from the start of this structure
// to the first NTFS_DE
// to the first NTFS_DE.
__le32 used; // 0x04: The size of this structure plus all
// entries (quad-word aligned)
__le32 total; // 0x08: The allocated size of for this structure plus all entries
u8 flags; // 0x0C: 0x00 = Small directory, 0x01 = Large directory
// entries (quad-word aligned).
__le32 total; // 0x08: The allocated size of for this structure plus all entries.
u8 flags; // 0x0C: 0x00 = Small directory, 0x01 = Large directory.
u8 res[3];
//
@ -773,7 +764,7 @@ static inline bool ib_is_leaf(const struct INDEX_BUFFER *ib)
return !(ib->ihdr.flags & 1);
}
/* Index root structure ( 0x90 ) */
/* Index root structure ( 0x90 ). */
enum COLLATION_RULE {
NTFS_COLLATION_TYPE_BINARY = cpu_to_le32(0),
// $I30
@ -792,10 +783,10 @@ static_assert(sizeof(enum COLLATION_RULE) == 4);
//
struct INDEX_ROOT {
enum ATTR_TYPE type; // 0x00: The type of attribute to index on
enum COLLATION_RULE rule; // 0x04: The rule
__le32 index_block_size;// 0x08: The size of index record
u8 index_block_clst; // 0x0C: The number of clusters or sectors per index
enum ATTR_TYPE type; // 0x00: The type of attribute to index on.
enum COLLATION_RULE rule; // 0x04: The rule.
__le32 index_block_size;// 0x08: The size of index record.
u8 index_block_clst; // 0x0C: The number of clusters or sectors per index.
u8 res[3];
struct INDEX_HDR ihdr; // 0x10:
};
@ -824,24 +815,24 @@ struct VOLUME_INFO {
#define NTFS_ATTR_MUST_BE_RESIDENT cpu_to_le32(0x00000040)
#define NTFS_ATTR_LOG_ALWAYS cpu_to_le32(0x00000080)
/* $AttrDef file entry */
/* $AttrDef file entry. */
struct ATTR_DEF_ENTRY {
__le16 name[0x40]; // 0x00: Attr name
enum ATTR_TYPE type; // 0x80: struct ATTRIB type
__le16 name[0x40]; // 0x00: Attr name.
enum ATTR_TYPE type; // 0x80: struct ATTRIB type.
__le32 res; // 0x84:
enum COLLATION_RULE rule; // 0x88:
__le32 flags; // 0x8C: NTFS_ATTR_XXX (see above)
__le64 min_sz; // 0x90: Minimum attribute data size
__le64 max_sz; // 0x98: Maximum attribute data size
__le32 flags; // 0x8C: NTFS_ATTR_XXX (see above).
__le64 min_sz; // 0x90: Minimum attribute data size.
__le64 max_sz; // 0x98: Maximum attribute data size.
};
static_assert(sizeof(struct ATTR_DEF_ENTRY) == 0xa0);
/* Object ID (0x40) */
struct OBJECT_ID {
struct GUID ObjId; // 0x00: Unique Id assigned to file
struct GUID BirthVolumeId;// 0x10: Birth Volume Id is the Object Id of the Volume on
// which the Object Id was allocated. It never changes
struct GUID ObjId; // 0x00: Unique Id assigned to file.
struct GUID BirthVolumeId; // 0x10: Birth Volume Id is the Object Id of the Volume on
// which the Object Id was allocated. It never changes.
struct GUID BirthObjectId; // 0x20: Birth Object Id is the first Object Id that was
// ever assigned to this MFT Record. I.e. If the Object Id
// is changed for some reason, this field will reflect the
@ -857,15 +848,15 @@ static_assert(sizeof(struct OBJECT_ID) == 0x40);
/* O Directory entry structure ( rule = 0x13 ) */
struct NTFS_DE_O {
struct NTFS_DE de;
struct GUID ObjId; // 0x10: Unique Id assigned to file
struct MFT_REF ref; // 0x20: MFT record number with this file
struct GUID ObjId; // 0x10: Unique Id assigned to file.
struct MFT_REF ref; // 0x20: MFT record number with this file.
struct GUID BirthVolumeId; // 0x28: Birth Volume Id is the Object Id of the Volume on
// which the Object Id was allocated. It never changes
// which the Object Id was allocated. It never changes.
struct GUID BirthObjectId; // 0x38: Birth Object Id is the first Object Id that was
// ever assigned to this MFT Record. I.e. If the Object Id
// is changed for some reason, this field will reflect the
// original value of the Object Id.
// This field is valid if data_size == 0x48
// This field is valid if data_size == 0x48.
struct GUID BirthDomainId; // 0x48: Domain Id is currently unused but it is intended
// to be used in a network environment where the local
// machine is part of a Windows 2000 Domain. This may be
@ -907,13 +898,13 @@ struct SECURITY_KEY {
/* Security descriptors (the content of $Secure::SDS data stream) */
struct SECURITY_HDR {
struct SECURITY_KEY key; // 0x00: Security Key
__le64 off; // 0x08: Offset of this entry in the file
__le32 size; // 0x10: Size of this entry, 8 byte aligned
//
// Security descriptor itself is placed here
// Total size is 16 byte aligned
//
struct SECURITY_KEY key; // 0x00: Security Key.
__le64 off; // 0x08: Offset of this entry in the file.
__le32 size; // 0x10: Size of this entry, 8 byte aligned.
/*
* Security descriptor itself is placed here.
* Total size is 16 byte aligned.
*/
} __packed;
#define SIZEOF_SECURITY_HDR 0x14
@ -948,8 +939,8 @@ static_assert(offsetof(struct REPARSE_KEY, ref) == 0x04);
/* Reparse Directory entry structure */
struct NTFS_DE_R {
struct NTFS_DE de;
struct REPARSE_KEY key; // 0x10: Reparse Key
u32 zero; // 0x1c
struct REPARSE_KEY key; // 0x10: Reparse Key.
u32 zero; // 0x1c:
}; // sizeof() = 0x20
static_assert(sizeof(struct NTFS_DE_R) == 0x20);
@ -991,69 +982,63 @@ struct REPARSE_POINT {
static_assert(sizeof(struct REPARSE_POINT) == 0x18);
//
// Maximum allowed size of the reparse data.
//
/* Maximum allowed size of the reparse data. */
#define MAXIMUM_REPARSE_DATA_BUFFER_SIZE (16 * 1024)
//
// The value of the following constant needs to satisfy the following
// conditions:
// (1) Be at least as large as the largest of the reserved tags.
// (2) Be strictly smaller than all the tags in use.
//
/*
* The value of the following constant needs to satisfy the following
* conditions:
* (1) Be at least as large as the largest of the reserved tags.
* (2) Be strictly smaller than all the tags in use.
*/
#define IO_REPARSE_TAG_RESERVED_RANGE 1
//
// The reparse tags are a ULONG. The 32 bits are laid out as follows:
//
// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
// +-+-+-+-+-----------------------+-------------------------------+
// |M|R|N|R| Reserved bits | Reparse Tag Value |
// +-+-+-+-+-----------------------+-------------------------------+
//
// M is the Microsoft bit. When set to 1, it denotes a tag owned by Microsoft.
// All ISVs must use a tag with a 0 in this position.
// Note: If a Microsoft tag is used by non-Microsoft software, the
// behavior is not defined.
//
// R is reserved. Must be zero for non-Microsoft tags.
//
// N is name surrogate. When set to 1, the file represents another named
// entity in the system.
//
// The M and N bits are OR-able.
// The following macros check for the M and N bit values:
//
/*
* The reparse tags are a ULONG. The 32 bits are laid out as follows:
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-----------------------+-------------------------------+
* |M|R|N|R| Reserved bits | Reparse Tag Value |
* +-+-+-+-+-----------------------+-------------------------------+
*
* M is the Microsoft bit. When set to 1, it denotes a tag owned by Microsoft.
* All ISVs must use a tag with a 0 in this position.
* Note: If a Microsoft tag is used by non-Microsoft software, the
* behavior is not defined.
*
* R is reserved. Must be zero for non-Microsoft tags.
*
* N is name surrogate. When set to 1, the file represents another named
* entity in the system.
*
* The M and N bits are OR-able.
* The following macros check for the M and N bit values:
*/
//
// Macro to determine whether a reparse point tag corresponds to a tag
// owned by Microsoft.
//
/*
* Macro to determine whether a reparse point tag corresponds to a tag
* owned by Microsoft.
*/
#define IsReparseTagMicrosoft(_tag) (((_tag)&IO_REPARSE_TAG_MICROSOFT))
//
// Macro to determine whether a reparse point tag is a name surrogate
//
/* Macro to determine whether a reparse point tag is a name surrogate. */
#define IsReparseTagNameSurrogate(_tag) (((_tag)&IO_REPARSE_TAG_NAME_SURROGATE))
//
// The following constant represents the bits that are valid to use in
// reparse tags.
//
/*
* The following constant represents the bits that are valid to use in
* reparse tags.
*/
#define IO_REPARSE_TAG_VALID_VALUES 0xF000FFFF
//
// Macro to determine whether a reparse tag is a valid tag.
//
/*
* Macro to determine whether a reparse tag is a valid tag.
*/
#define IsReparseTagValid(_tag) \
(!((_tag) & ~IO_REPARSE_TAG_VALID_VALUES) && \
((_tag) > IO_REPARSE_TAG_RESERVED_RANGE))
//
// Microsoft tags for reparse points.
//
/* Microsoft tags for reparse points. */
enum IO_REPARSE_TAG {
IO_REPARSE_TAG_SYMBOLIC_LINK = cpu_to_le32(0),
@ -1066,62 +1051,48 @@ enum IO_REPARSE_TAG {
IO_REPARSE_TAG_DEDUP = cpu_to_le32(0x80000013),
IO_REPARSE_TAG_COMPRESS = cpu_to_le32(0x80000017),
//
// The reparse tag 0x80000008 is reserved for Microsoft internal use
// (may be published in the future)
//
/*
* The reparse tag 0x80000008 is reserved for Microsoft internal use.
* May be published in the future.
*/
//
// Microsoft reparse tag reserved for DFS
//
IO_REPARSE_TAG_DFS = cpu_to_le32(0x8000000A),
/* Microsoft reparse tag reserved for DFS */
IO_REPARSE_TAG_DFS = cpu_to_le32(0x8000000A),
//
// Microsoft reparse tag reserved for the file system filter manager
//
/* Microsoft reparse tag reserved for the file system filter manager. */
IO_REPARSE_TAG_FILTER_MANAGER = cpu_to_le32(0x8000000B),
//
// Non-Microsoft tags for reparse points
//
/* Non-Microsoft tags for reparse points */
//
// Tag allocated to CONGRUENT, May 2000. Used by IFSTEST
//
/* Tag allocated to CONGRUENT, May 2000. Used by IFSTEST. */
IO_REPARSE_TAG_IFSTEST_CONGRUENT = cpu_to_le32(0x00000009),
//
// Tag allocated to ARKIVIO
//
IO_REPARSE_TAG_ARKIVIO = cpu_to_le32(0x0000000C),
/* Tag allocated to ARKIVIO. */
IO_REPARSE_TAG_ARKIVIO = cpu_to_le32(0x0000000C),
//
// Tag allocated to SOLUTIONSOFT
//
/* Tag allocated to SOLUTIONSOFT. */
IO_REPARSE_TAG_SOLUTIONSOFT = cpu_to_le32(0x2000000D),
//
// Tag allocated to COMMVAULT
//
/* Tag allocated to COMMVAULT. */
IO_REPARSE_TAG_COMMVAULT = cpu_to_le32(0x0000000E),
// OneDrive??
IO_REPARSE_TAG_CLOUD = cpu_to_le32(0x9000001A),
IO_REPARSE_TAG_CLOUD_1 = cpu_to_le32(0x9000101A),
IO_REPARSE_TAG_CLOUD_2 = cpu_to_le32(0x9000201A),
IO_REPARSE_TAG_CLOUD_3 = cpu_to_le32(0x9000301A),
IO_REPARSE_TAG_CLOUD_4 = cpu_to_le32(0x9000401A),
IO_REPARSE_TAG_CLOUD_5 = cpu_to_le32(0x9000501A),
IO_REPARSE_TAG_CLOUD_6 = cpu_to_le32(0x9000601A),
IO_REPARSE_TAG_CLOUD_7 = cpu_to_le32(0x9000701A),
IO_REPARSE_TAG_CLOUD_8 = cpu_to_le32(0x9000801A),
IO_REPARSE_TAG_CLOUD_9 = cpu_to_le32(0x9000901A),
IO_REPARSE_TAG_CLOUD_A = cpu_to_le32(0x9000A01A),
IO_REPARSE_TAG_CLOUD_B = cpu_to_le32(0x9000B01A),
IO_REPARSE_TAG_CLOUD_C = cpu_to_le32(0x9000C01A),
IO_REPARSE_TAG_CLOUD_D = cpu_to_le32(0x9000D01A),
IO_REPARSE_TAG_CLOUD_E = cpu_to_le32(0x9000E01A),
IO_REPARSE_TAG_CLOUD_F = cpu_to_le32(0x9000F01A),
/* OneDrive?? */
IO_REPARSE_TAG_CLOUD = cpu_to_le32(0x9000001A),
IO_REPARSE_TAG_CLOUD_1 = cpu_to_le32(0x9000101A),
IO_REPARSE_TAG_CLOUD_2 = cpu_to_le32(0x9000201A),
IO_REPARSE_TAG_CLOUD_3 = cpu_to_le32(0x9000301A),
IO_REPARSE_TAG_CLOUD_4 = cpu_to_le32(0x9000401A),
IO_REPARSE_TAG_CLOUD_5 = cpu_to_le32(0x9000501A),
IO_REPARSE_TAG_CLOUD_6 = cpu_to_le32(0x9000601A),
IO_REPARSE_TAG_CLOUD_7 = cpu_to_le32(0x9000701A),
IO_REPARSE_TAG_CLOUD_8 = cpu_to_le32(0x9000801A),
IO_REPARSE_TAG_CLOUD_9 = cpu_to_le32(0x9000901A),
IO_REPARSE_TAG_CLOUD_A = cpu_to_le32(0x9000A01A),
IO_REPARSE_TAG_CLOUD_B = cpu_to_le32(0x9000B01A),
IO_REPARSE_TAG_CLOUD_C = cpu_to_le32(0x9000C01A),
IO_REPARSE_TAG_CLOUD_D = cpu_to_le32(0x9000D01A),
IO_REPARSE_TAG_CLOUD_E = cpu_to_le32(0x9000E01A),
IO_REPARSE_TAG_CLOUD_F = cpu_to_le32(0x9000F01A),
};
@ -1134,7 +1105,7 @@ struct REPARSE_DATA_BUFFER {
__le16 Reserved;
union {
// If ReparseTag == 0xA0000003 (IO_REPARSE_TAG_MOUNT_POINT)
/* If ReparseTag == 0xA0000003 (IO_REPARSE_TAG_MOUNT_POINT) */
struct {
__le16 SubstituteNameOffset; // 0x08
__le16 SubstituteNameLength; // 0x0A
@ -1143,8 +1114,10 @@ struct REPARSE_DATA_BUFFER {
__le16 PathBuffer[]; // 0x10
} MountPointReparseBuffer;
// If ReparseTag == 0xA000000C (IO_REPARSE_TAG_SYMLINK)
// https://msdn.microsoft.com/en-us/library/cc232006.aspx
/*
* If ReparseTag == 0xA000000C (IO_REPARSE_TAG_SYMLINK)
* https://msdn.microsoft.com/en-us/library/cc232006.aspx
*/
struct {
__le16 SubstituteNameOffset; // 0x08
__le16 SubstituteNameLength; // 0x0A
@ -1155,19 +1128,20 @@ struct REPARSE_DATA_BUFFER {
__le16 PathBuffer[]; // 0x14
} SymbolicLinkReparseBuffer;
// If ReparseTag == 0x80000017U
/* If ReparseTag == 0x80000017U */
struct {
__le32 WofVersion; // 0x08 == 1
/* 1 - WIM backing provider ("WIMBoot"),
/*
* 1 - WIM backing provider ("WIMBoot"),
* 2 - System compressed file provider
*/
__le32 WofProvider; // 0x0C
__le32 WofProvider; // 0x0C:
__le32 ProviderVer; // 0x10: == 1 WOF_FILE_PROVIDER_CURRENT_VERSION == 1
__le32 CompressionFormat; // 0x14: 0, 1, 2, 3. See WOF_COMPRESSION_XXX
} CompressReparseBuffer;
struct {
u8 DataBuffer[1]; // 0x08
u8 DataBuffer[1]; // 0x08:
} GenericReparseBuffer;
};
};
@ -1175,13 +1149,14 @@ struct REPARSE_DATA_BUFFER {
/* ATTR_EA_INFO (0xD0) */
#define FILE_NEED_EA 0x80 // See ntifs.h
/* FILE_NEED_EA, indicates that the file to which the EA belongs cannot be
/*
 * FILE_NEED_EA indicates that the file to which the EA belongs cannot be
 * interpreted without understanding the associated extended attributes.
 */
struct EA_INFO {
__le16 size_pack; // 0x00: Size of buffer to hold in packed form
__le16 count; // 0x02: Count of EA's with FILE_NEED_EA bit set
__le32 size; // 0x04: Size of buffer to hold in unpacked form
__le16 size_pack; // 0x00: Size of buffer to hold in packed form.
__le16 count; // 0x02: Count of EA's with FILE_NEED_EA bit set.
__le32 size; // 0x04: Size of buffer to hold in unpacked form.
};
static_assert(sizeof(struct EA_INFO) == 8);
@ -1189,10 +1164,10 @@ static_assert(sizeof(struct EA_INFO) == 8);
/* ATTR_EA (0xE0) */
struct EA_FULL {
__le32 size; // 0x00: (not in packed)
u8 flags; // 0x04
u8 name_len; // 0x05
__le16 elength; // 0x06
u8 name[]; // 0x08
u8 flags; // 0x04:
u8 name_len; // 0x05:
__le16 elength; // 0x06:
u8 name[]; // 0x08:
};
static_assert(offsetof(struct EA_FULL, name) == 8);

View File

@ -17,33 +17,33 @@
#define MAXIMUM_BYTES_PER_INDEX 4096
#define NTFS_BLOCKS_PER_INODE (MAXIMUM_BYTES_PER_INDEX / 512)
/* ntfs specific error code when fixup failed*/
/* NTFS specific error code when fixup failed. */
#define E_NTFS_FIXUP 555
/* ntfs specific error code about resident->nonresident*/
/* NTFS specific error code about resident->nonresident. */
#define E_NTFS_NONRESIDENT 556
/* ntfs specific error code about punch hole*/
/* NTFS specific error code about punch hole. */
#define E_NTFS_NOTALIGNED 557
/* sbi->flags */
#define NTFS_FLAGS_NODISCARD 0x00000001
/* Set when LogFile is replaying */
/* Set when LogFile is replaying. */
#define NTFS_FLAGS_LOG_REPLAYING 0x00000008
/* Set when we changed first MFT's which copy must be updated in $MftMirr */
/* Set when we changed first MFT's which copy must be updated in $MftMirr. */
#define NTFS_FLAGS_MFTMIRR 0x00001000
#define NTFS_FLAGS_NEED_REPLAY 0x04000000
/* ni->ni_flags */
/*
* Data attribute is external compressed (lzx/xpress)
* Data attribute is external compressed (LZX/Xpress)
* 1 - WOF_COMPRESSION_XPRESS4K
* 2 - WOF_COMPRESSION_XPRESS8K
* 3 - WOF_COMPRESSION_XPRESS16K
* 4 - WOF_COMPRESSION_LZX32K
*/
#define NI_FLAG_COMPRESSED_MASK 0x0000000f
/* Data attribute is deduplicated */
/* Data attribute is deduplicated. */
#define NI_FLAG_DEDUPLICATED 0x00000010
#define NI_FLAG_EA 0x00000020
#define NI_FLAG_DIR 0x00000040
@ -59,29 +59,29 @@ struct ntfs_mount_options {
u16 fs_fmask_inv;
u16 fs_dmask_inv;
unsigned uid : 1, /* uid was set */
gid : 1, /* gid was set */
fmask : 1, /* fmask was set */
dmask : 1, /*dmask was set*/
sys_immutable : 1, /* immutable system files */
discard : 1, /* issue discard requests on deletions */
sparse : 1, /*create sparse files*/
showmeta : 1, /*show meta files*/
nohidden : 1, /*do not show hidden files*/
force : 1, /*rw mount dirty volume*/
no_acs_rules : 1, /*exclude acs rules*/
prealloc : 1 /*preallocate space when file is growing*/
unsigned uid : 1, /* uid was set. */
gid : 1, /* gid was set. */
fmask : 1, /* fmask was set. */
dmask : 1, /* dmask was set. */
sys_immutable : 1,/* Immutable system files. */
discard : 1, /* Issue discard requests on deletions. */
sparse : 1, /* Create sparse files. */
showmeta : 1, /* Show meta files. */
nohidden : 1, /* Do not show hidden files. */
force : 1, /* Rw mount dirty volume. */
no_acs_rules : 1,/* Exclude acs rules. */
prealloc : 1 /* Preallocate space when file is growing. */
;
};
/* special value to unpack and deallocate*/
/* Special value to unpack and deallocate. */
#define RUN_DEALLOCATE ((struct runs_tree *)(size_t)1)
/* TODO: use rb tree instead of array */
/* TODO: Use rb tree instead of array. */
struct runs_tree {
struct ntfs_run *runs;
size_t count; // Currently used size a ntfs_run storage.
size_t allocated; // Currently allocated ntfs_run storage size.
size_t count; /* Currently used size a ntfs_run storage. */
size_t allocated; /* Currently allocated ntfs_run storage size. */
};
struct ntfs_buffers {
@ -94,8 +94,8 @@ struct ntfs_buffers {
};
enum ALLOCATE_OPT {
ALLOCATE_DEF = 0, // Allocate all clusters
ALLOCATE_MFT = 1, // Allocate for MFT
ALLOCATE_DEF = 0, // Allocate all clusters.
ALLOCATE_MFT = 1, // Allocate for MFT.
};
enum bitmap_mutex_classes {
@ -110,29 +110,29 @@ struct wnd_bitmap {
struct runs_tree run;
size_t nbits;
size_t total_zeroes; // total number of free bits
u16 *free_bits; // free bits in each window
size_t total_zeroes; // Total number of free bits.
u16 *free_bits; // Free bits in each window.
size_t nwnd;
u32 bits_last; // bits in last window
u32 bits_last; // Bits in last window.
struct rb_root start_tree; // extents, sorted by 'start'
struct rb_root count_tree; // extents, sorted by 'count + start'
size_t count; // extents count
struct rb_root start_tree; // Extents, sorted by 'start'.
struct rb_root count_tree; // Extents, sorted by 'count + start'.
size_t count; // Extents count.
/*
* -1 Tree is activated but not updated (too many fragments)
* 0 - Tree is not activated
* 1 - Tree is activated and updated
* -1 Tree is activated but not updated (too many fragments).
* 0 - Tree is not activated.
* 1 - Tree is activated and updated.
*/
int uptodated;
size_t extent_min; // Minimal extent used while building
size_t extent_max; // Upper estimate of biggest free block
size_t extent_min; // Minimal extent used while building.
size_t extent_max; // Upper estimate of biggest free block.
/* Zone [bit, end) */
size_t zone_bit;
size_t zone_end;
bool set_tail; // not necessary in driver
bool set_tail; // Not necessary in driver.
bool inited;
};
@ -149,14 +149,14 @@ enum index_mutex_classed {
INDEX_MUTEX_TOTAL
};
/* ntfs_index - allocation unit inside directory */
/* ntfs_index - Allocation unit inside directory. */
struct ntfs_index {
struct runs_tree bitmap_run;
struct runs_tree alloc_run;
/* read/write access to 'bitmap_run'/'alloc_run' while ntfs_readdir */
struct rw_semaphore run_lock;
/*TODO: remove 'cmp'*/
/* TODO: Remove 'cmp'. */
NTFS_CMP_FUNC cmp;
u8 index_bits; // log2(root->index_block_size)
@ -165,10 +165,10 @@ struct ntfs_index {
u8 type; // index_mutex_classed
};
/* Minimum mft zone */
/* Minimum MFT zone. */
#define NTFS_MIN_MFT_ZONE 100
/* ntfs file system in-core superblock data */
/* NTFS file system in-core superblock data. */
struct ntfs_sb_info {
struct super_block *sb;
@ -189,23 +189,23 @@ struct ntfs_sb_info {
u8 cluster_bits;
u8 record_bits;
u64 maxbytes; // Maximum size for normal files
u64 maxbytes_sparse; // Maximum size for sparse file
u64 maxbytes; // Maximum size for normal files.
u64 maxbytes_sparse; // Maximum size for sparse file.
u32 flags; // See NTFS_FLAGS_XXX
u32 flags; // See NTFS_FLAGS_XXX.
CLST bad_clusters; // The count of marked bad clusters
CLST bad_clusters; // The count of marked bad clusters.
u16 max_bytes_per_attr; // maximum attribute size in record
u16 attr_size_tr; // attribute size threshold (320 bytes)
u16 max_bytes_per_attr; // Maximum attribute size in record.
u16 attr_size_tr; // Attribute size threshold (320 bytes).
/* Records in $Extend */
/* Records in $Extend. */
CLST objid_no;
CLST quota_no;
CLST reparse_no;
CLST usn_jrnl_no;
struct ATTR_DEF_ENTRY *def_table; // attribute definition table
struct ATTR_DEF_ENTRY *def_table; // Attribute definition table.
u32 def_entries;
u32 ea_max_size;
@ -218,13 +218,13 @@ struct ntfs_sb_info {
struct ntfs_inode *ni;
struct wnd_bitmap bitmap; // $MFT::Bitmap
/*
* MFT records [11-24) used to expand MFT itself
* MFT records [11-24) used to expand MFT itself.
* They are always marked as used in $MFT::Bitmap.
* 'reserved_bitmap' contains real bitmap of these records
* 'reserved_bitmap' contains real bitmap of these records.
*/
ulong reserved_bitmap; // bitmap of used records [11 - 24)
ulong reserved_bitmap; // Bitmap of used records [11 - 24)
size_t next_free; // The next record to allocate from
size_t used; // mft valid size in records
size_t used; // MFT valid size in records.
u32 recs_mirr; // Number of records in MFTMirr
u8 next_reserved;
u8 reserved_bitmap_inited;
@ -236,15 +236,15 @@ struct ntfs_sb_info {
} used;
struct {
u64 size; // in bytes
u64 blocks; // in blocks
u64 size; // In bytes.
u64 blocks; // In blocks.
u64 ser_num;
struct ntfs_inode *ni;
__le16 flags; // cached current VOLUME_INFO::flags, VOLUME_FLAG_DIRTY
__le16 flags; // Cached current VOLUME_INFO::flags, VOLUME_FLAG_DIRTY.
u8 major_ver;
u8 minor_ver;
char label[65];
bool real_dirty; /* real fs state*/
bool real_dirty; // Real fs state.
} volume;
struct {
@ -283,9 +283,7 @@ struct ntfs_sb_info {
struct ratelimit_state msg_ratelimit;
};
/*
* one MFT record(usually 1024 bytes), consists of attributes
*/
/* One MFT record(usually 1024 bytes), consists of attributes. */
struct mft_inode {
struct rb_node node;
struct ntfs_sb_info *sbi;
@ -297,7 +295,7 @@ struct mft_inode {
bool dirty;
};
/* nested class for ntfs_inode::ni_lock */
/* Nested class for ntfs_inode::ni_lock. */
enum ntfs_inode_mutex_lock_class {
NTFS_INODE_MUTEX_DIRTY,
NTFS_INODE_MUTEX_SECURITY,
@ -308,29 +306,31 @@ enum ntfs_inode_mutex_lock_class {
};
/*
* ntfs inode - extends linux inode. consists of one or more mft inodes
* struct ntfs_inode
*
* NTFS inode - Extends Linux inode. Consists of one or more MFT inodes.
*/
struct ntfs_inode {
struct mft_inode mi; // base record
/*
* Valid size: [0 - i_valid) - these range in file contains valid data
* Range [i_valid - inode->i_size) - contains 0
* Usually i_valid <= inode->i_size
* Valid size: [0 - i_valid) - these range in file contains valid data.
* Range [i_valid - inode->i_size) - contains 0.
* Usually i_valid <= inode->i_size.
*/
u64 i_valid;
struct timespec64 i_crtime;
struct mutex ni_lock;
/* file attributes from std */
/* File attributes from std. */
enum FILE_ATTRIBUTE std_fa;
__le32 std_security_id;
/*
* tree of mft_inode
* not empty when primary MFT record (usually 1024 bytes) can't save all attributes
* e.g. file becomes too fragmented or contains a lot of names
* Tree of mft_inode.
* Not empty when primary MFT record (usually 1024 bytes) can't save all attributes
* e.g. file becomes too fragmented or contains a lot of names.
*/
struct rb_root mi_tree;
@ -352,7 +352,7 @@ struct ntfs_inode {
struct {
struct runs_tree run;
struct ATTR_LIST_ENTRY *le; // 1K aligned memory
struct ATTR_LIST_ENTRY *le; // 1K aligned memory.
size_t size;
bool dirty;
} attr_list;
@ -381,7 +381,7 @@ enum REPARSE_SIGN {
REPARSE_LINK = 3
};
/* functions from attrib.c*/
/* Functions from attrib.c */
int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
struct runs_tree *run, const CLST *vcn);
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
@ -416,7 +416,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size);
/* functions from attrlist.c*/
/* Functions from attrlist.c */
void al_destroy(struct ntfs_inode *ni);
bool al_verify(struct ntfs_inode *ni);
int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr);
@ -442,12 +442,12 @@ static inline size_t al_aligned(size_t size)
return (size + 1023) & ~(size_t)1023;
}
/* globals from bitfunc.c */
/* Globals from bitfunc.c */
bool are_bits_clear(const ulong *map, size_t bit, size_t nbits);
bool are_bits_set(const ulong *map, size_t bit, size_t nbits);
size_t get_set_bits_ex(const ulong *map, size_t bit, size_t nbits);
/* globals from dir.c */
/* Globals from dir.c */
int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
u8 *buf, int buf_len);
int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
@ -458,7 +458,7 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
bool dir_is_empty(struct inode *dir);
extern const struct file_operations ntfs_dir_operations;
/* globals from file.c*/
/* Globals from file.c */
int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
struct kstat *stat, u32 request_mask, u32 flags);
void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
@ -472,7 +472,7 @@ extern const struct inode_operations ntfs_special_inode_operations;
extern const struct inode_operations ntfs_file_inode_operations;
extern const struct file_operations ntfs_file_operations;
/* globals from frecord.c */
/* Globals from frecord.c */
void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi);
struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni);
struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni);
@ -529,10 +529,10 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
u32 pages_per_frame);
/* globals from fslog.c */
/* Globals from fslog.c */
int log_replay(struct ntfs_inode *ni, bool *initialized);
/* globals from fsntfs.c */
/* Globals from fsntfs.c */
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
bool simple);
@ -598,7 +598,7 @@ int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim);
int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim);
/* globals from index.c */
/* Globals from index.c */
int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit);
void fnd_clear(struct ntfs_fnd *fnd);
static inline struct ntfs_fnd *fnd_get(void)
@ -638,7 +638,7 @@ int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
const struct ATTR_FILE_NAME *fname,
const struct NTFS_DUP_INFO *dup, int sync);
/* globals from inode.c */
/* Globals from inode.c */
struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
const struct cpu_str *name);
int ntfs_set_size(struct inode *inode, u64 new_size);
@ -662,7 +662,7 @@ extern const struct inode_operations ntfs_link_inode_operations;
extern const struct address_space_operations ntfs_aops;
extern const struct address_space_operations ntfs_aops_cmpr;
/* globals from name_i.c*/
/* Globals from name_i.c */
int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
const struct cpu_str *uni);
struct dentry *ntfs3_get_parent(struct dentry *child);
@ -670,7 +670,7 @@ struct dentry *ntfs3_get_parent(struct dentry *child);
extern const struct inode_operations ntfs_dir_inode_operations;
extern const struct inode_operations ntfs_special_inode_operations;
/* globals from record.c */
/* Globals from record.c */
int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi);
void mi_put(struct mft_inode *mi);
int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno);
@ -724,7 +724,7 @@ static inline void mi_get_ref(const struct mft_inode *mi, struct MFT_REF *ref)
ref->seq = mi->mrec->seq;
}
/* globals from run.c */
/* Globals from run.c */
bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
CLST *len, size_t *index);
void run_truncate(struct runs_tree *run, CLST vcn);
@ -753,13 +753,13 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
#endif
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn);
/* globals from super.c */
/* Globals from super.c */
void *ntfs_set_shared(void *ptr, u32 bytes);
void *ntfs_put_shared(void *ptr);
void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len);
int ntfs_discard(struct ntfs_sb_info *sbi, CLST Lcn, CLST Len);
/* globals from bitmap.c*/
/* Globals from bitmap.c */
int __init ntfs3_init_bitmap(void);
void ntfs3_exit_bitmap(void);
void wnd_close(struct wnd_bitmap *wnd);
@ -773,7 +773,7 @@ int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
/* Possible values for 'flags' 'wnd_find' */
/* Possible values for 'flags' 'wnd_find'. */
#define BITMAP_FIND_MARK_AS_USED 0x01
#define BITMAP_FIND_FULL 0x02
size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
@ -782,7 +782,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits);
void wnd_zone_set(struct wnd_bitmap *wnd, size_t Lcn, size_t Len);
int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range);
/* globals from upcase.c */
/* Globals from upcase.c */
int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
const u16 *upcase, bool bothcase);
int ntfs_cmp_names_cpu(const struct cpu_str *uni1, const struct le_str *uni2,
@ -822,7 +822,7 @@ static inline bool is_ntfs3(struct ntfs_sb_info *sbi)
return sbi->volume.major_ver >= 3;
}
/*(sb->s_flags & SB_ACTIVE)*/
/* (sb->s_flags & SB_ACTIVE) */
static inline bool is_mounted(struct ntfs_sb_info *sbi)
{
return !!sbi->sb->s_root;
@ -897,7 +897,7 @@ static inline bool run_is_empty(struct runs_tree *run)
return !run->count;
}
/* NTFS uses quad aligned bitmaps */
/* NTFS uses quad aligned bitmaps. */
static inline size_t bitmap_size(size_t bits)
{
return ALIGN((bits + 7) >> 3, 8);
@ -909,9 +909,7 @@ static inline size_t bitmap_size(size_t bits)
#define NTFS_TIME_GRAN 100
/*
* kernel2nt
*
* converts in-memory kernel timestamp into nt time
* kernel2nt - Convert in-memory kernel timestamp into NT time.
*/
static inline __le64 kernel2nt(const struct timespec64 *ts)
{
@ -922,9 +920,7 @@ static inline __le64 kernel2nt(const struct timespec64 *ts)
}
/*
* nt2kernel
*
* converts on-disk nt time into kernel timestamp
* nt2kernel - Convert on-disk NT time into kernel timestamp.
*/
static inline void nt2kernel(const __le64 tm, struct timespec64 *ts)
{
@ -940,13 +936,17 @@ static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
return sb->s_fs_info;
}
/* Align up on cluster boundary */
/*
* ntfs_up_cluster - Align up on cluster boundary.
*/
static inline u64 ntfs_up_cluster(const struct ntfs_sb_info *sbi, u64 size)
{
return (size + sbi->cluster_mask) & sbi->cluster_mask_inv;
}
/* Align up on cluster boundary */
/*
* ntfs_up_block - Align up on cluster boundary.
*/
static inline u64 ntfs_up_block(const struct super_block *sb, u64 size)
{
return (size + sb->s_blocksize - 1) & ~(u64)(sb->s_blocksize - 1);
@ -991,7 +991,7 @@ static inline int ni_ext_compress_bits(const struct ntfs_inode *ni)
return 0xb + (ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
}
/* bits - 0xc, 0xd, 0xe, 0xf, 0x10 */
/* Bits - 0xc, 0xd, 0xe, 0xf, 0x10 */
static inline void ni_set_ext_compress_bits(struct ntfs_inode *ni, u8 bits)
{
ni->ni_flags |= (bits - 0xb) & NI_FLAG_COMPRESSED_MASK;

View File

@ -18,15 +18,13 @@ static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
const __le16 *name, u8 name_len,
const u16 *upcase)
{
/* First, compare the type codes: */
/* First, compare the type codes. */
int diff = le32_to_cpu(left->type) - le32_to_cpu(type);
if (diff)
return diff;
/*
* They have the same type code, so we have to compare the names.
*/
/* They have the same type code, so we have to compare the names. */
return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
upcase, true);
}
@ -34,7 +32,7 @@ static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
/*
* mi_new_attt_id
*
* returns unused attribute id that is less than mrec->next_attr_id
* Return: Unused attribute id that is less than mrec->next_attr_id.
*/
static __le16 mi_new_attt_id(struct mft_inode *mi)
{
@ -50,7 +48,7 @@ static __le16 mi_new_attt_id(struct mft_inode *mi)
return id;
}
/* One record can store up to 1024/24 ~= 42 attributes */
/* One record can store up to 1024/24 ~= 42 attributes. */
free_id = 0;
max_id = 0;
@ -115,9 +113,7 @@ int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
}
/*
* mi_read
*
* reads MFT data
* mi_read - Read MFT data.
*/
int mi_read(struct mft_inode *mi, bool is_mft)
{
@ -178,7 +174,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
goto out;
ok:
/* check field 'total' only here */
/* Check field 'total' only here. */
if (le32_to_cpu(rec->total) != bpr) {
err = -EINVAL;
goto out;
@ -210,13 +206,13 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
return NULL;
}
/* Skip non-resident records */
/* Skip non-resident records. */
if (!is_rec_inuse(rec))
return NULL;
attr = Add2Ptr(rec, off);
} else {
/* Check if input attr inside record */
/* Check if input attr inside record. */
off = PtrOffset(rec, attr);
if (off >= used)
return NULL;
@ -233,27 +229,27 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
asize = le32_to_cpu(attr->size);
/* Can we use the first field (attr->type) */
/* Can we use the first field (attr->type)? */
if (off + 8 > used) {
static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
return NULL;
}
if (attr->type == ATTR_END) {
/* end of enumeration */
/* End of enumeration. */
return NULL;
}
/* 0x100 is last known attribute for now*/
/* 0x100 is last known attribute for now. */
t32 = le32_to_cpu(attr->type);
if ((t32 & 0xf) || (t32 > 0x100))
return NULL;
/* Check boundary */
/* Check boundary. */
if (off + asize > used)
return NULL;
/* Check size of attribute */
/* Check size of attribute. */
if (!attr->non_res) {
if (asize < SIZEOF_RESIDENT)
return NULL;
@ -270,7 +266,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
return attr;
}
/* Check some nonresident fields */
/* Check some nonresident fields. */
if (attr->name_len &&
le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
le16_to_cpu(attr->nres.run_off)) {
@ -290,9 +286,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
}
/*
* mi_find_attr
*
* finds the attribute by type and name and id
* mi_find_attr - Find the attribute by type and name and id.
*/
struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
enum ATTR_TYPE type, const __le16 *name,
@ -372,7 +366,7 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
} else if (mi_read(mi, is_mft)) {
;
} else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
/* Record is reused. Update its sequence number */
/* Record is reused. Update its sequence number. */
seq = le16_to_cpu(rec->seq) + 1;
if (!seq)
seq = 1;
@ -404,9 +398,7 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
}
/*
* mi_mark_free
*
* marks record as unused and marks it as free in bitmap
* mi_mark_free - Mark record as unused and marks it as free in bitmap.
*/
void mi_mark_free(struct mft_inode *mi)
{
@ -428,10 +420,9 @@ void mi_mark_free(struct mft_inode *mi)
}
/*
* mi_insert_attr
* mi_insert_attr - Reserve space for new attribute.
*
* reserves space for new attribute
* returns not full constructed attribute or NULL if not possible to create
* Return: Not full constructed attribute or NULL if not possible to create.
*/
struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
const __le16 *name, u8 name_len, u32 asize,
@ -468,7 +459,7 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
}
if (!attr) {
tail = 8; /* not used, just to suppress warning */
tail = 8; /* Not used, just to suppress warning. */
attr = Add2Ptr(rec, used - 8);
} else {
tail = used - PtrOffset(rec, attr);
@ -494,10 +485,9 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
}
/*
* mi_remove_attr
* mi_remove_attr - Remove the attribute from record.
*
* removes the attribute from record
* NOTE: The source attr will point to next attribute
* NOTE: The source attr will point to next attribute.
*/
bool mi_remove_attr(struct mft_inode *mi, struct ATTRIB *attr)
{
@ -543,7 +533,7 @@ bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
if (used + dsize > total)
return false;
nsize = asize + dsize;
// move tail
/* Move tail */
memmove(next + dsize, next, tail);
memset(next, 0, dsize);
used += dsize;
@ -585,10 +575,10 @@ int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
u32 tail = used - aoff - asize;
u32 dsize = sbi->record_size - used;
/* Make a maximum gap in current record */
/* Make a maximum gap in current record. */
memmove(next + dsize, next, tail);
/* Pack as much as possible */
/* Pack as much as possible. */
err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
&plen);
if (err < 0) {

View File

@ -16,22 +16,21 @@
#include "ntfs.h"
#include "ntfs_fs.h"
/* runs_tree is a continues memory. Try to avoid big size */
/* runs_tree is a contiguous memory. Try to avoid big size. */
#define NTFS3_RUN_MAX_BYTES 0x10000
struct ntfs_run {
CLST vcn; /* virtual cluster number */
CLST len; /* length in clusters */
CLST lcn; /* logical cluster number */
CLST vcn; /* Virtual cluster number. */
CLST len; /* Length in clusters. */
CLST lcn; /* Logical cluster number. */
};
/*
* run_lookup
* run_lookup - Lookup the index of a MCB entry that is first <= vcn.
*
* Lookup the index of a MCB entry that is first <= vcn.
* case of success it will return non-zero value and set
* 'index' parameter to index of entry been found.
* case of entry missing from list 'index' will be set to
* In case of success it will return non-zero value and set
* @index parameter to index of the entry found.
* In case of entry missing from list, 'index' will be set to
* point to insertion position for the entry in question.
*/
bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
@ -47,7 +46,7 @@ bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
min_idx = 0;
max_idx = run->count - 1;
/* Check boundary cases specially, 'cause they cover the often requests */
/* Check boundary cases specially, 'cause they cover the often requests. */
r = run->runs;
if (vcn < r->vcn) {
*index = 0;
@ -91,9 +90,7 @@ bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
}
/*
* run_consolidate
*
* consolidate runs starting from a given one.
* run_consolidate - Consolidate runs starting from a given one.
*/
static void run_consolidate(struct runs_tree *run, size_t index)
{
@ -164,7 +161,11 @@ remove_next_range:
}
}
/* returns true if range [svcn - evcn] is mapped*/
/*
* run_is_mapped_full
*
* Return: True if range [svcn - evcn] is mapped.
*/
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
{
size_t i;
@ -224,9 +225,7 @@ bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
}
/*
* run_truncate_head
*
* decommit the range before vcn
* run_truncate_head - Decommit the range before vcn.
*/
void run_truncate_head(struct runs_tree *run, CLST vcn)
{
@ -261,9 +260,7 @@ void run_truncate_head(struct runs_tree *run, CLST vcn)
}
/*
* run_truncate
*
* decommit the range after vcn
* run_truncate - Decommit the range after vcn.
*/
void run_truncate(struct runs_tree *run, CLST vcn)
{
@ -285,13 +282,13 @@ void run_truncate(struct runs_tree *run, CLST vcn)
}
/*
* At this point 'index' is set to
* position that should be thrown away (including index itself)
* At this point 'index' is set to position that
* should be thrown away (including index itself)
* Simple one - just set the limit.
*/
run->count = index;
/* Do not reallocate array 'runs'. Only free if possible */
/* Do not reallocate array 'runs'. Only free if possible. */
if (!index) {
kvfree(run->runs);
run->runs = NULL;
@ -299,7 +296,9 @@ void run_truncate(struct runs_tree *run, CLST vcn)
}
}
/* trim head and tail if necessary*/
/*
* run_truncate_around - Trim head and tail if necessary.
*/
void run_truncate_around(struct runs_tree *run, CLST vcn)
{
run_truncate_head(run, vcn);
@ -311,9 +310,10 @@ void run_truncate_around(struct runs_tree *run, CLST vcn)
/*
* run_add_entry
*
* sets location to known state.
* run to be added may overlap with existing location.
* returns false if of memory
* Sets location to known state.
* Run to be added may overlap with existing location.
*
* Return: false if out of memory.
*/
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
bool is_mft)
@ -336,7 +336,7 @@ bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
* Shortcut here would be case of
* range not been found but one been added
* continues previous run.
* this case I can directly make use of
* This case I can directly make use of
* existing range as my start point.
*/
if (!inrange && index > 0) {
@ -367,13 +367,13 @@ requires_new_range:
/*
* Check allocated space.
* If one is not enough to get one more entry
* then it will be reallocated
* then it will be reallocated.
*/
if (run->allocated < used + sizeof(struct ntfs_run)) {
size_t bytes;
struct ntfs_run *new_ptr;
/* Use power of 2 for 'bytes'*/
/* Use power of 2 for 'bytes'. */
if (!used) {
bytes = 64;
} else if (used <= 16 * PAGE_SIZE) {
@ -421,10 +421,10 @@ requires_new_range:
r = run->runs + index;
/*
* If one of ranges was not allocated
* then I have to split location I just matched.
* and insert current one
* a common case this requires tail to be reinserted
* If one of ranges was not allocated then we
* have to split location we just matched and
* insert current one.
* A common case this requires tail to be reinserted
* a recursive call.
*/
if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
@ -449,12 +449,12 @@ requires_new_range:
goto requires_new_range;
}
/* lcn should match one I'm going to add. */
/* lcn should match the one we're going to add. */
r->lcn = lcn;
}
/*
* If existing range fits then I'm done.
* If the existing range fits then we're done.
* Otherwise extend found one and fall back to range jocode.
*/
if (r->vcn + r->len < vcn + len)
@ -473,8 +473,8 @@ requires_new_range:
run_consolidate(run, index + 1);
/*
* a special case
* I have to add extra range a tail.
* A special case.
* We have to add extra range a tail.
*/
if (should_add_tail &&
!run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
@ -483,7 +483,11 @@ requires_new_range:
return true;
}
/*helper for attr_collapse_range, which is helper for fallocate(collapse_range)*/
/*
* run_collapse_range
*
* Helper for attr_collapse_range(),
* which is a helper for fallocate(collapse_range).
*/
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
{
size_t index, eat;
@ -491,7 +495,7 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
CLST end;
if (WARN_ON(!run_lookup(run, vcn, &index)))
return true; /* should never be here */
return true; /* Should never be here. */
e = run->runs + run->count;
r = run->runs + index;
@ -499,13 +503,13 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
if (vcn > r->vcn) {
if (r->vcn + r->len <= end) {
/* collapse tail of run */
/* Collapse tail of run. */
r->len = vcn - r->vcn;
} else if (r->lcn == SPARSE_LCN) {
/* collapse a middle part of sparsed run */
/* Collapse a middle part of sparsed run. */
r->len -= len;
} else {
/* collapse a middle part of normal run, split */
/* Collapse a middle part of normal run, split. */
if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
return false;
return run_collapse_range(run, vcn, len);
@ -526,7 +530,7 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
}
if (r->vcn + r->len <= end) {
/* eat this run */
/* Eat this run. */
eat_end = r + 1;
continue;
}
@ -546,9 +550,7 @@ bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
}
/*
* run_get_entry
*
* returns index-th mapped region
* run_get_entry - Return index-th mapped region.
*/
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
CLST *lcn, CLST *len)
@ -573,9 +575,7 @@ bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
}
/*
* run_packed_size
*
* calculates the size of packed int64
* run_packed_size - Calculate the size of packed int64.
*/
#ifdef __BIG_ENDIAN
static inline int run_packed_size(const s64 n)
@ -605,7 +605,7 @@ static inline int run_packed_size(const s64 n)
return (const u8 *)&n + sizeof(n) - p;
}
/* full trusted function. It does not check 'size' for errors */
/* Full trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
const u8 *p = (u8 *)&v;
@ -637,7 +637,7 @@ static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
}
}
/* full trusted function. It does not check 'size' for errors */
/* Full trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
u8 *p = (u8 *)&v;
@ -700,12 +700,12 @@ static inline int run_packed_size(const s64 n)
return 1 + p - (const u8 *)&n;
}
/* full trusted function. It does not check 'size' for errors */
/* Full trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
const u8 *p = (u8 *)&v;
/* memcpy( run_buf, &v, size); is it faster? */
/* memcpy( run_buf, &v, size); Is it faster? */
switch (size) {
case 8:
run_buf[7] = p[7];
@ -738,7 +738,7 @@ static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
u8 *p = (u8 *)&v;
/* memcpy( &v, run_buf, size); is it faster? */
/* memcpy( &v, run_buf, size); Is it faster? */
switch (size) {
case 8:
p[7] = run_buf[7];
@ -769,11 +769,10 @@ static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
#endif
/*
* run_pack
* run_pack - Pack runs into buffer.
*
* packs runs into buffer
* packed_vcns - how much runs we have packed
* packed_size - how much bytes we have used run_buf
* packed_vcns - How much runs we have packed.
* packed_size - How much bytes we have used run_buf.
*/
int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
u32 run_buf_size, CLST *packed_vcns)
@ -807,10 +806,10 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
if (next_vcn > evcn1)
len = evcn1 - vcn;
/* how much bytes required to pack len */
/* How much bytes required to pack len. */
size_size = run_packed_size(len);
/* offset_size - how much bytes is packed dlcn */
/* offset_size - How much bytes is packed dlcn. */
if (lcn == SPARSE_LCN) {
offset_size = 0;
dlcn = 0;
@ -825,20 +824,20 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
if (tmp <= 0)
goto out;
/* can we store this entire run */
/* Can we store this entire run? */
if (tmp < size_size)
goto out;
if (run_buf) {
/* pack run header */
/* Pack run header. */
run_buf[0] = ((u8)(size_size | (offset_size << 4)));
run_buf += 1;
/* Pack the length of run */
/* Pack the length of run. */
run_pack_s64(run_buf, size_size, len);
run_buf += size_size;
/* Pack the offset from previous lcn */
/* Pack the offset from previous LCN. */
run_pack_s64(run_buf, offset_size, dlcn);
run_buf += offset_size;
}
@ -858,7 +857,7 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
}
out:
/* Store last zero */
/* Store last zero. */
if (run_buf)
run_buf[0] = 0;
@ -869,10 +868,9 @@ error:
}
/*
* run_unpack
* run_unpack - Unpack packed runs from @run_buf.
*
* unpacks packed runs from "run_buf"
* returns error, if negative, or real used bytes
* Return: Error if negative, or real used bytes.
*/
int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
@ -882,7 +880,7 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
const u8 *run_last, *run_0;
bool is_mft = ino == MFT_REC_MFT;
/* Check for empty */
/* Check for empty. */
if (evcn + 1 == svcn)
return 0;
@ -894,12 +892,12 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
prev_lcn = 0;
vcn64 = svcn;
/* Read all runs the chain */
/* size_size - how much bytes is packed len */
/* Read all runs in the chain. */
/* size_size - How much bytes is packed len. */
while (run_buf < run_last) {
/* size_size - how much bytes is packed len */
/* size_size - How much bytes is packed len. */
u8 size_size = *run_buf & 0xF;
/* offset_size - how much bytes is packed dlcn */
/* offset_size - How much bytes is packed dlcn. */
u8 offset_size = *run_buf++ >> 4;
u64 len;
@ -908,8 +906,8 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
/*
* Unpack runs.
* NOTE: runs are stored little endian order
* "len" is unsigned value, "dlcn" is signed
* NOTE: Runs are stored in little endian order,
* "len" is unsigned value, "dlcn" is signed.
* Large positive number requires to store 5 bytes
* e.g.: 05 FF 7E FF FF 00 00 00
*/
@ -917,7 +915,7 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
return -EINVAL;
len = run_unpack_s64(run_buf, size_size, 0);
/* skip size_size */
/* Skip size_size. */
run_buf += size_size;
if (!len)
@ -928,10 +926,10 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
else if (offset_size <= 8) {
s64 dlcn;
/* initial value of dlcn is -1 or 0 */
/* Initial value of dlcn is -1 or 0. */
dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
/* skip offset_size */
/* Skip offset_size. */
run_buf += offset_size;
if (!dlcn)
@ -942,7 +940,7 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
return -EINVAL;
next_vcn = vcn64 + len;
/* check boundary */
/* Check boundary. */
if (next_vcn > evcn + 1)
return -EINVAL;
@ -958,14 +956,17 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
}
#endif
if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
/* lcn range is out of volume */
/* LCN range is out of volume. */
return -EINVAL;
}
if (!run)
; /* called from check_attr(fslog.c) to check run */
; /* Called from check_attr(fslog.c) to check run. */
else if (run == RUN_DEALLOCATE) {
/* called from ni_delete_all to free clusters without storing in run */
/*
* Called from ni_delete_all to free clusters
* without storing in run.
*/
if (lcn != SPARSE_LCN64)
mark_as_free_ex(sbi, lcn, len, true);
} else if (vcn64 >= vcn) {
@ -983,7 +984,7 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
}
if (vcn64 != evcn + 1) {
/* not expected length of unpacked runs */
/* Not expected length of unpacked runs. */
return -EINVAL;
}
@ -992,11 +993,11 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
#ifdef NTFS3_CHECK_FREE_CLST
/*
* run_unpack_ex
* run_unpack_ex - Unpack packed runs from "run_buf".
*
* unpacks packed runs from "run_buf"
* checks unpacked runs to be used in bitmap
* returns error, if negative, or real used bytes
* Checks unpacked runs to be used in bitmap.
*
* Return: Error if negative, or real used bytes.
*/
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
@ -1036,17 +1037,17 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
continue;
down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
/* Check for free blocks */
/* Check for free blocks. */
ok = wnd_is_used(wnd, lcn, len);
up_read(&wnd->rw_lock);
if (ok)
continue;
/* Looks like volume is corrupted */
/* Looks like volume is corrupted. */
ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
if (down_write_trylock(&wnd->rw_lock)) {
/* mark all zero bits as used in range [lcn, lcn+len) */
/* Mark all zero bits as used in range [lcn, lcn+len). */
CLST i, lcn_f = 0, len_f = 0;
err = 0;
@ -1079,8 +1080,8 @@ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
/*
* run_get_highest_vcn
*
* returns the highest vcn from a mapping pairs array
* it used while replaying log file
* Return the highest VCN from a mapping pairs array.
* It is used while replaying log file.
*/
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
{

View File

@ -7,15 +7,15 @@
* terminology
*
* cluster - allocation unit - 512,1K,2K,4K,...,2M
* vcn - virtual cluster number - offset inside the file in clusters
* vbo - virtual byte offset - offset inside the file in bytes
* lcn - logical cluster number - 0 based cluster in clusters heap
* lbo - logical byte offset - absolute position inside volume
* run - maps vcn to lcn - stored in attributes in packed form
* attr - attribute segment - std/name/data etc records inside MFT
* mi - mft inode - one MFT record(usually 1024 bytes or 4K), consists of attributes
* ni - ntfs inode - extends linux inode. consists of one or more mft inodes
* index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size
* vcn - virtual cluster number - Offset inside the file in clusters.
* vbo - virtual byte offset - Offset inside the file in bytes.
* lcn - logical cluster number - 0 based cluster in clusters heap.
* lbo - logical byte offset - Absolute position inside volume.
* run - maps VCN to LCN - Stored in attributes in packed form.
* attr - attribute segment - std/name/data etc records inside MFT.
* mi - MFT inode - One MFT record(usually 1024 bytes or 4K), consists of attributes.
* ni - NTFS inode - Extends Linux inode. Consists of one or more MFT inodes.
* index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size.
*
* WSL - Windows Subsystem for Linux
* https://docs.microsoft.com/en-us/windows/wsl/file-permissions
@ -45,7 +45,8 @@
#ifdef CONFIG_PRINTK
/*
* Trace warnings/notices/errors
* ntfs_printk - Trace warnings/notices/errors.
*
* Thanks Joe Perches <joe@perches.com> for implementation
*/
void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
@ -55,7 +56,7 @@ void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
int level;
struct ntfs_sb_info *sbi = sb->s_fs_info;
/*should we use different ratelimits for warnings/notices/errors? */
/* Should we use different ratelimits for warnings/notices/errors? */
if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
return;
@ -70,9 +71,13 @@ void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
}
static char s_name_buf[512];
static atomic_t s_name_buf_cnt = ATOMIC_INIT(1); // 1 means 'free s_name_buf'
static atomic_t s_name_buf_cnt = ATOMIC_INIT(1); // 1 means 'free s_name_buf'.
/* print warnings/notices/errors about inode using name or inode number */
/*
* ntfs_inode_printk
*
* Print warnings/notices/errors about inode using name or inode number.
*/
void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
{
struct super_block *sb = inode->i_sb;
@ -85,7 +90,7 @@ void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
return;
/* use static allocated buffer, if possible */
/* Use static allocated buffer, if possible. */
name = atomic_dec_and_test(&s_name_buf_cnt)
? s_name_buf
: kmalloc(sizeof(s_name_buf), GFP_NOFS);
@ -98,11 +103,11 @@ void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
spin_lock(&de->d_lock);
snprintf(name, name_len, " \"%s\"", de->d_name.name);
spin_unlock(&de->d_lock);
name[name_len] = 0; /* to be sure*/
name[name_len] = 0; /* To be sure. */
} else {
name[0] = 0;
}
dput(de); /* cocci warns if placed in branch "if (de)" */
dput(de); /* Cocci warns if placed in branch "if (de)" */
}
va_start(args, fmt);
@ -125,12 +130,12 @@ void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
/*
* Shared memory struct.
*
* on-disk ntfs's upcase table is created by ntfs formatter
* 'upcase' table is 128K bytes of memory
* we should read it into memory when mounting
* Several ntfs volumes likely use the same 'upcase' table
* It is good idea to share in-memory 'upcase' table between different volumes
* Unfortunately winxp/vista/win7 use different upcase tables
* On-disk ntfs's upcase table is created by ntfs formatter.
* 'upcase' table is 128K bytes of memory.
* We should read it into memory when mounting.
* Several ntfs volumes likely use the same 'upcase' table.
* It is good idea to share in-memory 'upcase' table between different volumes.
* Unfortunately winxp/vista/win7 use different upcase tables.
*/
static DEFINE_SPINLOCK(s_shared_lock);
@ -143,8 +148,9 @@ static struct {
/*
* ntfs_set_shared
*
* Returns 'ptr' if pointer was saved in shared memory
* Returns NULL if pointer was not shared
* Return:
* * @ptr - If pointer was saved in shared memory.
* * NULL - If pointer was not shared.
*/
void *ntfs_set_shared(void *ptr, u32 bytes)
{
@ -177,8 +183,9 @@ void *ntfs_set_shared(void *ptr, u32 bytes)
/*
* ntfs_put_shared
*
* Returns 'ptr' if pointer is not shared anymore
* Returns NULL if pointer is still shared
* Return:
* * @ptr - If pointer is not shared anymore.
* * NULL - If pointer is still shared.
*/
void *ntfs_put_shared(void *ptr)
{
@ -353,7 +360,10 @@ static noinline int ntfs_parse_options(struct super_block *sb, char *options,
out:
if (!strcmp(nls_name[0] ? nls_name : CONFIG_NLS_DEFAULT, "utf8")) {
/* For UTF-8 use utf16s_to_utf8s/utf8s_to_utf16s instead of nls */
/*
* For UTF-8 use utf16s_to_utf8s()/utf8s_to_utf16s()
* instead of NLS.
*/
nls = NULL;
} else if (nls_name[0]) {
nls = load_nls(nls_name);
@ -383,7 +393,7 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *data)
if (data && !orig_data)
return -ENOMEM;
/* Store original options */
/* Store original options. */
memcpy(&old_opts, &sbi->options, sizeof(old_opts));
clear_mount_options(&sbi->options);
memset(&sbi->options, 0, sizeof(sbi->options));
@ -465,7 +475,9 @@ static void init_once(void *foo)
inode_init_once(&ni->vfs_inode);
}
/* noinline to reduce binary size*/
/*
* put_ntfs - Noinline to reduce binary size.
*/
static noinline void put_ntfs(struct ntfs_sb_info *sbi)
{
kfree(sbi->new_rec);
@ -510,7 +522,7 @@ static void ntfs_put_super(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
/*mark rw ntfs as clear, if possible*/
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
put_ntfs(sbi);
@ -581,7 +593,9 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
/*super_operations::sync_fs*/
/*
* ntfs_sync_fs - super_operations::sync_fs
*/
static int ntfs_sync_fs(struct super_block *sb, int wait)
{
int err = 0, err2;
@ -683,10 +697,12 @@ static const struct export_operations ntfs_export_ops = {
.commit_metadata = ntfs_nfs_commit_metadata,
};
/* Returns Gb,Mb to print with "%u.%02u Gb" */
/*
* format_size_gb - Return Gb,Mb to print with "%u.%02u Gb".
*/
static u32 format_size_gb(const u64 bytes, u32 *mb)
{
/* Do simple right 30 bit shift of 64 bit value */
/* Do simple right 30 bit shift of 64 bit value. */
u64 kbytes = bytes >> 10;
u32 kbytes32 = kbytes;
@ -704,7 +720,9 @@ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
: (1u << (0 - boot->sectors_per_clusters));
}
/* inits internal info from on-disk boot sector*/
/*
* ntfs_init_from_boot - Init internal info from on-disk boot sector.
*/
static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
u64 dev_size)
{
@ -755,14 +773,14 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
if (mlcn2 * sct_per_clst >= sectors)
goto out;
/* Check MFT record size */
/* Check MFT record size. */
if ((boot->record_size < 0 &&
SECTOR_SIZE > (2U << (-boot->record_size))) ||
(boot->record_size >= 0 && !is_power_of_2(boot->record_size))) {
goto out;
}
/* Check index record size */
/* Check index record size. */
if ((boot->index_size < 0 &&
SECTOR_SIZE > (2U << (-boot->index_size))) ||
(boot->index_size >= 0 && !is_power_of_2(boot->index_size))) {
@ -776,9 +794,9 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
gb = format_size_gb(fs_size, &mb);
/*
* - Volume formatted and mounted with the same sector size
* - Volume formatted 4K and mounted as 512
* - Volume formatted 512 and mounted as 4K
* - Volume formatted and mounted with the same sector size.
* - Volume formatted 4K and mounted as 512.
* - Volume formatted 512 and mounted as 4K.
*/
if (sbi->sector_size != sector_size) {
ntfs_warn(sb,
@ -820,7 +838,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->volume.ser_num = le64_to_cpu(boot->serial_num);
sbi->volume.size = sectors << sbi->sector_bits;
/* warning if RAW volume */
/* Warning if RAW volume. */
if (dev_size < fs_size) {
u32 mb0, gb0;
@ -834,7 +852,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
clusters = sbi->volume.size >> sbi->cluster_bits;
#ifndef CONFIG_NTFS3_64BIT_CLUSTER
/* 32 bits per cluster */
/* 32 bits per cluster. */
if (clusters >> 32) {
ntfs_notice(
sb,
@ -872,7 +890,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->blocks_per_cluster = sbi->cluster_size >> sb->s_blocksize_bits;
sbi->volume.blocks = sbi->volume.size >> sb->s_blocksize_bits;
/* Maximum size for normal files */
/* Maximum size for normal files. */
sbi->maxbytes = (clusters << sbi->cluster_bits) - 1;
#ifdef CONFIG_NTFS3_64BIT_CLUSTER
@ -880,7 +898,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->maxbytes = -1;
sbi->maxbytes_sparse = -1;
#else
/* Maximum size for sparse file */
/* Maximum size for sparse file. */
sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1;
#endif
@ -892,7 +910,9 @@ out:
return err;
}
/* try to mount*/
/*
* ntfs_fill_super - Try to mount.
*/
static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
{
int err;
@ -945,7 +965,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
sb_set_blocksize(sb, PAGE_SIZE);
/* parse boot */
/* Parse boot. */
err = ntfs_init_from_boot(sb, rq ? queue_logical_block_size(rq) : 512,
bd_inode->i_size);
if (err)
@ -964,8 +984,8 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
#endif
/*
* Load $Volume. This should be done before LogFile
* 'cause 'sbi->volume.ni' is used 'ntfs_set_state'
* Load $Volume. This should be done before $LogFile
* 'cause 'sbi->volume.ni' is used 'ntfs_set_state'.
*/
ref.low = cpu_to_le32(MFT_REC_VOL);
ref.seq = cpu_to_le16(MFT_REC_VOL);
@ -979,13 +999,13 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
ni = ntfs_i(inode);
/* Load and save label (not necessary) */
/* Load and save label (not necessary). */
attr = ni_find_attr(ni, NULL, NULL, ATTR_LABEL, NULL, 0, NULL, NULL);
if (!attr) {
/* It is ok if no ATTR_LABEL */
} else if (!attr->non_res && !is_attr_ext(attr)) {
/* $AttrDef allows labels to be up to 128 symbols */
/* $AttrDef allows labels to be up to 128 symbols. */
err = utf16s_to_utf8s(resident_data(attr),
le32_to_cpu(attr->res.data_size) >> 1,
UTF16_LITTLE_ENDIAN, sbi->volume.label,
@ -993,7 +1013,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
if (err < 0)
sbi->volume.label[0] = 0;
} else {
/* should we break mounting here? */
/* Should we break mounting here? */
//err = -EINVAL;
//goto out;
}
@ -1017,7 +1037,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
sbi->volume.ni = ni;
inode = NULL;
/* Load $MFTMirr to estimate recs_mirr */
/* Load $MFTMirr to estimate recs_mirr. */
ref.low = cpu_to_le32(MFT_REC_MIRR);
ref.seq = cpu_to_le16(MFT_REC_MIRR);
inode = ntfs_iget5(sb, &ref, &NAME_MIRROR);
@ -1033,7 +1053,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
iput(inode);
/* Load LogFile to replay */
/* Load $LogFile to replay. */
ref.low = cpu_to_le32(MFT_REC_LOG);
ref.seq = cpu_to_le16(MFT_REC_LOG);
inode = ntfs_iget5(sb, &ref, &NAME_LOGFILE);
@ -1072,7 +1092,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
}
}
/* Load $MFT */
/* Load $MFT. */
ref.low = cpu_to_le32(MFT_REC_MFT);
ref.seq = cpu_to_le16(1);
@ -1100,7 +1120,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
sbi->mft.ni = ni;
/* Load $BadClus */
/* Load $BadClus. */
ref.low = cpu_to_le32(MFT_REC_BADCLUST);
ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
@ -1125,7 +1145,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
iput(inode);
/* Load $Bitmap */
/* Load $Bitmap. */
ref.low = cpu_to_le32(MFT_REC_BITMAP);
ref.seq = cpu_to_le16(MFT_REC_BITMAP);
inode = ntfs_iget5(sb, &ref, &NAME_BITMAP);
@ -1145,14 +1165,14 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
}
#endif
/* Check bitmap boundary */
/* Check bitmap boundary. */
tt = sbi->used.bitmap.nbits;
if (inode->i_size < bitmap_size(tt)) {
err = -EINVAL;
goto out;
}
/* Not necessary */
/* Not necessary. */
sbi->used.bitmap.set_tail = true;
err = wnd_init(&sbi->used.bitmap, sbi->sb, tt);
if (err)
@ -1160,12 +1180,12 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
iput(inode);
/* Compute the mft zone */
/* Compute the MFT zone. */
err = ntfs_refresh_zone(sbi);
if (err)
goto out;
/* Load $AttrDef */
/* Load $AttrDef. */
ref.low = cpu_to_le32(MFT_REC_ATTR);
ref.seq = cpu_to_le16(MFT_REC_ATTR);
inode = ntfs_iget5(sbi->sb, &ref, &NAME_ATTRDEF);
@ -1229,7 +1249,7 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
}
iput(inode);
/* Load $UpCase */
/* Load $UpCase. */
ref.low = cpu_to_le32(MFT_REC_UPCASE);
ref.seq = cpu_to_le16(MFT_REC_UPCASE);
inode = ntfs_iget5(sb, &ref, &NAME_UPCASE);
@ -1284,29 +1304,29 @@ static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
inode = NULL;
if (is_ntfs3(sbi)) {
/* Load $Secure */
/* Load $Secure. */
err = ntfs_security_init(sbi);
if (err)
goto out;
/* Load $Extend */
/* Load $Extend. */
err = ntfs_extend_init(sbi);
if (err)
goto load_root;
/* Load $Extend\$Reparse */
/* Load $Extend\$Reparse. */
err = ntfs_reparse_init(sbi);
if (err)
goto load_root;
/* Load $Extend\$ObjId */
/* Load $Extend\$ObjId. */
err = ntfs_objid_init(sbi);
if (err)
goto load_root;
}
load_root:
/* Load root */
/* Load root. */
ref.low = cpu_to_le32(MFT_REC_ROOT);
ref.seq = cpu_to_le16(MFT_REC_ROOT);
inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
@ -1369,9 +1389,7 @@ void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len)
}
/*
* ntfs_discard
*
* issue a discard request (trim for SSD)
* ntfs_discard - Issue a discard request (trim for SSD).
*/
int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
{
@ -1391,10 +1409,10 @@ int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
lbo = (u64)lcn << sbi->cluster_bits;
bytes = (u64)len << sbi->cluster_bits;
/* Align up 'start' on discard_granularity */
/* Align up 'start' on discard_granularity. */
start = (lbo + sbi->discard_granularity - 1) &
sbi->discard_granularity_mask_inv;
/* Align down 'end' on discard_granularity */
/* Align down 'end' on discard_granularity. */
end = (lbo + bytes) & sbi->discard_granularity_mask_inv;
sb = sbi->sb;
@ -1443,7 +1461,7 @@ static int __init init_ntfs_fs(void)
pr_notice("ntfs3: Activated 32 bits per cluster\n");
#endif
#ifdef CONFIG_NTFS3_LZX_XPRESS
pr_notice("ntfs3: Read-only lzx/xpress compression included\n");
pr_notice("ntfs3: Read-only LZX/Xpress compression included\n");
#endif
err = ntfs3_init_bitmap();

View File

@ -4,6 +4,7 @@
* Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
*
*/
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
@ -25,14 +26,16 @@ static inline u16 upcase_unicode_char(const u16 *upcase, u16 chr)
}
/*
* ntfs_cmp_names
*
* Thanks Kari Argillander <kari.argillander@gmail.com> for idea and implementation 'bothcase'
*
* Straight way to compare names:
* - case insensitive
* - if name equals and 'bothcases' then
* - case sensitive
* 'Straigth way' code scans input names twice in worst case
* Optimized code scans input names only once
* - Case insensitive
* - If name equals and 'bothcases' then
* - Case sensitive
 * 'Straight way' code scans input names twice in worst case.
* Optimized code scans input names only once.
*/
int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
const u16 *upcase, bool bothcase)

View File

@ -41,7 +41,7 @@ static inline size_t packed_ea_size(const struct EA_FULL *ea)
/*
* find_ea
*
* assume there is at least one xattr in the list
* Assume there is at least one xattr in the list.
*/
static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes,
const char *name, u8 name_len, u32 *off)
@ -69,11 +69,9 @@ static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes,
}
/*
* ntfs_read_ea
*
* reads all extended attributes
* ea - new allocated memory
* info - pointer into resident data
* ntfs_read_ea - Read all extended attributes.
* @ea: New allocated memory.
* @info: Pointer into resident data.
*/
static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
size_t add_bytes, const struct EA_INFO **info)
@ -101,7 +99,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
if (!*info)
return -EINVAL;
/* Check Ea limit */
/* Check Ea limit. */
size = le32_to_cpu((*info)->size);
if (size > ni->mi.sbi->ea_max_size)
return -EFBIG;
@ -109,7 +107,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
if (attr_size(attr_ea) > ni->mi.sbi->ea_max_size)
return -EFBIG;
/* Allocate memory for packed Ea */
/* Allocate memory for packed Ea. */
ea_p = kmalloc(size + add_bytes, GFP_NOFS);
if (!ea_p)
return -ENOMEM;
@ -150,11 +148,12 @@ out:
/*
* ntfs_list_ea
*
* copy a list of xattrs names into the buffer
* provided, or compute the buffer size required
* Copy a list of xattrs names into the buffer
* provided, or compute the buffer size required.
*
* Returns a negative error number on failure, or the number of bytes
* used / required on success.
* Return:
 * * Number of bytes used / required on success.
* * -ERRNO - on failure
*/
static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
size_t bytes_per_buffer)
@ -175,7 +174,7 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
size = le32_to_cpu(info->size);
/* Enumerate all xattrs */
/* Enumerate all xattrs. */
for (ret = 0, off = 0; off < size; off += unpacked_ea_size(ea)) {
ea = Add2Ptr(ea_all, off);
@ -227,7 +226,7 @@ static int ntfs_get_ea(struct inode *inode, const char *name, size_t name_len,
if (!info)
goto out;
/* Enumerate all xattrs */
/* Enumerate all xattrs. */
if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off)) {
err = -ENODATA;
goto out;
@ -322,11 +321,11 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
*/
if (val_size && le16_to_cpu(ea->elength) == val_size &&
!memcmp(ea->name + ea->name_len + 1, value, val_size)) {
/* xattr already contains the required value */
/* xattr already contains the required value. */
goto out;
}
/* Remove current xattr */
/* Remove current xattr. */
if (ea->flags & FILE_NEED_EA)
le16_add_cpu(&ea_info.count, -1);
@ -342,7 +341,7 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
ea_info.size = cpu_to_le32(size);
if ((flags & XATTR_REPLACE) && !val_size) {
/* remove xattr */
/* Remove xattr. */
goto update_ea;
}
} else {
@ -360,7 +359,7 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
}
}
/* append new xattr */
/* Append new xattr. */
new_ea = Add2Ptr(ea_all, size);
new_ea->size = cpu_to_le32(add);
new_ea->flags = 0;
@ -371,14 +370,14 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
memcpy(new_ea->name + name_len + 1, value, val_size);
new_pack = le16_to_cpu(ea_info.size_pack) + packed_ea_size(new_ea);
/* should fit into 16 bits */
/* Should fit into 16 bits. */
if (new_pack > 0xffff) {
err = -EFBIG; // -EINVAL?
goto out;
}
ea_info.size_pack = cpu_to_le16(new_pack);
/* new size of ATTR_EA */
/* New size of ATTR_EA. */
size += add;
if (size > sbi->ea_max_size) {
err = -EFBIG; // -EINVAL?
@ -389,7 +388,7 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name,
update_ea:
if (!info) {
/* Create xattr */
/* Create xattr. */
if (!size) {
err = 0;
goto out;
@ -419,7 +418,7 @@ update_ea:
}
if (!size) {
/* delete xattr, ATTR_EA_INFO */
/* Delete xattr, ATTR_EA_INFO */
err = ni_remove_attr_le(ni, attr, le);
if (err)
goto out;
@ -441,7 +440,7 @@ update_ea:
}
if (!size) {
/* delete xattr, ATTR_EA */
/* Delete xattr, ATTR_EA */
err = ni_remove_attr_le(ni, attr, le);
if (err)
goto out;
@ -459,7 +458,7 @@ update_ea:
mi->dirty = true;
}
/* Check if we delete the last xattr */
/* Check if we delete the last xattr. */
if (size)
ni->ni_flags |= NI_FLAG_EA;
else
@ -498,12 +497,12 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
int err;
void *buf;
/* allocate PATH_MAX bytes */
/* Allocate PATH_MAX bytes. */
buf = __getname();
if (!buf)
return ERR_PTR(-ENOMEM);
/* Possible values of 'type' was already checked above */
/* Possible values of 'type' was already checked above. */
if (type == ACL_TYPE_ACCESS) {
name = XATTR_NAME_POSIX_ACL_ACCESS;
name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
@ -520,7 +519,7 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
if (!locked)
ni_unlock(ni);
/* Translate extended attribute to acl */
/* Translate extended attribute to acl. */
if (err >= 0) {
acl = posix_acl_from_xattr(mnt_userns, buf, err);
if (!IS_ERR(acl))
@ -535,9 +534,7 @@ static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
}
/*
* ntfs_get_acl
*
* inode_operations::get_acl
* ntfs_get_acl - inode_operations::get_acl
*/
struct posix_acl *ntfs_get_acl(struct inode *inode, int type)
{
@ -573,8 +570,8 @@ static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
if (!err) {
/*
* acl can be exactly represented in the
* traditional file mode permission bits
* ACL can be exactly represented in the
* traditional file mode permission bits.
*/
acl = NULL;
}
@ -620,9 +617,7 @@ out:
}
/*
* ntfs_set_acl
*
* inode_operations::set_acl
* ntfs_set_acl - inode_operations::set_acl
*/
int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
struct posix_acl *acl, int type)
@ -688,7 +683,9 @@ release_and_out:
}
/*
* Initialize the ACLs of a new inode. Called from ntfs_create_inode.
* ntfs_init_acl - Initialize the ACLs of a new inode.
*
* Called from ntfs_create_inode().
*/
int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
struct inode *dir)
@ -697,7 +694,7 @@ int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
int err;
/*
* TODO refactoring lock
* TODO: Refactoring lock.
* ni_lock(dir) ... -> posix_acl_create(dir,...) -> ntfs_get_acl -> ni_lock(dir)
*/
inode->i_default_acl = NULL;
@ -749,9 +746,7 @@ out:
#endif
/*
* ntfs_acl_chmod
*
* helper for 'ntfs3_setattr'
* ntfs_acl_chmod - Helper for ntfs3_setattr().
*/
int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode)
{
@ -767,15 +762,13 @@ int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode)
}
/*
* ntfs_permission
*
* inode_operations::permission
* ntfs_permission - inode_operations::permission
*/
int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
int mask)
{
if (ntfs_sb(inode->i_sb)->options.no_acs_rules) {
/* "no access rules" mode - allow all changes */
/* "No access rules" mode - Allow all changes. */
return 0;
}
@ -783,9 +776,7 @@ int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
}
/*
* ntfs_listxattr
*
* inode_operations::listxattr
* ntfs_listxattr - inode_operations::listxattr
*/
ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
@ -815,7 +806,7 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
struct ntfs_inode *ni = ntfs_i(inode);
size_t name_len = strlen(name);
/* Dispatch request */
/* Dispatch request. */
if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
!memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
/* system.dos_attrib */
@ -851,7 +842,7 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
size_t sd_size = 0;
if (!is_ntfs3(ni->mi.sbi)) {
/* we should get nt4 security */
/* We should get nt4 security. */
err = -EINVAL;
goto out;
} else if (le32_to_cpu(ni->std_security_id) <
@ -901,7 +892,7 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
goto out;
}
#endif
/* deal with ntfs extended attribute */
/* Deal with NTFS extended attribute. */
err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
out:
@ -909,9 +900,7 @@ out:
}
/*
* ntfs_setxattr
*
* inode_operations::setxattr
* ntfs_setxattr - inode_operations::setxattr
*/
static noinline int ntfs_setxattr(const struct xattr_handler *handler,
struct user_namespace *mnt_userns,
@ -924,7 +913,7 @@ static noinline int ntfs_setxattr(const struct xattr_handler *handler,
size_t name_len = strlen(name);
enum FILE_ATTRIBUTE new_fa;
/* Dispatch request */
/* Dispatch request. */
if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
!memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
if (sizeof(u8) != size)
@ -940,7 +929,7 @@ static noinline int ntfs_setxattr(const struct xattr_handler *handler,
new_fa = cpu_to_le32(*(u32 *)value);
if (S_ISREG(inode->i_mode)) {
/* Process compressed/sparsed in special way*/
/* Process compressed/sparsed in special way. */
ni_lock(ni);
err = ni_new_attr_flags(ni, new_fa);
ni_unlock(ni);
@ -950,7 +939,7 @@ static noinline int ntfs_setxattr(const struct xattr_handler *handler,
set_new_fa:
/*
* Thanks Mark Harmstone:
* keep directory bit consistency
* Keep directory bit consistency.
*/
if (S_ISDIR(inode->i_mode))
new_fa |= FILE_ATTRIBUTE_DIRECTORY;
@ -963,7 +952,7 @@ set_new_fa:
inode->i_mode &= ~0222;
else
inode->i_mode |= 0222;
/* std attribute always in primary record */
/* Std attribute always in primary record. */
ni->mi.dirty = true;
mark_inode_dirty(inode);
}
@ -981,8 +970,8 @@ set_new_fa:
if (!is_ntfs3(ni->mi.sbi)) {
/*
* we should replace ATTR_SECURE
* Skip this way cause it is nt4 feature
* We should replace ATTR_SECURE.
* Skip this way cause it is nt4 feature.
*/
err = -EINVAL;
goto out;
@ -1007,7 +996,7 @@ set_new_fa:
err = -EINVAL;
} else if (std->security_id != security_id) {
std->security_id = ni->std_security_id = security_id;
/* std attribute always in primary record */
/* Std attribute always in primary record. */
ni->mi.dirty = true;
mark_inode_dirty(&ni->vfs_inode);
}
@ -1031,7 +1020,7 @@ set_new_fa:
goto out;
}
#endif
/* deal with ntfs extended attribute */
/* Deal with NTFS extended attribute. */
err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
out: