Merge pull request #1099 from littlefs-project/fix-remove-double-deorphan

Fix double deorphan caused by relocation mid dir remove
Christopher Haster 2025-05-13 00:44:26 -05:00 committed by GitHub
commit d73fb8ef3c
3 changed files with 243 additions and 4 deletions

lfs.c

@@ -3932,7 +3932,9 @@ static int lfs_remove_(lfs_t *lfs, const char *path) {
    }
    lfs->mlist = dir.next;
-    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
+    if (lfs_gstate_hasorphans(&lfs->gstate)) {
+        LFS_ASSERT(lfs_tag_type3(tag) == LFS_TYPE_DIR);
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
@@ -4076,8 +4078,10 @@ static int lfs_rename_(lfs_t *lfs, const char *oldpath, const char *newpath) {
    }
    lfs->mlist = prevdir.next;
-    if (prevtag != LFS_ERR_NOENT
-            && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
+    if (lfs_gstate_hasorphans(&lfs->gstate)) {
+        LFS_ASSERT(prevtag != LFS_ERR_NOENT
+                && lfs_tag_type3(prevtag) == LFS_TYPE_DIR);
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
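
Both hunks make the same change: the orphan compensation that used to key off the removed entry's type now keys off whether the filesystem's gstate still records an orphan, and the old condition is demoted to an LFS_ASSERT. Below is a self-contained toy model of my reading of the commit title, not actual littlefs internals; the counter and helper names are illustrative only.

// Toy model of the accounting bug, not littlefs code: an orphan counter that
// must never go negative, a remove path that pre-increments it, and a
// relocation step that may already consume the orphan before the remove's own
// compensation runs.
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int orphans; // stand-in for the orphan count tracked in gstate

static void preporphans(int delta) {
    orphans += delta;
    assert(orphans >= 0); // the "double deorphan" would trip this
}

static void remove_dir(bool relocation_already_deorphaned) {
    preporphans(+1); // mark the fs orphaned before committing the delete
    if (relocation_already_deorphaned) {
        orphans = 0; // a relocation mid-remove cleaned up the orphan for us
    }
    // old behaviour: unconditional preporphans(-1) here, which underflows
    // when the orphan was already consumed
    // new behaviour: only compensate while an orphan is still recorded
    if (orphans > 0) {
        preporphans(-1);
    }
}

int main(void) {
    remove_dir(false); // normal remove: +1 then -1
    remove_dir(true);  // relocation mid-remove: the guard skips the -1
    printf("orphans = %d\n", orphans); // 0
    return 0;
}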

tests/test_orphans.toml

@@ -207,7 +207,8 @@ code = '''
[cases.test_orphans_reentrant]
reentrant = true
# TODO fix this case, caused by non-DAG trees
-if = '!(DEPTH == 3 && CACHE_SIZE != 64)'
+# NOTE the second condition is required
+if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
defines = [
    {FILES=6, DEPTH=1, CYCLES=20},
    {FILES=26, DEPTH=1, CYCLES=20},
@@ -271,3 +272,69 @@ code = '''
    lfs_unmount(&lfs) => 0;
'''
# non-reentrant testing for orphans, this is the same as reentrant
# testing, but we test way more states than we could under powerloss
[cases.test_orphans_nonreentrant]
# TODO fix this case, caused by non-DAG trees
# NOTE the second condition is required
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
defines = [
    {FILES=6, DEPTH=1, CYCLES=2000},
    {FILES=26, DEPTH=1, CYCLES=2000},
    {FILES=3, DEPTH=3, CYCLES=2000},
]
code = '''
    lfs_t lfs;
    lfs_format(&lfs, cfg) => 0;
    lfs_mount(&lfs, cfg) => 0;
    uint32_t prng = 1;
    const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
    for (unsigned i = 0; i < CYCLES; i++) {
        // create random path
        char full_path[256];
        for (unsigned d = 0; d < DEPTH; d++) {
            sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
        }
        // if it does not exist, we create it, else we destroy
        struct lfs_info info;
        int res = lfs_stat(&lfs, full_path, &info);
        if (res == LFS_ERR_NOENT) {
            // create each directory in turn, ignore if dir already exists
            for (unsigned d = 0; d < DEPTH; d++) {
                char path[1024];
                strcpy(path, full_path);
                path[2*d+2] = '\0';
                int err = lfs_mkdir(&lfs, path);
                assert(!err || err == LFS_ERR_EXIST);
            }
            for (unsigned d = 0; d < DEPTH; d++) {
                char path[1024];
                strcpy(path, full_path);
                path[2*d+2] = '\0';
                lfs_stat(&lfs, path, &info) => 0;
                assert(strcmp(info.name, &path[2*d+1]) == 0);
                assert(info.type == LFS_TYPE_DIR);
            }
        } else {
            // is valid dir?
            assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
            assert(info.type == LFS_TYPE_DIR);
            // try to delete path in reverse order, ignore if dir is not empty
            for (int d = DEPTH-1; d >= 0; d--) {
                char path[1024];
                strcpy(path, full_path);
                path[2*d+2] = '\0';
                int err = lfs_remove(&lfs, path);
                assert(!err || err == LFS_ERR_NOTEMPTY);
            }
            lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
        }
    }
    lfs_unmount(&lfs) => 0;
'''
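
The '2*FILES < BLOCK_COUNT' condition added to both the reentrant case and the new non-reentrant case above is a geometry gate: each littlefs directory occupies a two-block metadata pair, so configurations that can create up to FILES directories per level presumably need headroom beyond 2*FILES blocks to avoid exhausting the device instead of exercising orphan handling. The standalone check below only illustrates which defines rows such a gate admits; the block counts are hypothetical, not taken from the test geometries.

#include <stdbool.h>
#include <stdio.h>

static bool gate(unsigned files, unsigned block_count) {
    // each directory needs a two-block metadata pair, so demand headroom
    return 2*files < block_count;
}

int main(void) {
    const unsigned files[] = {6, 26, 3};           // FILES from the defines rows above
    const unsigned block_counts[] = {16, 64, 256}; // hypothetical geometries
    for (unsigned i = 0; i < 3; i++) {
        for (unsigned j = 0; j < 3; j++) {
            printf("FILES=%-2u BLOCK_COUNT=%-3u -> %s\n",
                    files[i], block_counts[j],
                    gate(files[i], block_counts[j]) ? "runs" : "skipped");
        }
    }
    return 0;
}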

tests/test_relocations.toml

@@ -341,3 +341,171 @@ code = '''
    }
    lfs_unmount(&lfs) => 0;
'''
# non-reentrant testing for orphans, this is the same as reentrant
# testing, but we test way more states than we could under powerloss
[cases.test_relocations_nonreentrant]
# TODO fix this case, caused by non-DAG trees
# NOTE the second condition is required
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
defines = [
    {FILES=6, DEPTH=1, CYCLES=2000, BLOCK_CYCLES=1},
    {FILES=26, DEPTH=1, CYCLES=2000, BLOCK_CYCLES=1},
    {FILES=3, DEPTH=3, CYCLES=2000, BLOCK_CYCLES=1},
]
code = '''
    lfs_t lfs;
    lfs_format(&lfs, cfg) => 0;
    lfs_mount(&lfs, cfg) => 0;
    uint32_t prng = 1;
    const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
    for (unsigned i = 0; i < CYCLES; i++) {
        // create random path
        char full_path[256];
        for (unsigned d = 0; d < DEPTH; d++) {
            sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
        }
        // if it does not exist, we create it, else we destroy
        struct lfs_info info;
        int res = lfs_stat(&lfs, full_path, &info);
        if (res == LFS_ERR_NOENT) {
            // create each directory in turn, ignore if dir already exists
            for (unsigned d = 0; d < DEPTH; d++) {
                char path[1024];
                strcpy(path, full_path);
                path[2*d+2] = '\0';
                int err = lfs_mkdir(&lfs, path);
                assert(!err || err == LFS_ERR_EXIST);
            }
            for (unsigned d = 0; d < DEPTH; d++) {
                char path[1024];
                strcpy(path, full_path);
                path[2*d+2] = '\0';
                lfs_stat(&lfs, path, &info) => 0;
                assert(strcmp(info.name, &path[2*d+1]) == 0);
                assert(info.type == LFS_TYPE_DIR);
            }
        } else {
            // is valid dir?
            assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
            assert(info.type == LFS_TYPE_DIR);
            // try to delete path in reverse order, ignore if dir is not empty
            for (unsigned d = DEPTH-1; d+1 > 0; d--) {
                char path[1024];
                strcpy(path, full_path);
                path[2*d+2] = '\0';
                int err = lfs_remove(&lfs, path);
                assert(!err || err == LFS_ERR_NOTEMPTY);
            }
            lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
        }
    }
    lfs_unmount(&lfs) => 0;
'''
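
The relocation cases in this file, including the renames variant below, also pin BLOCK_CYCLES=1, which forces very frequent metadata-block eviction and relocation, so the create/remove loop keeps hitting the relocation paths that the lfs.c change guards against. For context, here is a minimal sketch of the same knob in an ordinary lfs_config; the geometry numbers and NULL block-device callbacks are placeholders, not values from this PR.

#include "lfs.h"

// Sketch only: a real configuration needs working read/prog/erase/sync
// callbacks and a geometry that matches the underlying block device.
static const struct lfs_config cfg = {
    // block device operations (placeholders)
    .read  = NULL,
    .prog  = NULL,
    .erase = NULL,
    .sync  = NULL,

    // block device geometry (placeholder values)
    .read_size      = 16,
    .prog_size      = 16,
    .block_size     = 4096,
    .block_count    = 128,
    .cache_size     = 64,
    .lookahead_size = 16,

    // evict and relocate metadata blocks as eagerly as possible,
    // mirroring BLOCK_CYCLES=1 in the defines above
    .block_cycles   = 1,
};
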
# non-reentrant testing for relocations, but now with random renames!
[cases.test_relocations_nonreentrant_renames]
# TODO fix this case, caused by non-DAG trees
# NOTE the second condition is required
if = '!(DEPTH == 3 && CACHE_SIZE != 64) && 2*FILES < BLOCK_COUNT'
defines = [
    {FILES=6, DEPTH=1, CYCLES=2000, BLOCK_CYCLES=1},
    {FILES=26, DEPTH=1, CYCLES=2000, BLOCK_CYCLES=1},
    {FILES=3, DEPTH=3, CYCLES=2000, BLOCK_CYCLES=1},
]
code = '''
    lfs_t lfs;
    lfs_format(&lfs, cfg) => 0;
    lfs_mount(&lfs, cfg) => 0;
    uint32_t prng = 1;
    const char alpha[] = "abcdefghijklmnopqrstuvwxyz";
    for (unsigned i = 0; i < CYCLES; i++) {
        // create random path
        char full_path[256];
        for (unsigned d = 0; d < DEPTH; d++) {
            sprintf(&full_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
        }
        // if it does not exist, we create it, else we destroy
        struct lfs_info info;
        int res = lfs_stat(&lfs, full_path, &info);
        assert(!res || res == LFS_ERR_NOENT);
        if (res == LFS_ERR_NOENT) {
            // create each directory in turn, ignore if dir already exists
            for (unsigned d = 0; d < DEPTH; d++) {
                char path[1024];
                strcpy(path, full_path);
                path[2*d+2] = '\0';
                int err = lfs_mkdir(&lfs, path);
                assert(!err || err == LFS_ERR_EXIST);
            }
            for (unsigned d = 0; d < DEPTH; d++) {
                char path[1024];
                strcpy(path, full_path);
                path[2*d+2] = '\0';
                lfs_stat(&lfs, path, &info) => 0;
                assert(strcmp(info.name, &path[2*d+1]) == 0);
                assert(info.type == LFS_TYPE_DIR);
            }
        } else {
            assert(strcmp(info.name, &full_path[2*(DEPTH-1)+1]) == 0);
            assert(info.type == LFS_TYPE_DIR);
            // create new random path
            char new_path[256];
            for (unsigned d = 0; d < DEPTH; d++) {
                sprintf(&new_path[2*d], "/%c", alpha[TEST_PRNG(&prng) % FILES]);
            }
            // if new path does not exist, rename, otherwise destroy
            res = lfs_stat(&lfs, new_path, &info);
            assert(!res || res == LFS_ERR_NOENT);
            if (res == LFS_ERR_NOENT) {
                // stop once some dir is renamed
                for (unsigned d = 0; d < DEPTH; d++) {
                    char path[1024];
                    strcpy(&path[2*d], &full_path[2*d]);
                    path[2*d+2] = '\0';
                    strcpy(&path[128+2*d], &new_path[2*d]);
                    path[128+2*d+2] = '\0';
                    int err = lfs_rename(&lfs, path, path+128);
                    assert(!err || err == LFS_ERR_NOTEMPTY);
                    if (!err) {
                        strcpy(path, path+128);
                    }
                }
                for (unsigned d = 0; d < DEPTH; d++) {
                    char path[1024];
                    strcpy(path, new_path);
                    path[2*d+2] = '\0';
                    lfs_stat(&lfs, path, &info) => 0;
                    assert(strcmp(info.name, &path[2*d+1]) == 0);
                    assert(info.type == LFS_TYPE_DIR);
                }
                lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
            } else {
                // try to delete path in reverse order,
                // ignore if dir is not empty
                for (unsigned d = DEPTH-1; d+1 > 0; d--) {
                    char path[1024];
                    strcpy(path, full_path);
                    path[2*d+2] = '\0';
                    int err = lfs_remove(&lfs, path);
                    assert(!err || err == LFS_ERR_NOTEMPTY);
                }
                lfs_stat(&lfs, full_path, &info) => LFS_ERR_NOENT;
            }
        }
    }
    lfs_unmount(&lfs) => 0;
'''
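
All of the new cases lean on the same path trick: every component is exactly two characters ("/x"), so truncating the string at index 2*d+2 yields the depth-d ancestor that the create, stat, and remove loops iterate over. The following is a standalone illustration, separate from the test code; the names and values are made up.

#include <stdio.h>
#include <string.h>

int main(void) {
    enum { DEPTH = 3 };
    const char names[] = "abc"; // stand-ins for the random picks from alpha

    // build "/a/b/c" the same way the tests build full_path
    char full_path[256];
    for (unsigned d = 0; d < DEPTH; d++) {
        sprintf(&full_path[2*d], "/%c", names[d]);
    }

    // truncating at 2*d+2 walks the ancestors: "/a", "/a/b", "/a/b/c"
    for (unsigned d = 0; d < DEPTH; d++) {
        char path[256];
        strcpy(path, full_path);
        path[2*d+2] = '\0';
        printf("depth %u: %s\n", d, path);
    }
    return 0;
}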