gbmap: Added gc_repoplookahead_thresh and gc_repopgbmap_thresh

To allow relaxing when LFS3_I_REPOPLOOKAHEAD and LFS3_I_REPOPGBMAP will
be set, potentially reducing gc workload after allocating only a couple
blocks.

The relevant cfg comments have quite a bit more info.

Note that -1 (not the default, 0 — maybe we should explicitly flip
this?) restores the previous behavior of setting these flags on the
first block allocation.

---

Also tweaked gbmap repops during gc/traversals to _not_ try to repop
unless LFS3_I_REPOPGBMAP is set. We probably should have done this from
the beginning since repopulating the gbmap writes to disk and is
potentially destructive.

Adds code, though hopefully we can claw this back with future config
rework:

                 code          stack          ctx
  before:       37176           2352          684
  after:        37208 (+0.1%)   2352 (+0.0%)  688 (+0.6%)

                 code          stack          ctx
  gbmap before: 40024           2368          848
  gbmap after:  40120 (+0.2%)   2368 (+0.0%)  856 (+0.9%)
This commit is contained in:
Christopher Haster
2025-10-17 00:32:03 -05:00
parent 1dc1a26f11
commit 12874bff76
5 changed files with 394 additions and 88 deletions

73
lfs3.c
View File

@ -10627,9 +10627,13 @@ static lfs3_stag_t lfs3_mtree_gc(lfs3_t *lfs3, lfs3_mgc_t *mgc,
lfs3_bptr_t *bptr_) {
// start of traversal?
if (lfs3_t_tstate(mgc->t.b.h.flags) == LFS3_TSTATE_MROOTANCHOR) {
// checkpoint the allocator to maximize any lookahead scans
#ifndef LFS3_RDONLY
// checkpoint the allocator to maximize any lookahead scans
//
// note we try to repop even if the repoplookahead flag isn't
// set because there's no real downside
if (lfs3_t_isrepoplookahead(mgc->t.b.h.flags)
&& !lfs3_t_ismtreeonly(mgc->t.b.h.flags)
&& !lfs3_t_isckpointed(mgc->t.b.h.flags)) {
lfs3_alloc_ckpoint_(lfs3);
// keep our own ckpointed flag clear
@ -10640,11 +10644,19 @@ static lfs3_stag_t lfs3_mtree_gc(lfs3_t *lfs3, lfs3_mgc_t *mgc,
#if !defined(LFS3_RDONLY) && defined(LFS3_GBMAP)
// create a new gbmap snapshot
//
// note we _don't_ try to repop if the repopgbmap flag isn't set
// because repopulating the gbmap requires disk writes and is
// potentially destructive
//
// note because we bail as soon as a ckpoint is triggered
// (lfs3_t_isckpointed), we don't need to traverse this
// (lfs3_t_isckpointed), we don't need to include this snapshot
// in traversals, the ckpointed flag also means we don't need to
// worry about this repop condition becoming true later
if (lfs3_t_isrepopgbmap(mgc->t.b.h.flags)
&& lfs3_f_isgbmap(lfs3->flags)
&& !lfs3_t_ismtreeonly(mgc->t.b.h.flags)) {
&& lfs3_t_isrepopgbmap(lfs3->flags)
&& !lfs3_t_ismtreeonly(mgc->t.b.h.flags)
&& !lfs3_t_isckpointed(mgc->t.b.h.flags)) {
// at least checkpoint the lookahead buffer
lfs3_alloc_ckpoint_(lfs3);
@ -10694,6 +10706,7 @@ dropped:;
#ifdef LFS3_GBMAP
if (lfs3_t_isrepopgbmap(mgc->t.b.h.flags)
&& lfs3_f_isgbmap(lfs3->flags)
&& lfs3_t_isrepopgbmap(lfs3->flags)
&& !lfs3_t_ismtreeonly(mgc->t.b.h.flags)
&& !lfs3_t_isckpointed(mgc->t.b.h.flags)) {
int err = lfs3_gbmap_markbptr(lfs3, &mgc->gbmap_, tag, bptr_,
@ -10778,6 +10791,7 @@ eot:;
#ifdef LFS3_GBMAP
if (lfs3_t_isrepopgbmap(mgc->t.b.h.flags)
&& lfs3_f_isgbmap(lfs3->flags)
&& lfs3_t_isrepopgbmap(lfs3->flags)
&& !lfs3_t_ismtreeonly(mgc->t.b.h.flags)
&& !lfs3_t_isckpointed(mgc->t.b.h.flags)) {
lfs3_alloc_adoptgbmap(lfs3, &mgc->gbmap_, lfs3->lookahead.ckpoint);
@ -11266,7 +11280,7 @@ static void lfs3_alloc_adopt(lfs3_t *lfs3, lfs3_block_t known) {
8*lfs3->cfg->lookahead_size,
known);
// signal that lookahead is full, this is cleared on first alloc
// signal that lookahead is full
lfs3->flags &= ~LFS3_I_REPOPLOOKAHEAD;
// eagerly find the next free block so lookahead scans can make
@ -11275,6 +11289,30 @@ static void lfs3_alloc_adopt(lfs3_t *lfs3, lfs3_block_t known) {
}
#endif
// can we repopulate the lookahead buffer?
//
// True when the number of blocks with known state has dropped to or
// below gc_repoplookahead_thresh. The threshold is clamped to
// 8*lookahead_size-1 (bits in the lookahead buffer) and block_count-1
// (blocks on disk), so a completely full lookahead buffer never
// triggers a repop, while a thresh of -1 (all ones) always requests a
// repop after any allocation.
//
// NOTE(review): assumes lookahead.known counts blocks with known state
// in the lookahead window — confirm against the allocator docs.
#if !defined(LFS3_RDONLY)
static inline bool lfs3_alloc_isrepoplookahead(const lfs3_t *lfs3) {
return lfs3->lookahead.known
<= lfs3_min(
lfs3->cfg->gc_repoplookahead_thresh,
lfs3_min(
8*lfs3->cfg->lookahead_size-1,
lfs3->block_count-1));
}
#endif
// can we repopulate the gbmap?
//
// True when the gbmap's known window has shrunk to or below the larger
// of gc_repopgbmap_thresh and gbmap_repop_thresh, clamped to
// block_count-1 so a gbmap that knows the entire disk never triggers a
// repop. Taking the max means gc repops can only be requested at or
// above the normal-operation repop threshold, never below it.
//
// NOTE(review): assumes gbmap.known counts blocks covered by the known
// gbmap window — confirm against the gbmap docs.
#if !defined(LFS3_RDONLY) && defined(LFS3_GBMAP)
static inline bool lfs3_alloc_isrepopgbmap(const lfs3_t *lfs3) {
return lfs3->gbmap.known
<= lfs3_min(
lfs3_max(
lfs3->cfg->gc_repopgbmap_thresh,
lfs3->cfg->gbmap_repop_thresh),
lfs3->block_count-1);
}
#endif
// increment lookahead buffer
#if !defined(LFS3_RDONLY) && !defined(LFS3_2BONLY)
static void lfs3_alloc_inc(lfs3_t *lfs3) {
@ -11284,9 +11322,6 @@ static void lfs3_alloc_inc(lfs3_t *lfs3) {
lfs3->lookahead.buffer[lfs3->lookahead.off / 8]
&= ~(1 << (lfs3->lookahead.off % 8));
// signal that lookahead is no longer full
lfs3->flags |= LFS3_I_REPOPLOOKAHEAD;
// increment next/off
lfs3->lookahead.off += 1;
if (lfs3->lookahead.off == 8*lfs3->cfg->lookahead_size) {
@ -11301,6 +11336,11 @@ static void lfs3_alloc_inc(lfs3_t *lfs3) {
// decrement ckpoint
lfs3->lookahead.ckpoint -= 1;
// signal that lookahead is no longer full
if (lfs3_alloc_isrepoplookahead(lfs3)) {
lfs3->flags |= LFS3_I_REPOPLOOKAHEAD;
}
// decrement gbmap known window
#ifdef LFS3_GBMAP
if (lfs3_f_isgbmap(lfs3->flags)) {
@ -11308,7 +11348,9 @@ static void lfs3_alloc_inc(lfs3_t *lfs3) {
lfs3->gbmap.known = lfs3_smax(lfs3->gbmap.known-1, 0);
// signal that the gbmap is no longer full
lfs3->flags |= LFS3_I_REPOPGBMAP;
if (lfs3_alloc_isrepopgbmap(lfs3)) {
lfs3->flags |= LFS3_I_REPOPGBMAP;
}
}
#endif
}
@ -11332,7 +11374,7 @@ static lfs3_sblock_t lfs3_alloc_findfree(lfs3_t *lfs3) {
}
#endif
// needed in lfs3_mtree_traverse_
// needed in lfs3_alloc
static inline lfs3_size_t lfs3_graft_count(lfs3_size_t graft_count);
// allocate a block
@ -11456,7 +11498,7 @@ static void lfs3_alloc_adoptgbmap(lfs3_t *lfs3,
lfs3->gbmap.known = known;
lfs3->gbmap.b = *gbmap;
// signal that gbmap is full, this is cleared on first alloc
// signal that gbmap is full
lfs3->flags &= ~LFS3_I_REPOPGBMAP;
}
#endif
@ -16282,13 +16324,12 @@ static int lfs3_mountinited(lfs3_t *lfs3) {
// known gbmap window
lfs3->lookahead.window = lfs3->gbmap.window;
// and mark our gbmap as repopulatable if known window does not
// include the entire disk
// mark our gbmap as repopulatable if known window is
// <= gc_repopgbmap_thresh
//
// unfortunately the use of block allocation during gbmap
// repops means the known window almost never includes the
// entire disk
if (lfs3->gbmap.known < lfs3->block_count) {
// unfortunately the dependency of the gbmap on block allocation
// means this rarely includes the entire disk
if (lfs3_alloc_isrepopgbmap(lfs3)) {
lfs3->flags |= LFS3_I_REPOPGBMAP;
}
#endif

51
lfs3.h
View File

@ -497,13 +497,43 @@ struct lfs3_cfg {
lfs3_soff_t gc_steps;
#endif
// Threshold for metadata compaction during gc in bytes. Metadata logs
// that exceed this threshold will be compacted during gc operations.
// Defaults to ~88% block_size when zero, though this default may change
// in the future.
// Threshold for repopulating the lookahead buffer during gc. This
// can be set lower than the lookahead size to delay gc work when
// only a few blocks have been allocated.
//
// Note this only affects explicit gc operations. Otherwise metadata is
// only compacted when full.
// Note this only affects explicit gc operations. During normal
// operations the lookahead buffer is only repopulated when empty.
//
// 0 only repopulates the lookahead buffer when empty, while -1 or
// any value >= 8*lookahead_size repopulates the lookahead buffer
// after any block allocation.
#ifndef LFS3_RDONLY
lfs3_block_t gc_repoplookahead_thresh;
#endif
// Threshold for repopulating the gbmap during gc. This can be set
// lower than the disk size to delay gc work when only a few blocks
// have been allocated.
//
// Note this only affects explicit gc operations. During normal
// operations gbmap repopulations are controlled by
// gbmap_repop_thresh.
//
// Any value <= gbmap_repop_thresh repopulates the gbmap when below
// gbmap_repop_thresh, while -1 or any value >= block_count
// repopulates the gbmap after any block allocation.
#if !defined(LFS3_RDONLY) && defined(LFS3_GBMAP)
lfs3_block_t gc_repopgbmap_thresh;
#endif
// Threshold for metadata compaction during gc in bytes.
//
// Metadata logs that exceed this threshold will be compacted during
// gc operations. Defaults to ~88% block_size when zero, though this
// default may change in the future.
//
// Note this only affects explicit gc operations. During normal
// operations metadata is only compacted when full.
//
// Set to -1 to disable metadata compaction during gc.
#ifndef LFS3_RDONLY
@ -578,15 +608,16 @@ struct lfs3_cfg {
lfs3_size_t crystal_thresh;
#endif
// Threshold for when to repopulate the global on-disk block-map
// (gbmap). When <= this many blocks have a known state, littlefs
// will traverse the filesystem and attempt to repopulate the gbmap.
// Threshold for repopulating the global on-disk block-map (gbmap).
//
// When <= this many blocks have a known state, littlefs will
// traverse the filesystem and attempt to repopulate the gbmap.
// Smaller values decrease repop frequency and improve overall
// allocator throughput, at the risk of needing to fall back to the
// slower lookahead allocator when empty.
//
// 0 only repopulates the gbmap when empty, minimizing gbmap
// repops but may introduce large latency spikes.
// repops at the risk of large latency spikes.
#ifdef LFS3_GBMAP
lfs3_block_t gbmap_repop_thresh;
#endif

View File

@ -103,29 +103,32 @@ void bench_permutation(size_t i, uint32_t *buffer, size_t size);
// a few preconfigured defines that control how benches run
#define BENCH_IMPLICIT_DEFINES \
/* name value (overridable) */ \
BENCH_DEFINE(READ_SIZE, 1 ) \
BENCH_DEFINE(PROG_SIZE, 1 ) \
BENCH_DEFINE(BLOCK_SIZE, 4096 ) \
BENCH_DEFINE(BLOCK_COUNT, DISK_SIZE/BLOCK_SIZE ) \
BENCH_DEFINE(DISK_SIZE, 1024*1024 ) \
BENCH_DEFINE(BLOCK_RECYCLES, -1 ) \
BENCH_DEFINE(RCACHE_SIZE, LFS3_MAX(16, READ_SIZE) ) \
BENCH_DEFINE(PCACHE_SIZE, LFS3_MAX(16, PROG_SIZE) ) \
BENCH_DEFINE(FILE_CACHE_SIZE, 16 ) \
BENCH_DEFINE(LOOKAHEAD_SIZE, 16 ) \
BENCH_DEFINE(GC_FLAGS, LFS3_GC_ALL ) \
BENCH_DEFINE(GC_STEPS, 0 ) \
BENCH_DEFINE(GC_COMPACTMETA_THRESH, 0 ) \
BENCH_DEFINE(SHRUB_SIZE, BLOCK_SIZE/4 ) \
BENCH_DEFINE(FRAGMENT_SIZE, LFS3_MIN(BLOCK_SIZE/8, 512) ) \
BENCH_DEFINE(CRYSTAL_THRESH, BLOCK_SIZE/8 ) \
BENCH_DEFINE(GBMAP_REPOP_THRESH, BLOCK_COUNT/4 ) \
BENCH_DEFINE(ERASE_VALUE, 0xff ) \
BENCH_DEFINE(ERASE_CYCLES, 0 ) \
BENCH_DEFINE(BADBLOCK_BEHAVIOR, LFS3_EMUBD_BADBLOCK_PROGERROR ) \
BENCH_DEFINE(POWERLOSS_BEHAVIOR, LFS3_EMUBD_POWERLOSS_ATOMIC ) \
BENCH_DEFINE(EMUBD_SEED, 0 )
/* name value (overridable) */ \
BENCH_DEFINE(READ_SIZE, 1 ) \
BENCH_DEFINE(PROG_SIZE, 1 ) \
BENCH_DEFINE(BLOCK_SIZE, 4096 ) \
BENCH_DEFINE(BLOCK_COUNT, DISK_SIZE/BLOCK_SIZE ) \
BENCH_DEFINE(DISK_SIZE, 1024*1024 ) \
BENCH_DEFINE(BLOCK_RECYCLES, -1 ) \
BENCH_DEFINE(RCACHE_SIZE, LFS3_MAX(16, READ_SIZE) ) \
BENCH_DEFINE(PCACHE_SIZE, LFS3_MAX(16, PROG_SIZE) ) \
BENCH_DEFINE(FILE_CACHE_SIZE, 16 ) \
BENCH_DEFINE(LOOKAHEAD_SIZE, 16 ) \
BENCH_DEFINE(GC_FLAGS, LFS3_GC_ALL ) \
BENCH_DEFINE(GC_STEPS, 0 ) \
BENCH_DEFINE(GC_REPOPLOOKAHEAD_THRESH, \
-1 ) \
BENCH_DEFINE(GC_REPOPGBMAP_THRESH, -1 ) \
BENCH_DEFINE(GC_COMPACTMETA_THRESH, 0 ) \
BENCH_DEFINE(SHRUB_SIZE, BLOCK_SIZE/4 ) \
BENCH_DEFINE(FRAGMENT_SIZE, LFS3_MIN(BLOCK_SIZE/8, 512) ) \
BENCH_DEFINE(CRYSTAL_THRESH, BLOCK_SIZE/8 ) \
BENCH_DEFINE(GBMAP_REPOP_THRESH, BLOCK_COUNT/4 ) \
BENCH_DEFINE(ERASE_VALUE, 0xff ) \
BENCH_DEFINE(ERASE_CYCLES, 0 ) \
BENCH_DEFINE(BADBLOCK_BEHAVIOR, LFS3_EMUBD_BADBLOCK_PROGERROR ) \
BENCH_DEFINE(POWERLOSS_BEHAVIOR, LFS3_EMUBD_POWERLOSS_ATOMIC ) \
BENCH_DEFINE(EMUBD_SEED, 0 )
// declare defines as global intmax_ts
#define BENCH_DEFINE(k, v) \
@ -136,25 +139,27 @@ void bench_permutation(size_t i, uint32_t *buffer, size_t size);
// map defines to cfg struct fields
#define BENCH_CFG \
.read_size = READ_SIZE, \
.prog_size = PROG_SIZE, \
.block_size = BLOCK_SIZE, \
.block_count = BLOCK_COUNT, \
.block_recycles = BLOCK_RECYCLES, \
.rcache_size = RCACHE_SIZE, \
.pcache_size = PCACHE_SIZE, \
.file_cache_size = FILE_CACHE_SIZE, \
.lookahead_size = LOOKAHEAD_SIZE, \
BENCH_GBMAP_CFG \
BENCH_GC_CFG \
.gc_compactmeta_thresh = GC_COMPACTMETA_THRESH, \
.shrub_size = SHRUB_SIZE, \
.fragment_size = FRAGMENT_SIZE, \
.crystal_thresh = CRYSTAL_THRESH,
.read_size = READ_SIZE, \
.prog_size = PROG_SIZE, \
.block_size = BLOCK_SIZE, \
.block_count = BLOCK_COUNT, \
.block_recycles = BLOCK_RECYCLES, \
.rcache_size = RCACHE_SIZE, \
.pcache_size = PCACHE_SIZE, \
.file_cache_size = FILE_CACHE_SIZE, \
.lookahead_size = LOOKAHEAD_SIZE, \
BENCH_GBMAP_CFG \
BENCH_GC_CFG \
.gc_repoplookahead_thresh = GC_REPOPLOOKAHEAD_THRESH, \
.gc_compactmeta_thresh = GC_COMPACTMETA_THRESH, \
.shrub_size = SHRUB_SIZE, \
.fragment_size = FRAGMENT_SIZE, \
.crystal_thresh = CRYSTAL_THRESH,
#ifdef LFS3_GBMAP
#define BENCH_GBMAP_CFG \
.gbmap_repop_thresh = GBMAP_REPOP_THRESH,
.gc_repopgbmap_thresh = GC_REPOPGBMAP_THRESH, \
.gbmap_repop_thresh = GBMAP_REPOP_THRESH,
#else
#define BENCH_GBMAP_CFG
#endif

View File

@ -107,6 +107,9 @@ void test_permutation(size_t i, uint32_t *buffer, size_t size);
TEST_DEFINE(LOOKAHEAD_SIZE, 16 ) \
TEST_DEFINE(GC_FLAGS, LFS3_GC_ALL ) \
TEST_DEFINE(GC_STEPS, 0 ) \
TEST_DEFINE(GC_REPOPLOOKAHEAD_THRESH, \
-1 ) \
TEST_DEFINE(GC_REPOPGBMAP_THRESH, -1 ) \
TEST_DEFINE(GC_COMPACTMETA_THRESH, 0 ) \
TEST_DEFINE(SHRUB_SIZE, BLOCK_SIZE/4 ) \
TEST_DEFINE(FRAGMENT_SIZE, LFS3_MIN(BLOCK_SIZE/8, 512) ) \
@ -127,43 +130,45 @@ void test_permutation(size_t i, uint32_t *buffer, size_t size);
// map defines to cfg struct fields
#define TEST_CFG \
.read_size = READ_SIZE, \
.prog_size = PROG_SIZE, \
.block_size = BLOCK_SIZE, \
.block_count = BLOCK_COUNT, \
.block_recycles = BLOCK_RECYCLES, \
.rcache_size = RCACHE_SIZE, \
.pcache_size = PCACHE_SIZE, \
.file_cache_size = FILE_CACHE_SIZE, \
.lookahead_size = LOOKAHEAD_SIZE, \
TEST_GBMAP_CFG \
TEST_GC_CFG \
.gc_compactmeta_thresh = GC_COMPACTMETA_THRESH, \
.shrub_size = SHRUB_SIZE, \
.fragment_size = FRAGMENT_SIZE, \
.crystal_thresh = CRYSTAL_THRESH,
.read_size = READ_SIZE, \
.prog_size = PROG_SIZE, \
.block_size = BLOCK_SIZE, \
.block_count = BLOCK_COUNT, \
.block_recycles = BLOCK_RECYCLES, \
.rcache_size = RCACHE_SIZE, \
.pcache_size = PCACHE_SIZE, \
.file_cache_size = FILE_CACHE_SIZE, \
.lookahead_size = LOOKAHEAD_SIZE, \
TEST_GBMAP_CFG \
TEST_GC_CFG \
.gc_repoplookahead_thresh = GC_REPOPLOOKAHEAD_THRESH, \
.gc_compactmeta_thresh = GC_COMPACTMETA_THRESH, \
.shrub_size = SHRUB_SIZE, \
.fragment_size = FRAGMENT_SIZE, \
.crystal_thresh = CRYSTAL_THRESH,
#ifdef LFS3_GBMAP
#define TEST_GBMAP_CFG \
.gbmap_repop_thresh = GBMAP_REPOP_THRESH,
.gc_repopgbmap_thresh = GC_REPOPGBMAP_THRESH, \
.gbmap_repop_thresh = GBMAP_REPOP_THRESH,
#else
#define TEST_GBMAP_CFG
#endif
#ifdef LFS3_GC
#define TEST_GC_CFG \
.gc_flags = GC_FLAGS, \
.gc_steps = GC_STEPS,
.gc_flags = GC_FLAGS, \
.gc_steps = GC_STEPS,
#else
#define TEST_GC_CFG
#endif
#define TEST_BDCFG \
.erase_value = ERASE_VALUE, \
.erase_cycles = ERASE_CYCLES, \
.badblock_behavior = BADBLOCK_BEHAVIOR, \
.powerloss_behavior = POWERLOSS_BEHAVIOR, \
.seed = EMUBD_SEED,
.erase_value = ERASE_VALUE, \
.erase_cycles = ERASE_CYCLES, \
.badblock_behavior = BADBLOCK_BEHAVIOR, \
.powerloss_behavior = POWERLOSS_BEHAVIOR, \
.seed = EMUBD_SEED,
#endif

View File

@ -161,6 +161,117 @@ code = '''
lfs3_unmount(&lfs3) => 0;
'''
# test that we can relax lookahead repopulation with
# gc_repoplookahead_thresh
[cases.test_gc_repoplookahead_relaxed]
# relax our repop thresh
defines.GC_REPOPLOOKAHEAD_THRESH = [
'-1',
'8*LOOKAHEAD_SIZE - (8*LOOKAHEAD_SIZE/4)',
'8*LOOKAHEAD_SIZE - (8*LOOKAHEAD_SIZE/2)',
]
defines.CKMETA = [false, true]
defines.CKDATA = [false, true]
defines.GC_FLAGS = '''
LFS3_GC_REPOPLOOKAHEAD
| ((CKMETA) ? LFS3_GC_CKMETA : 0)
| ((CKDATA) ? LFS3_GC_CKDATA : 0)
'''
defines.GC_STEPS = [-1, 1, 2, 10, 100, 1000]
defines.SIZE = 'BLOCK_SIZE'
ifdef = 'LFS3_GC'
code = '''
lfs3_t lfs3;
lfs3_format(&lfs3,
LFS3_F_RDWR
| ((GBMAP) ? LFS3_IFDEF_GBMAP(LFS3_F_GBMAP, -1) : 0),
CFG) => 0;
lfs3_mount(&lfs3, LFS3_M_RDWR, CFG) => 0;
uint32_t prng = 42;
// create a file
lfs3_file_t file;
lfs3_file_open(&lfs3, &file, "spider",
LFS3_O_WRONLY | LFS3_O_CREAT | LFS3_O_EXCL) => 0;
uint8_t wbuf[SIZE];
for (lfs3_size_t j = 0; j < SIZE; j++) {
wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26);
}
lfs3_file_write(&lfs3, &file, wbuf, SIZE) => SIZE;
lfs3_file_close(&lfs3, &file) => 0;
// run GC until we make progress
for (lfs3_block_t i = 0;; i++) {
// a bit hacky, but this catches infinite loops
LFS3_ASSERT(i < 2*BLOCK_COUNT);
lfs3_fs_gc(&lfs3) => 0;
struct lfs3_fsinfo fsinfo;
lfs3_fs_stat(&lfs3, &fsinfo) => 0;
if (!(fsinfo.flags & LFS3_I_REPOPLOOKAHEAD)) {
break;
}
}
// check the file contents
lfs3_file_open(&lfs3, &file, "spider", LFS3_O_RDONLY) => 0;
uint8_t rbuf[SIZE];
lfs3_file_read(&lfs3, &file, rbuf, SIZE) => SIZE;
assert(memcmp(rbuf, wbuf, SIZE) == 0);
lfs3_file_close(&lfs3, &file) => 0;
// rewrite file until we need to repopulate
for (lfs3_block_t i = 0;; i++) {
// a bit hacky, but this catches infinite loops
LFS3_ASSERT(i < 2*BLOCK_COUNT);
lfs3_file_open(&lfs3, &file, "spider",
LFS3_O_WRONLY | LFS3_O_TRUNC) => 0;
for (lfs3_size_t j = 0; j < SIZE; j++) {
wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26);
}
lfs3_file_write(&lfs3, &file, wbuf, SIZE) => SIZE;
lfs3_file_close(&lfs3, &file) => 0;
struct lfs3_fsinfo fsinfo;
lfs3_fs_stat(&lfs3, &fsinfo) => 0;
if (fsinfo.flags & LFS3_I_REPOPLOOKAHEAD) {
// check that we actually relaxed repopulations
if ((lfs3_size_t)GC_REPOPLOOKAHEAD_THRESH
< (lfs3_size_t)(8*LOOKAHEAD_SIZE-1)) {
assert(i > 0);
} else {
assert(i == 0);
}
break;
}
}
// run GC until we make progress
for (lfs3_block_t i = 0;; i++) {
// a bit hacky, but this catches infinite loops
LFS3_ASSERT(i < 2*BLOCK_COUNT);
lfs3_fs_gc(&lfs3) => 0;
struct lfs3_fsinfo fsinfo;
lfs3_fs_stat(&lfs3, &fsinfo) => 0;
if (!(fsinfo.flags & LFS3_I_REPOPLOOKAHEAD)) {
break;
}
}
// check the file contents
lfs3_file_open(&lfs3, &file, "spider", LFS3_O_RDONLY) => 0;
lfs3_file_read(&lfs3, &file, rbuf, SIZE) => SIZE;
assert(memcmp(rbuf, wbuf, SIZE) == 0);
lfs3_file_close(&lfs3, &file) => 0;
lfs3_unmount(&lfs3) => 0;
'''
# test that repopgbmap can make progress in isolation
[cases.test_gc_repopgbmap_progress]
@ -314,6 +425,119 @@ code = '''
lfs3_unmount(&lfs3) => 0;
'''
# test that we can relax gbmap repopulation with gc_repopgbmap_thresh
[cases.test_gc_repopgbmap_relaxed]
# relax our repop thresh
defines.GC_REPOPGBMAP_THRESH = [
'-1',
'BLOCK_COUNT - (BLOCK_COUNT/4)',
'BLOCK_COUNT - (BLOCK_COUNT/2)',
]
defines.REPOPLOOKAHEAD = [false, true]
defines.CKMETA = [false, true]
defines.CKDATA = [false, true]
defines.GC_FLAGS = '''
LFS3_GC_REPOPGBMAP
| ((REPOPLOOKAHEAD) ? LFS3_GC_REPOPLOOKAHEAD : 0)
| ((CKMETA) ? LFS3_GC_CKMETA : 0)
| ((CKDATA) ? LFS3_GC_CKDATA : 0)
'''
defines.GC_STEPS = [-1, 1, 2, 10, 100, 1000]
defines.SIZE = 'BLOCK_SIZE'
if = 'GBMAP'
ifdef = ['LFS3_GC', 'LFS3_GBMAP']
code = '''
lfs3_t lfs3;
lfs3_format(&lfs3,
LFS3_F_RDWR
| ((GBMAP) ? LFS3_IFDEF_GBMAP(LFS3_F_GBMAP, -1) : 0),
CFG) => 0;
lfs3_mount(&lfs3, LFS3_M_RDWR, CFG) => 0;
uint32_t prng = 42;
// create a file
lfs3_file_t file;
lfs3_file_open(&lfs3, &file, "spider",
LFS3_O_WRONLY | LFS3_O_CREAT | LFS3_O_EXCL) => 0;
uint8_t wbuf[SIZE];
for (lfs3_size_t j = 0; j < SIZE; j++) {
wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26);
}
lfs3_file_write(&lfs3, &file, wbuf, SIZE) => SIZE;
lfs3_file_close(&lfs3, &file) => 0;
// run GC until we make progress
for (lfs3_block_t i = 0;; i++) {
// a bit hacky, but this catches infinite loops
LFS3_ASSERT(i < 2*BLOCK_COUNT);
lfs3_fs_gc(&lfs3) => 0;
struct lfs3_fsinfo fsinfo;
lfs3_fs_stat(&lfs3, &fsinfo) => 0;
if (!(fsinfo.flags & LFS3_I_REPOPGBMAP)) {
break;
}
}
// check the file contents
lfs3_file_open(&lfs3, &file, "spider", LFS3_O_RDONLY) => 0;
uint8_t rbuf[SIZE];
lfs3_file_read(&lfs3, &file, rbuf, SIZE) => SIZE;
assert(memcmp(rbuf, wbuf, SIZE) == 0);
lfs3_file_close(&lfs3, &file) => 0;
// rewrite file until we need to repopulate
for (lfs3_block_t i = 0;; i++) {
// a bit hacky, but this catches infinite loops
LFS3_ASSERT(i < 2*BLOCK_COUNT);
lfs3_file_open(&lfs3, &file, "spider",
LFS3_O_WRONLY | LFS3_O_TRUNC) => 0;
for (lfs3_size_t j = 0; j < SIZE; j++) {
wbuf[j] = 'a' + (TEST_PRNG(&prng) % 26);
}
lfs3_file_write(&lfs3, &file, wbuf, SIZE) => SIZE;
lfs3_file_close(&lfs3, &file) => 0;
struct lfs3_fsinfo fsinfo;
lfs3_fs_stat(&lfs3, &fsinfo) => 0;
if (fsinfo.flags & LFS3_I_REPOPGBMAP) {
// check that we actually relaxed repopulations
if ((lfs3_size_t)GC_REPOPGBMAP_THRESH
< BLOCK_COUNT-1) {
assert(i > 0);
} else {
assert(i == 0);
}
break;
}
}
// run GC until we make progress
for (lfs3_block_t i = 0;; i++) {
// a bit hacky, but this catches infinite loops
LFS3_ASSERT(i < 2*BLOCK_COUNT);
lfs3_fs_gc(&lfs3) => 0;
struct lfs3_fsinfo fsinfo;
lfs3_fs_stat(&lfs3, &fsinfo) => 0;
if (!(fsinfo.flags & LFS3_I_REPOPGBMAP)) {
break;
}
}
// check the file contents
lfs3_file_open(&lfs3, &file, "spider", LFS3_O_RDONLY) => 0;
lfs3_file_read(&lfs3, &file, rbuf, SIZE) => SIZE;
assert(memcmp(rbuf, wbuf, SIZE) == 0);
lfs3_file_close(&lfs3, &file) => 0;
lfs3_unmount(&lfs3) => 0;
'''
# test that compactmeta can make progress in isolation
[cases.test_gc_compactmeta_progress]