mirror of https://github.com/hathach/tinyusb.git
update metrics to support bloaty csv
@@ -227,7 +227,6 @@ jobs:
          name: Aggregate Code Metrics
          command: |
            python tools/get_deps.py
            pip install tools/linkermap/
            # Combine all metrics files from all toolchain subdirectories
            ls -R /tmp/metrics
            if ls /tmp/metrics/*/*.json 1> /dev/null 2>&1; then

.github/workflows/build.yml (3 changes)
@@ -100,7 +100,6 @@ jobs:
      - name: Aggregate Code Metrics
        run: |
          python tools/get_deps.py
          pip install tools/linkermap/
          python tools/metrics.py combine -j -m -f tinyusb/src cmake-build/*/metrics.json

      - name: Upload Metrics Artifact
@@ -124,7 +123,7 @@ jobs:
        if: github.event_name != 'push'
        run: |
          if [ -f base-metrics/metrics.json ]; then
            python tools/metrics.py compare -f tinyusb/src base-metrics/metrics.json metrics.json
            python tools/metrics.py compare -m -f tinyusb/src base-metrics/metrics.json metrics.json
            cat metrics_compare.md
          else
            echo "No base metrics found, skipping comparison"

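Both metrics steps above can be replayed outside CI. A minimal sketch in Python, assuming the artifact layout used in the workflow (base-metrics/ and cmake-build/*/ are the CI paths, shown here only for illustration); the glob pattern is passed through literally because the script's help says it accepts glob patterns itself:

    import subprocess

    # Aggregate per-toolchain metrics (-j writes metrics.json, -m writes metrics.md)
    subprocess.run(
        ["python", "tools/metrics.py", "combine", "-j", "-m",
         "-f", "tinyusb/src", "cmake-build/*/metrics.json"],
        check=True,
    )
    # Compare against a baseline and emit metrics_compare.md (-m)
    subprocess.run(
        ["python", "tools/metrics.py", "compare", "-m", "-f", "tinyusb/src",
         "base-metrics/metrics.json", "metrics.json"],
        check=True,
    )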
@@ -7,7 +7,6 @@ if (NOT DEFINED CMAKE_CXX_COMPILER)
endif ()

set(CMAKE_ASM_COMPILER ${CMAKE_C_COMPILER})
set(TOOLCHAIN_ASM_FLAGS "-x assembler-with-cpp")

find_program(CMAKE_SIZE llvm-size)
find_program(CMAKE_OBJCOPY llvm-objcopy)

@@ -20,41 +20,32 @@ include(${CMAKE_CURRENT_LIST_DIR}/../cpu/${CMAKE_SYSTEM_CPU}.cmake)
# ----------------------------------------------------------------------------
# Compile flags
# ----------------------------------------------------------------------------
set(TOOLCHAIN_C_FLAGS)
set(TOOLCHAIN_ASM_FLAGS)
set(TOOLCHAIN_EXE_LINKER_FLAGS)

if (TOOLCHAIN STREQUAL "gcc" OR TOOLCHAIN STREQUAL "clang")
  list(APPEND TOOLCHAIN_COMMON_FLAGS
    -fdata-sections
    -ffunction-sections
    # -fsingle-precision-constant # not supported by clang
    -fno-strict-aliasing
    -g
  )
  list(APPEND TOOLCHAIN_EXE_LINKER_FLAGS
    -Wl,--print-memory-usage
    -Wl,--gc-sections
    -Wl,--cref
    -g # include debug info for bloaty
  )
  set(TOOLCHAIN_EXE_LINKER_FLAGS "-Wl,--print-memory-usage -Wl,--gc-sections -Wl,--cref")

  if (TOOLCHAIN STREQUAL clang)
    set(TOOLCHAIN_ASM_FLAGS "-x assembler-with-cpp")
  endif ()
elseif (TOOLCHAIN STREQUAL "iar")
  list(APPEND TOOLCHAIN_COMMON_FLAGS
    --debug
  )
  list(APPEND TOOLCHAIN_EXE_LINKER_FLAGS
    --diag_suppress=Li065
  )
  set(TOOLCHAIN_C_FLAGS --debug)
  set(TOOLCHAIN_EXE_LINKER_FLAGS --diag_suppress=Li065)
endif ()

# join the toolchain flags into a single string
list(JOIN TOOLCHAIN_COMMON_FLAGS " " TOOLCHAIN_COMMON_FLAGS)
foreach (LANG IN ITEMS C CXX ASM)
  set(CMAKE_${LANG}_FLAGS_INIT ${TOOLCHAIN_COMMON_FLAGS})
  # optimization flags for LOG, LOGGER ?
  #set(CMAKE_${LANG}_FLAGS_RELEASE_INIT "-Os")
  #set(CMAKE_${LANG}_FLAGS_DEBUG_INIT "-O0")
endforeach ()

# Assembler
if (DEFINED TOOLCHAIN_ASM_FLAGS)
  set(CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS_INIT} ${TOOLCHAIN_ASM_FLAGS}")
endif ()

# Linker
list(JOIN TOOLCHAIN_EXE_LINKER_FLAGS " " CMAKE_EXE_LINKER_FLAGS_INIT)
set(CMAKE_C_FLAGS_INIT "${TOOLCHAIN_COMMON_FLAGS} ${TOOLCHAIN_C_FLAGS}")
set(CMAKE_CXX_FLAGS_INIT "${TOOLCHAIN_COMMON_FLAGS} ${TOOLCHAIN_C_FLAGS}")
set(CMAKE_ASM_FLAGS_INIT "${TOOLCHAIN_COMMON_FLAGS} ${TOOLCHAIN_ASM_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS_INIT ${TOOLCHAIN_EXE_LINKER_FLAGS})

@@ -232,7 +232,7 @@ function(family_add_bloaty TARGET)
    return()
  endif ()

  set(OPTION "--domain=vm -d compileunits") # add -d symbol if needed
  set(OPTION "--domain=vm -d compileunits,sections,symbols")
  if (DEFINED BLOATY_OPTION)
    string(APPEND OPTION " ${BLOATY_OPTION}")
  endif ()
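For readers unfamiliar with this CMake idiom: OPTION is kept as a single space-joined string so extra user flags can be appended, and is later split back into an argument list with separate_arguments before being handed to the command. A rough Python analogue of that round trip using shlex (the appended --csv flag is just an example of a user-supplied BLOATY_OPTION):

    import shlex

    option = "--domain=vm -d compileunits,sections,symbols"
    option += " --csv"  # e.g. a user-supplied BLOATY_OPTION
    # plays the role of separate_arguments(... UNIX_COMMAND ...)
    option_list = shlex.split(option)
    assert option_list == ["--domain=vm", "-d", "compileunits,sections,symbols", "--csv"]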
@@ -240,36 +240,33 @@ function(family_add_bloaty TARGET)

  add_custom_target(${TARGET}-bloaty
    DEPENDS ${TARGET}
    COMMAND ${BLOATY_EXE} ${OPTION_LIST} $<TARGET_FILE:${TARGET}> > $<TARGET_FILE:${TARGET}>.bloaty.txt
    COMMAND cat $<TARGET_FILE:${TARGET}>.bloaty.txt
    COMMAND ${BLOATY_EXE} ${OPTION_LIST} $<TARGET_FILE:${TARGET}>
    VERBATIM)

  # post build
  add_custom_command(TARGET ${TARGET} POST_BUILD
    COMMAND ${BLOATY_EXE} ${OPTION_LIST} $<TARGET_FILE:${TARGET}> > $<TARGET_FILE:${TARGET}>.bloaty.txt
    COMMAND cat $<TARGET_FILE:${TARGET}>.bloaty.txt
    VERBATIM
  )
  # add_custom_command(TARGET ${TARGET} POST_BUILD
  #   COMMAND ${BLOATY_EXE} --csv ${OPTION_LIST} $<TARGET_FILE:${TARGET}> > ${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_bloaty.csv
  #   VERBATIM
  # )
endfunction()

# Add linkermap target (https://github.com/hathach/linkermap)
function(family_add_linkermap TARGET)
  set(LINKERMAP_OPTION_LIST)
  set(OPTION "-j")
  if (DEFINED LINKERMAP_OPTION)
    separate_arguments(LINKERMAP_OPTION_LIST UNIX_COMMAND ${LINKERMAP_OPTION})
    string(APPEND OPTION " ${LINKERMAP_OPTION}")
  endif ()
  separate_arguments(OPTION_LIST UNIX_COMMAND ${OPTION})

  add_custom_target(${TARGET}-linkermap
    COMMAND python ${LINKERMAP_PY} ${LINKERMAP_OPTION_LIST} $<TARGET_FILE:${TARGET}>.map
    COMMAND python ${LINKERMAP_PY} ${OPTION_LIST} $<TARGET_FILE:${TARGET}>.map
    VERBATIM
  )

  # post build if bloaty not exist
  if (NOT TARGET ${TARGET}-bloaty)
    add_custom_command(TARGET ${TARGET} POST_BUILD
      COMMAND python ${LINKERMAP_PY} ${LINKERMAP_OPTION_LIST} $<TARGET_FILE:${TARGET}>.map
      VERBATIM)
  endif ()
  # post build
  add_custom_command(TARGET ${TARGET} POST_BUILD
    COMMAND python ${LINKERMAP_PY} ${OPTION_LIST} $<TARGET_FILE:${TARGET}>.map
    VERBATIM)
endfunction()

#-------------------------------------------------------------
@@ -384,7 +381,7 @@ function(family_configure_common TARGET RTOS)
  if (NOT RTOS STREQUAL zephyr)
    # Analyze size with bloaty and linkermap
    family_add_bloaty(${TARGET})
    family_add_linkermap(${TARGET}) # fall back to linkermap if bloaty not found
    family_add_linkermap(${TARGET})
  endif ()

  # run size after build

@@ -183,11 +183,11 @@
#define TU_BSWAP32(u32) (__builtin_bswap32(u32))
#endif

#ifndef __ARMCC_VERSION
// List of obsolete callback function that is renamed and should not be defined.
// Put it here since only gcc support this pragma
#pragma GCC poison tud_vendor_control_request_cb
#endif
#if !defined(__ARMCC_VERSION) && !defined(__ICCARM__)
#pragma GCC poison tud_vendor_control_request_cb
#endif

#elif defined(__ICCARM__)
#include <intrinsics.h>

@@ -821,7 +821,7 @@ static void channel_xfer_in_retry(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
  }
}

#if CFG_TUSB_DEBUG
#if CFG_TUSB_DEBUG && 0
TU_ATTR_ALWAYS_INLINE static inline void print_hcint(uint32_t hcint) {
  const char* str[] = {
    "XFRC", "HALTED", "AHBERR", "STALL",

@@ -15,7 +15,7 @@ deps_mandatory = {
                 '159e31b689577dbf69cf0683bbaffbd71fa5ee10',
                 'all'],
    'tools/linkermap': ['https://github.com/hathach/linkermap.git',
                        '23d1c4c84c4866b84cb821fb368bb9991633871d',
                        '8e1f440fa15c567aceb5aa0d14f6d18c329cc67f',
                        'all'],
    'tools/uf2': ['https://github.com/microsoft/uf2.git',
                  'c594542b2faa01cc33a2b97c9fbebc38549df80a',

tools/metrics.py (152 changes)
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
"""Calculate average sizes using bloaty output."""
"""Calculate average sizes from bloaty CSV or TinyUSB metrics JSON outputs."""

import argparse
import csv
@@ -85,7 +85,7 @@ def parse_bloaty_csv(csv_text, filters=None):


def combine_files(input_files, filters=None):
    """Combine multiple bloaty outputs into a single data set."""
    """Combine multiple metrics inputs (bloaty CSV or metrics JSON) into a single data set."""

    filters = filters or []
    all_json_data = {"file_list": [], "data": []}
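combine_files now accepts either metrics JSON or raw bloaty CSV. A sketch of the CSV shape parse_bloaty_csv has to handle, assuming bloaty's --csv layout (a header row naming the active data sources followed by vmsize and filesize, matching the -d compileunits,sections,symbols option set earlier in this diff); the sample rows are invented values:

    import csv
    import io

    sample = (
        "compileunits,sections,symbols,vmsize,filesize\n"
        "src/tusb.c,.text,tud_task_ext,412,412\n"
        "src/tusb.c,.bss,_usbd_dev,160,0\n"
    )
    for row in csv.DictReader(io.StringIO(sample)):
        print(row["compileunits"], row["sections"], row["symbols"], int(row["vmsize"]))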
@@ -168,12 +168,6 @@ def compute_avg(all_json_data):
                continue
            file_accumulator[fname]["symbols"][name].append(sym.get("size", 0))
        sections_map = f.get("sections") or {}
        if isinstance(sections_map, list):
            sections_map = {
                s.get("name"): s.get("size", 0)
                for s in sections_map
                if isinstance(s, dict) and s.get("name")
            }
        for sname, ssize in sections_map.items():
            file_accumulator[fname]["sections"][sname].append(ssize)

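The normalization step above exists because a file's "sections" entry may arrive either as a name-to-size map or as a list of {name, size} dicts; the list form is folded into a map before sizes are accumulated. In isolation:

    sections = [{"name": ".text", "size": 1024}, {"name": ".data", "size": 16}]
    if isinstance(sections, list):
        sections = {s["name"]: s["size"] for s in sections if isinstance(s, dict) and s.get("name")}
    assert sections == {".text": 1024, ".data": 16}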
@@ -240,6 +234,8 @@ def compare_files(base_file, new_file, filters=None):
        n = new_files.get(fname, {})
        b_size = b.get("size", 0)
        n_size = n.get("size", 0)
        base_sections = b.get("sections") or {}
        new_sections = n.get("sections") or {}

        # Symbol diffs
        b_syms = {s["name"]: s for s in b.get("symbols", [])}
@@ -256,6 +252,14 @@ def compare_files(base_file, new_file, filters=None):
            "file": fname,
            "size": {"base": b_size, "new": n_size, "diff": n_size - b_size},
            "symbols": symbols,
            "sections": {
                name: {
                    "base": base_sections.get(name, 0),
                    "new": new_sections.get(name, 0),
                    "diff": new_sections.get(name, 0) - base_sections.get(name, 0),
                }
                for name in sorted(set(base_sections) | set(new_sections))
            },
        })

    total = {
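Each per-file comparison entry produced above now carries a per-section base/new/diff breakdown alongside the total size and symbol diffs. An illustrative entry with made-up numbers:

    entry = {
        "file": "src/tusb.c",
        "size": {"base": 1200, "new": 1240, "diff": 40},
        "symbols": [],  # symbol-level diffs, elided here
        "sections": {
            ".text": {"base": 1000, "new": 1040, "diff": 40},
            ".bss": {"base": 200, "new": 200, "diff": 0},
        },
    }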
@@ -299,6 +303,17 @@ def get_sort_key(sort_order):
    return lambda x: x.get('file', ''), False


def format_diff(base, new, diff):
    """Format a diff value with percentage."""
    if diff == 0:
        return f"{new}"
    if base == 0 or new == 0:
        return f"{base} ➙ {new}"
    pct = (diff / base) * 100
    sign = "+" if diff > 0 else ""
    return f"{base} ➙ {new} ({sign}{diff}, {sign}{pct:.1f}%)"


def write_json_output(json_data, path):
    """Write JSON output with indentation."""

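The three branches of format_diff cover the unchanged, appearing/disappearing, and changed cases. Working through them with sample values (assuming format_diff as defined above; the outputs follow directly from its f-strings):

    assert format_diff(100, 100, 0) == "100"                        # unchanged
    assert format_diff(0, 64, 64) == "0 ➙ 64"                       # new entry, no base to take % of
    assert format_diff(100, 110, 10) == "100 ➙ 110 (+10, +10.0%)"   # grew
    assert format_diff(200, 150, -50) == "200 ➙ 150 (-50, -25.0%)"  # shrank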
@@ -315,7 +330,7 @@ def render_combine_table(json_data, sort_order='name+'):
    key_func, reverse = get_sort_key(sort_order)
    files_sorted = sorted(files, key=key_func, reverse=reverse)

    total_size = json_data.get("TOTAL") or (sum(f.get("size", 0) for f in files_sorted) or 1)
    total_size = json_data.get("TOTAL") or sum(f.get("size", 0) for f in files_sorted)

    pct_strings = [
        f"{(f.get('percent') if f.get('percent') is not None else (f.get('size', 0) / total_size * 100 if total_size else 0)):.1f}%"
@@ -357,12 +372,6 @@ def render_combine_table(json_data, sort_order='name+'):
        parts = [f"| {f.get('file', ''):<{file_width}} |"]
        if section_names:
            sections_map = f.get("sections") or {}
            if isinstance(sections_map, list):
                sections_map = {
                    s.get("name"): s.get("size", 0)
                    for s in sections_map
                    if isinstance(s, dict) and s.get("name")
                }
            for name in section_names:
                parts.append(f" {sections_map.get(name, 0):>{section_widths[name]}} |")
        parts.append(f" {size_val:>{size_width}} | {pct_str:>{pct_width}} |")
@@ -469,6 +478,7 @@ def _build_rows(files, sort_order):
            "new": sd['new'],
            "diff": diff_val,
            "pct": pct_str,
            "sections": f.get("sections", {}),
        })
    return rows

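Each row assembled above carries everything render_compare_table needs: file name, base/new/diff totals, a preformatted percent string, and the new per-section map. An illustrative row with made-up values:

    row = {
        "file": "src/tusb.c",
        "base": 1200,
        "new": 1240,
        "diff": 40,
        "pct": "+3.3%",
        "sections": {".text": {"base": 1000, "new": 1040, "diff": 40}},
    }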
@@ -506,59 +516,68 @@ def render_compare_table(rows, include_sum):
    if not rows:
        return ["No entries.", ""]

    # collect section columns (reverse alpha)
    section_names = sorted(
        {name for r in rows for name in (r.get("sections") or {})},
        reverse=True,
    )

    def fmt_abs(val_old, val_new):
        diff = val_new - val_old
        if diff == 0:
            return f"{val_new}"
        sign = "+" if diff > 0 else ""
        return f"{val_old} ➙ {val_new} ({sign}{diff})"

    sum_base = sum(r["base"] for r in rows)
    sum_new = sum(r["new"] for r in rows)
    total_diff = sum_new - sum_base
    total_pct = "n/a" if sum_base == 0 else f"{(total_diff / sum_base) * 100:+.1f}%"

    base_width = max(len("base"), *(len(str(r["base"])) for r in rows))
    new_width = max(len("new"), *(len(str(r["new"])) for r in rows))
    diff_width = max(len("diff"), *(len(f"{r['diff']:+}") for r in rows))
    pct_width = max(len("% diff"), *(len(r["pct"]) for r in rows))
    name_width = max(len("file"), *(len(r["file"]) for r in rows))

    if include_sum:
        base_width = max(base_width, len(str(sum_base)))
        new_width = max(new_width, len(str(sum_new)))
        diff_width = max(diff_width, len(f"{total_diff:+}"))
        pct_width = max(pct_width, len(total_pct))
        name_width = max(name_width, len("TOTAL"))

    header = (
        f"| {'file':<{name_width}} | "
        f"{'base':>{base_width}} | "
        f"{'new':>{new_width}} | "
        f"{'diff':>{diff_width}} | "
        f"{'% diff':>{pct_width}} |"
    )
    separator = (
        f"| :{'-' * (name_width - 1)} | "
        f"{'-' * base_width}:| "
        f"{'-' * new_width}:| "
        f"{'-' * diff_width}:| "
        f"{'-' * pct_width}:|"
    )
    file_width = max(len("file"), *(len(r["file"]) for r in rows), len("TOTAL"))
    size_width = max(
        len("size"),
        *(len(fmt_abs(r["base"], r["new"])) for r in rows),
        len(fmt_abs(sum_base, sum_new)),
    )
    pct_width = max(len("% diff"), *(len(r["pct"]) for r in rows), len(total_pct))
    section_widths = {}
    for name in section_names:
        max_val_len = 0
        for r in rows:
            sec_entry = (r.get("sections") or {}).get(name, {"base": 0, "new": 0})
            max_val_len = max(max_val_len, len(fmt_abs(sec_entry.get("base", 0), sec_entry.get("new", 0))))
        section_widths[name] = max(len(name), max_val_len, 1)

    header_parts = [f"| {'file':<{file_width}} |"]
    sep_parts = [f"| :{'-' * (file_width - 1)} |"]
    for name in section_names:
        header_parts.append(f" {name:>{section_widths[name]}} |")
        sep_parts.append(f" {'-' * (section_widths[name] - 1)}: |")
    header_parts.append(f" {'size':>{size_width}} | {'% diff':>{pct_width}} |")
    sep_parts.append(f" {'-' * (size_width - 1)}: | {'-' * (pct_width - 1)}: |")
    header = "".join(header_parts)
    separator = "".join(sep_parts)

    lines = [header, separator]

    for r in rows:
        diff_str = f"{r['diff']:+}"
        lines.append(
            f"| {r['file']:<{name_width}} | "
            f"{str(r['base']):>{base_width}} | "
            f"{str(r['new']):>{new_width}} | "
            f"{diff_str:>{diff_width}} | "
            f"{r['pct']:>{pct_width}} |"
        )
        parts = [f"| {r['file']:<{file_width}} |"]
        sections_map = r.get("sections") or {}
        for name in section_names:
            sec_entry = sections_map.get(name, {"base": 0, "new": 0})
            parts.append(f" {fmt_abs(sec_entry.get('base', 0), sec_entry.get('new', 0)):>{section_widths[name]}} |")
        parts.append(f" {fmt_abs(r['base'], r['new']):>{size_width}} | {r['pct']:>{pct_width}} |")
        lines.append("".join(parts))

    if include_sum:
        lines.append(
            f"| {'TOTAL':<{name_width}} | "
            f"{sum_base:>{base_width}} | "
            f"{sum_new:>{new_width}} | "
            f"{total_diff:+{diff_width}d} | "
            f"{total_pct:>{pct_width}} |"
        )
        total_parts = [f"| {'TOTAL':<{file_width}} |"]
        for name in section_names:
            total_base = sum((r.get("sections") or {}).get(name, {}).get("base", 0) for r in rows)
            total_new = sum((r.get("sections") or {}).get(name, {}).get("new", 0) for r in rows)
            total_parts.append(f" {fmt_abs(total_base, total_new):>{section_widths[name]}} |")
        total_parts.append(f" {fmt_abs(sum_base, sum_new):>{size_width}} | {total_pct:>{pct_width}} |")
        lines.append("".join(total_parts))
    return lines

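fmt_abs collapses unchanged values to a single number, which keeps section columns compact. Since it is nested inside render_compare_table, here is a standalone copy of the helper for illustration, with its two cases worked through:

    def fmt_abs(val_old, val_new):  # standalone copy of the inner helper, for illustration
        diff = val_new - val_old
        if diff == 0:
            return f"{val_new}"
        sign = "+" if diff > 0 else ""
        return f"{val_old} ➙ {val_new} ({sign}{diff})"

    assert fmt_abs(96, 96) == "96"
    assert fmt_abs(96, 128) == "96 ➙ 128 (+32)"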
@@ -592,9 +611,10 @@ def cmd_compare(args):

    if not args.quiet:
        print_compare_summary(comparison, args.sort)
    write_compare_markdown(comparison, args.out + '.md', args.sort)
    if not args.quiet:
        print(f"Comparison written to {args.out}.md")
    if args.markdown_out:
        write_compare_markdown(comparison, args.out + '.md', args.sort)
        if not args.quiet:
            print(f"Comparison written to {args.out}.md")


def main(argv=None):
@@ -602,9 +622,9 @@
    subparsers = parser.add_subparsers(dest='command', required=True, help='Available commands')

    # Combine subcommand
    combine_parser = subparsers.add_parser('combine', help='Combine and average multiple bloaty outputs')
    combine_parser = subparsers.add_parser('combine', help='Combine and average bloaty CSV outputs or metrics JSON files')
    combine_parser.add_argument('files', nargs='+',
                                help='Path to bloaty CSV output or JSON file(s) or glob pattern(s)')
                                help='Path to bloaty CSV output or TinyUSB metrics JSON file(s) (including linkermap-generated) or glob pattern(s)')
    combine_parser.add_argument('-f', '--filter', dest='filters', action='append', default=[],
                                help='Only include compile units whose path contains this substring (can be repeated)')
    combine_parser.add_argument('-o', '--out', dest='out', default='metrics',
@@ -620,13 +640,15 @@
                                help='Sort order: size/size- (descending), size+ (ascending), name/name+ (ascending), name- (descending). Default: size-')

    # Compare subcommand
    compare_parser = subparsers.add_parser('compare', help='Compare two bloaty outputs (CSV) or JSON inputs')
    compare_parser.add_argument('base', help='Base CSV/JSON file')
    compare_parser.add_argument('new', help='New CSV/JSON file')
    compare_parser = subparsers.add_parser('compare', help='Compare two metrics inputs (bloaty CSV or metrics JSON)')
    compare_parser.add_argument('base', help='Base CSV/metrics JSON file')
    compare_parser.add_argument('new', help='New CSV/metrics JSON file')
    compare_parser.add_argument('-f', '--filter', dest='filters', action='append', default=[],
                                help='Only include compile units whose path contains this substring (can be repeated)')
    compare_parser.add_argument('-o', '--out', dest='out', default='metrics_compare',
                                help='Output path basename for Markdown file (default: metrics_compare)')
                                help='Output path basename for Markdown/JSON files (default: metrics_compare)')
    compare_parser.add_argument('-m', '--markdown', dest='markdown_out', action='store_true',
                                help='Write Markdown output file')
    compare_parser.add_argument('-S', '--sort', dest='sort', default='name+',
                                choices=['size', 'size-', 'size+', 'name', 'name-', 'name+'],
                                help='Sort order: size/size- (descending), size+ (ascending), name/name+ (ascending), name- (descending). Default: name+')
