Mirror of https://github.com/hathach/tinyusb.git
Merge pull request #3399 from hathach/size-reduce
edpt stream only support no fifo mode if needed
@@ -227,7 +227,6 @@ jobs:
name: Aggregate Code Metrics
command: |
python tools/get_deps.py
pip install tools/linkermap/
# Combine all metrics files from all toolchain subdirectories
ls -R /tmp/metrics
if ls /tmp/metrics/*/*.json 1> /dev/null 2>&1; then

3 .github/workflows/build.yml vendored
@@ -100,7 +100,6 @@ jobs:
- name: Aggregate Code Metrics
run: |
python tools/get_deps.py
pip install tools/linkermap/
python tools/metrics.py combine -j -m -f tinyusb/src cmake-build/*/metrics.json

- name: Upload Metrics Artifact

@@ -124,7 +123,7 @@ jobs:
if: github.event_name != 'push'
run: |
if [ -f base-metrics/metrics.json ]; then
python tools/metrics.py compare -f tinyusb/src base-metrics/metrics.json metrics.json
python tools/metrics.py compare -m -f tinyusb/src base-metrics/metrics.json metrics.json
cat metrics_compare.md
else
echo "No base metrics found, skipping comparison"

@@ -7,7 +7,6 @@ if (NOT DEFINED CMAKE_CXX_COMPILER)
endif ()

set(CMAKE_ASM_COMPILER ${CMAKE_C_COMPILER})
set(TOOLCHAIN_ASM_FLAGS "-x assembler-with-cpp")

find_program(CMAKE_SIZE llvm-size)
find_program(CMAKE_OBJCOPY llvm-objcopy)

@@ -20,37 +20,32 @@ include(${CMAKE_CURRENT_LIST_DIR}/../cpu/${CMAKE_SYSTEM_CPU}.cmake)
# ----------------------------------------------------------------------------
# Compile flags
# ----------------------------------------------------------------------------
set(TOOLCHAIN_C_FLAGS)
set(TOOLCHAIN_ASM_FLAGS)
set(TOOLCHAIN_EXE_LINKER_FLAGS)

if (TOOLCHAIN STREQUAL "gcc" OR TOOLCHAIN STREQUAL "clang")
list(APPEND TOOLCHAIN_COMMON_FLAGS
-fdata-sections
-ffunction-sections
# -fsingle-precision-constant # not supported by clang
-fno-strict-aliasing
-g # include debug info for bloaty
)
list(APPEND TOOLCHAIN_EXE_LINKER_FLAGS
-Wl,--print-memory-usage
-Wl,--gc-sections
-Wl,--cref
)
set(TOOLCHAIN_EXE_LINKER_FLAGS "-Wl,--print-memory-usage -Wl,--gc-sections -Wl,--cref")

if (TOOLCHAIN STREQUAL clang)
set(TOOLCHAIN_ASM_FLAGS "-x assembler-with-cpp")
endif ()
elseif (TOOLCHAIN STREQUAL "iar")
list(APPEND TOOLCHAIN_EXE_LINKER_FLAGS
--diag_suppress=Li065
)
set(TOOLCHAIN_C_FLAGS --debug)
set(TOOLCHAIN_EXE_LINKER_FLAGS --diag_suppress=Li065)
endif ()

# join the toolchain flags into a single string
list(JOIN TOOLCHAIN_COMMON_FLAGS " " TOOLCHAIN_COMMON_FLAGS)
foreach (LANG IN ITEMS C CXX ASM)
set(CMAKE_${LANG}_FLAGS_INIT ${TOOLCHAIN_COMMON_FLAGS})
# optimization flags for LOG, LOGGER ?
#set(CMAKE_${LANG}_FLAGS_RELEASE_INIT "-Os")
#set(CMAKE_${LANG}_FLAGS_DEBUG_INIT "-O0")
endforeach ()

# Assembler
if (DEFINED TOOLCHAIN_ASM_FLAGS)
set(CMAKE_ASM_FLAGS_INIT "${CMAKE_ASM_FLAGS_INIT} ${TOOLCHAIN_ASM_FLAGS}")
endif ()

# Linker
list(JOIN TOOLCHAIN_EXE_LINKER_FLAGS " " CMAKE_EXE_LINKER_FLAGS_INIT)
set(CMAKE_C_FLAGS_INIT "${TOOLCHAIN_COMMON_FLAGS} ${TOOLCHAIN_C_FLAGS}")
set(CMAKE_CXX_FLAGS_INIT "${TOOLCHAIN_COMMON_FLAGS} ${TOOLCHAIN_C_FLAGS}")
set(CMAKE_ASM_FLAGS_INIT "${TOOLCHAIN_COMMON_FLAGS} ${TOOLCHAIN_ASM_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS_INIT ${TOOLCHAIN_EXE_LINKER_FLAGS})

@@ -10,6 +10,7 @@ get_filename_component(TOP ${TOP} ABSOLUTE)

set(UF2CONV_PY ${TOP}/tools/uf2/utils/uf2conv.py)
set(LINKERMAP_PY ${TOP}/tools/linkermap/linkermap.py)
set(METRICS_PY ${TOP}/tools/metrics.py)

function(family_resolve_board BOARD_NAME BOARD_PATH_OUT)
if ("${BOARD_NAME}" STREQUAL "")
@@ -224,21 +225,47 @@ function(family_initialize_project PROJECT DIR)
endif()
endfunction()

# Add linkermap target (https://github.com/hathach/linkermap)
function(family_add_linkermap TARGET)
set(LINKERMAP_OPTION_LIST)
if (DEFINED LINKERMAP_OPTION)
separate_arguments(LINKERMAP_OPTION_LIST UNIX_COMMAND ${LINKERMAP_OPTION})
# Add bloaty (https://github.com/google/bloaty/) target; requires compiling with -g (debug)
function(family_add_bloaty TARGET)
find_program(BLOATY_EXE bloaty)
if (BLOATY_EXE STREQUAL BLOATY_EXE-NOTFOUND)
return()
endif ()

set(OPTION "--domain=vm -d compileunits,sections,symbols")
if (DEFINED BLOATY_OPTION)
string(APPEND OPTION " ${BLOATY_OPTION}")
endif ()
separate_arguments(OPTION_LIST UNIX_COMMAND ${OPTION})

add_custom_target(${TARGET}-bloaty
DEPENDS ${TARGET}
COMMAND ${BLOATY_EXE} ${OPTION_LIST} $<TARGET_FILE:${TARGET}>
VERBATIM)

# post build
# add_custom_command(TARGET ${TARGET} POST_BUILD
# COMMAND ${BLOATY_EXE} --csv ${OPTION_LIST} $<TARGET_FILE:${TARGET}> > ${CMAKE_CURRENT_BINARY_DIR}/${TARGET}_bloaty.csv
# VERBATIM
# )
endfunction()
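(Note: with this custom target defined, the size report can be produced on demand with "cmake --build <build-dir> --target <target>-bloaty"; the target name follows from the add_custom_target call above, and the target only exists when the bloaty executable was found.)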

# Add linkermap target (https://github.com/hathach/linkermap)
function(family_add_linkermap TARGET)
set(OPTION "-j")
if (DEFINED LINKERMAP_OPTION)
string(APPEND OPTION " ${LINKERMAP_OPTION}")
endif ()
separate_arguments(OPTION_LIST UNIX_COMMAND ${OPTION})

add_custom_target(${TARGET}-linkermap
COMMAND python ${LINKERMAP_PY} -j ${LINKERMAP_OPTION_LIST} $<TARGET_FILE:${TARGET}>.map
COMMAND python ${LINKERMAP_PY} ${OPTION_LIST} $<TARGET_FILE:${TARGET}>.map
VERBATIM
)

# post build
add_custom_command(TARGET ${TARGET} POST_BUILD
COMMAND python ${LINKERMAP_PY} -j ${LINKERMAP_OPTION_LIST} $<TARGET_FILE:${TARGET}>.map
COMMAND python ${LINKERMAP_PY} ${OPTION_LIST} $<TARGET_FILE:${TARGET}>.map
VERBATIM)
endfunction()

@@ -352,7 +379,8 @@ function(family_configure_common TARGET RTOS)
endif ()

if (NOT RTOS STREQUAL zephyr)
# Generate linkermap target and post build. LINKERMAP_OPTION can be set with -D to change default options
# Analyze size with bloaty and linkermap
family_add_bloaty(${TARGET})
family_add_linkermap(${TARGET})
endif ()

@@ -24,21 +24,13 @@
* This file is part of the TinyUSB stack.
*/

/** \ingroup Group_Common
* \defgroup Group_Compiler Compiler
* \brief Group_Compiler brief
* @{ */

#ifndef TUSB_COMPILER_H_
#define TUSB_COMPILER_H_
#pragma once

#define TU_TOKEN(x) x
#define TU_STRING(x) #x ///< stringify without expand
#define TU_XSTRING(x) TU_STRING(x) ///< expand then stringify

#define TU_STRCAT(a, b) a##b ///< concat without expand
#define TU_STRCAT3(a, b, c) a##b##c ///< concat without expand

#define TU_XSTRCAT(a, b) TU_STRCAT(a, b) ///< expand then concat
#define TU_XSTRCAT3(a, b, c) TU_STRCAT3(a, b, c) ///< expand then concat 3 tokens

@@ -139,18 +131,20 @@
#define TU_FUNC_OPTIONAL_ARG(func, ...) TU_XSTRCAT(func##_arg, TU_ARGS_NUM(__VA_ARGS__))(__VA_ARGS__)

//--------------------------------------------------------------------+
// Compiler porting with Attribute and Endian
// Compiler Attribute Abstraction
//--------------------------------------------------------------------+
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__TI_COMPILER_VERSION__)
#if defined(__ICCARM__)
#include <intrinsics.h> // for builtin functions
#endif

// TODO refactor since __attribute__ is supported across many compilers
#if defined(__GNUC__)
#define TU_ATTR_ALIGNED(Bytes) __attribute__ ((aligned(Bytes)))
#define TU_ATTR_SECTION(sec_name) __attribute__ ((section(#sec_name)))
#define TU_ATTR_PACKED __attribute__ ((packed))
#define TU_ATTR_WEAK __attribute__ ((weak))
// #define TU_ATTR_WEAK_ALIAS(f) __attribute__ ((weak, alias(#f)))
#ifndef TU_ATTR_ALWAYS_INLINE // allow to override for debug
#define TU_ATTR_ALWAYS_INLINE __attribute__ ((always_inline))
#define TU_ATTR_ALIGNED(Bytes) __attribute__((aligned(Bytes)))
#define TU_ATTR_SECTION(sec_name) __attribute__((section(#sec_name)))
#define TU_ATTR_PACKED __attribute__((packed))
#define TU_ATTR_WEAK __attribute__((weak))
// #define TU_ATTR_WEAK_ALIAS(f) __attribute__ ((weak, alias(#f)))
#ifndef TU_ATTR_ALWAYS_INLINE // allow to override for debug
#define TU_ATTR_ALWAYS_INLINE __attribute__((always_inline))
#endif
#define TU_ATTR_DEPRECATED(mess) __attribute__ ((deprecated(mess))) // warn if function with this attribute is used
#define TU_ATTR_UNUSED __attribute__ ((unused)) // Function/Variable is meant to be possibly unused
@@ -161,18 +155,17 @@
#define TU_ATTR_BIT_FIELD_ORDER_BEGIN
#define TU_ATTR_BIT_FIELD_ORDER_END

#if __GNUC__ < 5
#define TU_ATTR_FALLTHROUGH do {} while (0) /* fallthrough */
#if (defined(__has_attribute) && __has_attribute(__fallthrough__)) || defined(__TI_COMPILER_VERSION__)
#define TU_ATTR_FALLTHROUGH __attribute__((fallthrough))
#else
#if __has_attribute(__fallthrough__)
#define TU_ATTR_FALLTHROUGH __attribute__((fallthrough))
#else
#define TU_ATTR_FALLTHROUGH do {} while (0) /* fallthrough */
#endif
#define TU_ATTR_FALLTHROUGH \
do { \
} while (0) /* fallthrough */
#endif

// Endian conversion use well-known host to network (big endian) naming
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
// Endian conversion use well-known host to network (big endian) naming
// For TI ARM compiler, __BYTE_ORDER__ is not defined for MSP430 but still LE
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || defined(__MSP430__)
#define TU_BYTE_ORDER TU_LITTLE_ENDIAN
#else
#define TU_BYTE_ORDER TU_BIG_ENDIAN
@@ -190,39 +183,12 @@
#define TU_BSWAP32(u32) (__builtin_bswap32(u32))
#endif

#ifndef __ARMCC_VERSION
// List of obsolete callback functions that have been renamed and should not be defined.
// Put it here since only gcc supports this pragma
#pragma GCC poison tud_vendor_control_request_cb
#endif

#elif defined(__TI_COMPILER_VERSION__)
#define TU_ATTR_ALIGNED(Bytes) __attribute__ ((aligned(Bytes)))
#define TU_ATTR_SECTION(sec_name) __attribute__ ((section(#sec_name)))
#define TU_ATTR_PACKED __attribute__ ((packed))
#define TU_ATTR_WEAK __attribute__ ((weak))
// #define TU_ATTR_WEAK_ALIAS(f) __attribute__ ((weak, alias(#f)))
#define TU_ATTR_ALWAYS_INLINE __attribute__ ((always_inline))
#define TU_ATTR_DEPRECATED(mess) __attribute__ ((deprecated(mess))) // warn if function with this attribute is used
#define TU_ATTR_UNUSED __attribute__ ((unused)) // Function/Variable is meant to be possibly unused
#define TU_ATTR_USED __attribute__ ((used))
#define TU_ATTR_FALLTHROUGH __attribute__((fallthrough))

#define TU_ATTR_PACKED_BEGIN
#define TU_ATTR_PACKED_END
#define TU_ATTR_BIT_FIELD_ORDER_BEGIN
#define TU_ATTR_BIT_FIELD_ORDER_END

// __BYTE_ORDER is defined in the TI ARM compiler, but not MSP430 (which is little endian)
#if ((__BYTE_ORDER__) == (__ORDER_LITTLE_ENDIAN__)) || defined(__MSP430__)
#define TU_BYTE_ORDER TU_LITTLE_ENDIAN
#else
#define TU_BYTE_ORDER TU_BIG_ENDIAN
#if !defined(__ARMCC_VERSION) && !defined(__ICCARM__)
#pragma GCC poison tud_vendor_control_request_cb
#endif

#define TU_BSWAP16(u16) (__builtin_bswap16(u16))
#define TU_BSWAP32(u32) (__builtin_bswap32(u32))

#elif defined(__ICCARM__)
#include <intrinsics.h>
#define TU_ATTR_ALIGNED(Bytes) __attribute__ ((aligned(Bytes)))
@@ -316,7 +282,3 @@
#else
#error Byte order is undefined
#endif

#endif /* TUSB_COMPILER_H_ */

/// @}

@@ -224,10 +224,11 @@ TU_ATTR_ALWAYS_INLINE static inline uint16_t tu_fifo_write_n(tu_fifo_t *f, const
//--------------------------------------------------------------------+
// return overflowable count (index difference), which can be used to determine both fifo count and an overflow state
TU_ATTR_ALWAYS_INLINE static inline uint16_t tu_ff_overflow_count(uint16_t depth, uint16_t wr_idx, uint16_t rd_idx) {
if (wr_idx >= rd_idx) {
return (uint16_t)(wr_idx - rd_idx);
const int32_t diff = (int32_t)wr_idx - (int32_t)rd_idx;
if (diff >= 0) {
return (uint16_t)diff;
} else {
return (uint16_t)(2 * depth - (rd_idx - wr_idx));
return (uint16_t)(2 * depth + diff);
}
}
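
A quick sanity check of the signed-difference rewrite (a Python sketch for illustration, not part of the commit; it assumes, as the 2 * depth term implies, that the fifo indices run freely in [0, 2*depth)):

def overflow_count(depth, wr_idx, rd_idx):
    # mirrors tu_ff_overflow_count above
    diff = wr_idx - rd_idx
    return diff if diff >= 0 else 2 * depth + diff

# wrapped write index: old form 2*8 - (14 - 1) equals new form 2*8 + (1 - 14), both 3
assert overflow_count(8, 1, 14) == 3
# a count greater than depth signals an overflow state: 9 items in a depth-8 fifo
assert overflow_count(8, 9, 0) == 9
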
@@ -33,6 +33,18 @@
extern "C" {
#endif

//--------------------------------------------------------------------+
// Configuration
//--------------------------------------------------------------------+

#if CFG_TUD_ENABLED && CFG_TUD_VENDOR && (CFG_TUD_VENDOR_TX_BUFSIZE == 0 || CFG_TUD_VENDOR_RX_BUFSIZE == 0)
#define CFG_TUSB_EDPT_STREAM_NO_FIFO_ENABLED 1
#endif

#ifndef CFG_TUSB_EDPT_STREAM_NO_FIFO_ENABLED
#define CFG_TUSB_EDPT_STREAM_NO_FIFO_ENABLED 0
#endif
#endif
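
(In effect, the no-FIFO stream path is only compiled in when the device stack and vendor class are enabled with CFG_TUD_VENDOR_TX_BUFSIZE or CFG_TUD_VENDOR_RX_BUFSIZE set to 0, i.e. when an application opts out of the FIFO and streams directly through the endpoint buffer. Every other configuration sees CFG_TUSB_EDPT_STREAM_NO_FIFO_ENABLED == 0 and the extra branches below are compiled out, which is the size reduction this PR is after.)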

#define TUP_USBIP_CONTROLLER_NUM 2
extern tusb_role_t _tusb_rhport_role[TUP_USBIP_CONTROLLER_NUM];

@@ -821,7 +821,7 @@ static void channel_xfer_in_retry(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
}
}

#if CFG_TUSB_DEBUG
#if CFG_TUSB_DEBUG && 0
TU_ATTR_ALWAYS_INLINE static inline void print_hcint(uint32_t hcint) {
const char* str[] = {
"XFRC", "HALTED", "AHBERR", "STALL",

36 src/tusb.c
@@ -338,6 +338,10 @@ bool tu_edpt_stream_init(tu_edpt_stream_t* s, bool is_host, bool is_tx, bool ove
void* ff_buf, uint16_t ff_bufsize, uint8_t* ep_buf, uint16_t ep_bufsize) {
(void) is_tx;

if (CFG_TUSB_EDPT_STREAM_NO_FIFO_ENABLED == 0 && (ff_buf == NULL || ff_bufsize == 0)) {
return false;
}

s->is_host = is_host;
tu_fifo_config(&s->ff, ff_buf, ff_bufsize, 1, overwritable);

@@ -367,7 +371,7 @@ bool tu_edpt_stream_deinit(tu_edpt_stream_t *s) {
return true;
}

TU_ATTR_ALWAYS_INLINE static inline bool stream_claim(uint8_t hwid, tu_edpt_stream_t* s) {
static bool stream_claim(uint8_t hwid, tu_edpt_stream_t *s) {
if (s->is_host) {
#if CFG_TUH_ENABLED
return usbh_edpt_claim(hwid, s->ep_addr);
@@ -380,7 +384,7 @@ TU_ATTR_ALWAYS_INLINE static inline bool stream_claim(uint8_t hwid, tu_edpt_stre
return false;
}

TU_ATTR_ALWAYS_INLINE static inline bool stream_xfer(uint8_t hwid, tu_edpt_stream_t* s, uint16_t count) {
static bool stream_xfer(uint8_t hwid, tu_edpt_stream_t *s, uint16_t count) {
if (s->is_host) {
#if CFG_TUH_ENABLED
return usbh_edpt_xfer(hwid, s->ep_addr, count ? s->ep_buf : NULL, count);
@@ -397,7 +401,7 @@ TU_ATTR_ALWAYS_INLINE static inline bool stream_xfer(uint8_t hwid, tu_edpt_strea
return false;
}

TU_ATTR_ALWAYS_INLINE static inline bool stream_release(uint8_t hwid, tu_edpt_stream_t* s) {
static bool stream_release(uint8_t hwid, tu_edpt_stream_t *s) {
if (s->is_host) {
#if CFG_TUH_ENABLED
return usbh_edpt_release(hwid, s->ep_addr);
@@ -447,8 +451,9 @@ uint32_t tu_edpt_stream_write_xfer(uint8_t hwid, tu_edpt_stream_t* s) {
}

uint32_t tu_edpt_stream_write(uint8_t hwid, tu_edpt_stream_t *s, const void *buffer, uint32_t bufsize) {
TU_VERIFY(bufsize > 0); // TODO support ZLP
TU_VERIFY(bufsize > 0);

#if CFG_TUSB_EDPT_STREAM_NO_FIFO_ENABLED
if (0 == tu_fifo_depth(&s->ff)) {
// non-fifo mode
TU_VERIFY(stream_claim(hwid, s), 0);
@@ -464,7 +469,9 @@ uint32_t tu_edpt_stream_write(uint8_t hwid, tu_edpt_stream_t *s, const void *buf
TU_ASSERT(stream_xfer(hwid, s, (uint16_t) xact_len), 0);

return xact_len;
} else {
} else
#endif
{
const uint16_t ret = tu_fifo_write_n(&s->ff, buffer, (uint16_t) bufsize);

// flush if fifo has more than packet size or
@@ -477,10 +484,9 @@ uint32_t tu_edpt_stream_write(uint8_t hwid, tu_edpt_stream_t *s, const void *buf
}
}

uint32_t tu_edpt_stream_write_available(uint8_t hwid, tu_edpt_stream_t* s) {
if (tu_fifo_depth(&s->ff) > 0) {
return (uint32_t) tu_fifo_remaining(&s->ff);
} else {
uint32_t tu_edpt_stream_write_available(uint8_t hwid, tu_edpt_stream_t *s) {
#if CFG_TUSB_EDPT_STREAM_NO_FIFO_ENABLED
if (0 == tu_fifo_depth(&s->ff)) {
// non-fifo mode
bool is_busy = true;
if (s->is_host) {
@@ -493,20 +499,28 @@ uint32_t tu_edpt_stream_write_available(uint8_t hwid, tu_edpt_stream_t* s) {
#endif
}
return is_busy ? 0 : s->ep_bufsize;
} else
#endif
{
(void)hwid;
return (uint32_t)tu_fifo_remaining(&s->ff);
}
}

//--------------------------------------------------------------------+
// Stream Read
//--------------------------------------------------------------------+
uint32_t tu_edpt_stream_read_xfer(uint8_t hwid, tu_edpt_stream_t* s) {
uint32_t tu_edpt_stream_read_xfer(uint8_t hwid, tu_edpt_stream_t *s) {
#if CFG_TUSB_EDPT_STREAM_NO_FIFO_ENABLED
if (0 == tu_fifo_depth(&s->ff)) {
// non-fifo mode: RX need ep buffer
TU_VERIFY(s->ep_buf != NULL, 0);
TU_VERIFY(stream_claim(hwid, s), 0);
TU_ASSERT(stream_xfer(hwid, s, s->ep_bufsize), 0);
return s->ep_bufsize;
} else {
} else
#endif
{
const uint16_t mps = s->is_mps512 ? TUSB_EPSIZE_BULK_HS : TUSB_EPSIZE_BULK_FS;
uint16_t available = tu_fifo_remaining(&s->ff);

@@ -15,7 +15,7 @@ deps_mandatory = {
'159e31b689577dbf69cf0683bbaffbd71fa5ee10',
'all'],
'tools/linkermap': ['https://github.com/hathach/linkermap.git',
'8a8206c39d0dfd7abfa615a676b3291165fcd65c',
'8e1f440fa15c567aceb5aa0d14f6d18c329cc67f',
'all'],
'tools/uf2': ['https://github.com/microsoft/uf2.git',
'c594542b2faa01cc33a2b97c9fbebc38549df80a',

708 tools/metrics.py
@@ -1,15 +1,14 @@
#!/usr/bin/env python3
"""Calculate average size from multiple linker map files."""
"""Calculate average sizes from bloaty CSV or TinyUSB metrics JSON outputs."""

import argparse
import csv
import glob
import io
import json
import sys
import os

# Add linkermap module to path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'linkermap'))
import linkermap
import sys
from collections import defaultdict


def expand_files(file_patterns):
@@ -30,41 +29,105 @@ def expand_files(file_patterns):
return expanded


def combine_maps(map_files, filters=None):
"""Combine multiple map files into a list of json_data.
def parse_bloaty_csv(csv_text, filters=None):
"""Parse bloaty CSV text and return normalized JSON data structure."""

Args:
map_files: List of paths to linker map files or JSON files
filters: List of path substrings to filter object files (default: [])

Returns:
all_json_data: Dictionary with mapfiles list and data from each map file
"""
filters = filters or []
all_json_data = {"mapfiles": [], "data": []}
reader = csv.DictReader(io.StringIO(csv_text))
size_by_unit = defaultdict(int)
symbols_by_unit: dict[str, defaultdict[str, int]] = defaultdict(lambda: defaultdict(int))
sections_by_unit: dict[str, defaultdict[str, int]] = defaultdict(lambda: defaultdict(int))

for map_file in map_files:
if not os.path.exists(map_file):
print(f"Warning: {map_file} not found, skipping", file=sys.stderr)
for row in reader:
compile_unit = row.get("compileunits") or row.get("compileunit") or row.get("path")
if compile_unit is None:
continue

if str(compile_unit).upper() == "TOTAL":
continue

if filters and not any(filt in compile_unit for filt in filters):
continue

try:
if map_file.endswith('.json'):
with open(map_file, 'r', encoding='utf-8') as f:
vmsize = int(row.get("vmsize", 0))
except ValueError:
continue

size_by_unit[compile_unit] += vmsize
symbol_name = row.get("symbols", "")
if symbol_name:
symbols_by_unit[compile_unit][symbol_name] += vmsize
section_name = row.get("sections") or row.get("section")
if section_name and vmsize:
sections_by_unit[compile_unit][section_name] += vmsize

files = []
for unit_path, total_size in size_by_unit.items():
symbols = [
{"name": sym, "size": sz}
for sym, sz in sorted(symbols_by_unit[unit_path].items(), key=lambda x: x[1], reverse=True)
]
sections = {sec: sz for sec, sz in sections_by_unit[unit_path].items() if sz}
files.append(
{
"file": os.path.basename(unit_path) or unit_path,
"path": unit_path,
"size": total_size,
"total": total_size,
"symbols": symbols,
"sections": sections,
}
)

total_all = sum(size_by_unit.values())
return {"files": files, "TOTAL": total_all}
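
For orientation, a tiny worked example of the normalized structure (hypothetical input, not taken from the commit; the symbol name _usbd_buf is made up):

csv_text = (
    "compileunits,sections,symbols,vmsize\n"
    "src/tusb.c,.text,tud_task,1200\n"
    "src/tusb.c,.bss,_usbd_buf,256\n"
)
data = parse_bloaty_csv(csv_text, filters=["src"])
# rows for the same compile unit are merged and their vmsize summed:
# data["TOTAL"] == 1456
# data["files"][0]["file"] == "tusb.c", ["size"] == 1456,
# ["sections"] == {".text": 1200, ".bss": 256}
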
def combine_files(input_files, filters=None):
"""Combine multiple metrics inputs (bloaty CSV or metrics JSON) into a single data set."""

filters = filters or []
all_json_data = {"file_list": [], "data": []}

for fin in input_files:
if not os.path.exists(fin):
print(f"Warning: {fin} not found, skipping", file=sys.stderr)
continue

try:
if fin.endswith(".json"):
with open(fin, "r", encoding="utf-8") as f:
json_data = json.load(f)
# Apply path filters to JSON data
if filters:
filtered_files = [
f for f in json_data.get("files", [])
json_data["files"] = [
f
for f in json_data.get("files", [])
if f.get("path") and any(filt in f["path"] for filt in filters)
]
json_data["files"] = filtered_files
elif fin.endswith(".csv"):
with open(fin, "r", encoding="utf-8") as f:
csv_text = f.read()
json_data = parse_bloaty_csv(csv_text, filters)
else:
json_data = linkermap.analyze_map(map_file, filters=filters)
all_json_data["mapfiles"].append(map_file)
if fin.endswith(".elf"):
print(f"Warning: {fin} is an ELF; please run bloaty with --csv output first. Skipping.",
file=sys.stderr)
else:
print(f"Warning: {fin} is not a supported CSV or JSON metrics input. Skipping.",
file=sys.stderr)
continue

# Drop any fake TOTAL entries that slipped in as files
json_data["files"] = [
f for f in json_data.get("files", [])
if str(f.get("file", "")).upper() != "TOTAL"
]

all_json_data["file_list"].append(fin)
all_json_data["data"].append(json_data)
except Exception as e:
print(f"Warning: Failed to analyze {map_file}: {e}", file=sys.stderr)
except Exception as e: # pragma: no cover - defensive
print(f"Warning: Failed to analyze {fin}: {e}", file=sys.stderr)
continue

return all_json_data
@@ -74,7 +137,7 @@ def compute_avg(all_json_data):
"""Compute average sizes from combined json_data.

Args:
all_json_data: Dictionary with mapfiles and data from combine_maps()
all_json_data: Dictionary with file_list and data from combine_files()

Returns:
json_average: Dictionary with averaged size data
@@ -82,117 +145,164 @@
if not all_json_data["data"]:
return None

# Collect all sections preserving order
all_sections = []
for json_data in all_json_data["data"]:
for s in json_data["sections"]:
if s not in all_sections:
all_sections.append(s)

# Merge files with the same 'file' value and compute averages
file_accumulator = {} # key: file name, value: {"sections": {section: [sizes]}, "totals": [totals]}
file_accumulator = {} # key: file name, value: {"sizes": [sizes], "totals": [totals], "symbols": {name: [sizes]}, "sections": {name: [sizes]}}

for json_data in all_json_data["data"]:
for f in json_data["files"]:
for f in json_data.get("files", []):
fname = f["file"]
if fname not in file_accumulator:
file_accumulator[fname] = {"sections": {}, "totals": [], "path": f.get("path")}
file_accumulator[fname]["totals"].append(f["total"])
for section, size in f["sections"].items():
if section in file_accumulator[fname]["sections"]:
file_accumulator[fname]["sections"][section].append(size)
else:
file_accumulator[fname]["sections"][section] = [size]
file_accumulator[fname] = {
"sizes": [],
"totals": [],
"path": f.get("path"),
"symbols": defaultdict(list),
"sections": defaultdict(list),
}
size_val = f.get("size", f.get("total", 0))
file_accumulator[fname]["sizes"].append(size_val)
file_accumulator[fname]["totals"].append(f.get("total", size_val))
for sym in f.get("symbols", []):
name = sym.get("name")
if name is None:
continue
file_accumulator[fname]["symbols"][name].append(sym.get("size", 0))
sections_map = f.get("sections") or {}
for sname, ssize in sections_map.items():
file_accumulator[fname]["sections"][sname].append(ssize)

# Build json_average with averaged values
files_average = []
for fname, data in file_accumulator.items():
avg_total = round(sum(data["totals"]) / len(data["totals"]))
avg_sections = {}
for section, sizes in data["sections"].items():
avg_sections[section] = round(sum(sizes) / len(sizes))
files_average.append({
"file": fname,
"path": data["path"],
"sections": avg_sections,
"total": avg_total
})
avg_size = round(sum(data["sizes"]) / len(data["sizes"])) if data["sizes"] else 0
symbols_avg = []
for sym_name, sizes in data["symbols"].items():
if not sizes:
continue
symbols_avg.append({"name": sym_name, "size": round(sum(sizes) / len(sizes))})
symbols_avg.sort(key=lambda x: x["size"], reverse=True)
sections_avg = {
sec_name: round(sum(sizes) / len(sizes))
for sec_name, sizes in data["sections"].items()
if sizes
}
files_average.append(
{
"file": fname,
"path": data["path"],
"size": avg_size,
"symbols": symbols_avg,
"sections": sections_avg,
}
)

totals_list = [d.get("TOTAL") for d in all_json_data["data"] if isinstance(d.get("TOTAL"), (int, float))]
total_size = round(sum(totals_list) / len(totals_list)) if totals_list else (
sum(f["size"] for f in files_average) or 1)

for f in files_average:
f["percent"] = (f["size"] / total_size) * 100 if total_size else 0
for sym in f["symbols"]:
sym["percent"] = (sym["size"] / f["size"]) * 100 if f["size"] else 0

json_average = {
"mapfiles": all_json_data["mapfiles"],
"sections": all_sections,
"files": files_average
"file_list": all_json_data["file_list"],
"TOTAL": total_size,
"files": files_average,
}

return json_average
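
(Entries sharing a file name across inputs are averaged field by field; e.g., hypothetically, if tusb.c measures 1000 bytes in one toolchain's build and 1200 in another's, its averaged entry carries "size": 1100, and "percent" is taken against the averaged TOTAL rather than either per-build total.)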
def compare_maps(base_file, new_file, filters=None):
"""Compare two map/json files and generate difference report.

Args:
base_file: Path to base map/json file
new_file: Path to new map/json file
filters: List of path substrings to filter object files

Returns:
Dictionary with comparison data
"""
def compare_files(base_file, new_file, filters=None):
"""Compare two CSV or JSON inputs and generate difference report."""
filters = filters or []

# Load both files
base_data = combine_maps([base_file], filters)
new_data = combine_maps([new_file], filters)

if not base_data["data"] or not new_data["data"]:
return None

base_avg = compute_avg(base_data)
new_avg = compute_avg(new_data)
base_avg = compute_avg(combine_files([base_file], filters))
new_avg = compute_avg(combine_files([new_file], filters))

if not base_avg or not new_avg:
return None

# Collect all sections from both
all_sections = list(base_avg["sections"])
for s in new_avg["sections"]:
if s not in all_sections:
all_sections.append(s)

# Build file lookup
base_files = {f["file"]: f for f in base_avg["files"]}
new_files = {f["file"]: f for f in new_avg["files"]}

# Get all file names
all_file_names = set(base_files.keys()) | set(new_files.keys())

# Build comparison data
comparison = []
comparison_files = []
for fname in sorted(all_file_names):
base_f = base_files.get(fname)
new_f = new_files.get(fname)
b = base_files.get(fname, {})
n = new_files.get(fname, {})
b_size = b.get("size", 0)
n_size = n.get("size", 0)
base_sections = b.get("sections") or {}
new_sections = n.get("sections") or {}

row = {"file": fname, "sections": {}, "total": {}}
# Symbol diffs
b_syms = {s["name"]: s for s in b.get("symbols", [])}
n_syms = {s["name"]: s for s in n.get("symbols", [])}
all_syms = set(b_syms.keys()) | set(n_syms.keys())
symbols = []
for sym in all_syms:
sb = b_syms.get(sym, {}).get("size", 0)
sn = n_syms.get(sym, {}).get("size", 0)
symbols.append({"name": sym, "base": sb, "new": sn, "diff": sn - sb})
symbols.sort(key=lambda x: abs(x["diff"]), reverse=True)

for section in all_sections:
base_val = base_f["sections"].get(section, 0) if base_f else 0
new_val = new_f["sections"].get(section, 0) if new_f else 0
row["sections"][section] = {"base": base_val, "new": new_val, "diff": new_val - base_val}
comparison_files.append({
"file": fname,
"size": {"base": b_size, "new": n_size, "diff": n_size - b_size},
"symbols": symbols,
"sections": {
name: {
"base": base_sections.get(name, 0),
"new": new_sections.get(name, 0),
"diff": new_sections.get(name, 0) - base_sections.get(name, 0),
}
for name in sorted(set(base_sections) | set(new_sections))
},
})

base_total = base_f["total"] if base_f else 0
new_total = new_f["total"] if new_f else 0
row["total"] = {"base": base_total, "new": new_total, "diff": new_total - base_total}

comparison.append(row)
total = {
"base": base_avg.get("TOTAL", 0),
"new": new_avg.get("TOTAL", 0),
"diff": new_avg.get("TOTAL", 0) - base_avg.get("TOTAL", 0),
}

return {
"base_file": base_file,
"new_file": new_file,
"sections": all_sections,
"files": comparison
"total": total,
"files": comparison_files,
}


def get_sort_key(sort_order):
"""Get sort key function based on sort order.

Args:
sort_order: One of 'size-', 'size+', 'name-', 'name+'

Returns:
Tuple of (key_func, reverse)
"""

def _size_val(entry):
if isinstance(entry.get('total'), int):
return entry.get('total', 0)
if isinstance(entry.get('total'), dict):
return entry['total'].get('new', 0)
return entry.get('size', 0)

if sort_order == 'size-':
return _size_val, True
elif sort_order == 'size+':
return _size_val, False
elif sort_order == 'name-':
return lambda x: x.get('file', ''), True
else: # name+
return lambda x: x.get('file', ''), False


def format_diff(base, new, diff):
"""Format a diff value with percentage."""
if diff == 0:
@@ -204,149 +314,277 @@ def format_diff(base, new, diff):
return f"{base} ➙ {new} ({sign}{diff}, {sign}{pct:.1f}%)"
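
A sample output, inferred from the format string above (the sign and pct computation sit in the elided part of the hunk, so this assumes pct is diff/base * 100):

# format_diff(100, 110, 10) -> "100 ➙ 110 (+10, +10.0%)"
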
def get_sort_key(sort_order):
"""Get sort key function based on sort order.
def write_json_output(json_data, path):
"""Write JSON output with indentation."""

Args:
sort_order: One of 'size-', 'size+', 'name-', 'name+'
with open(path, "w", encoding="utf-8") as outf:
json.dump(json_data, outf, indent=2)

Returns:
Tuple of (key_func, reverse)
"""
if sort_order == 'size-':
return lambda x: x.get('total', 0) if isinstance(x.get('total'), int) else x['total']['new'], True
elif sort_order == 'size+':
return lambda x: x.get('total', 0) if isinstance(x.get('total'), int) else x['total']['new'], False
elif sort_order == 'name-':
return lambda x: x.get('file', ''), True
else: # name+
return lambda x: x.get('file', ''), False

def render_combine_table(json_data, sort_order='name+'):
"""Render averaged sizes as markdown table lines (no title)."""
files = json_data.get("files", [])
if not files:
return ["No entries."]

key_func, reverse = get_sort_key(sort_order)
files_sorted = sorted(files, key=key_func, reverse=reverse)

total_size = json_data.get("TOTAL") or sum(f.get("size", 0) for f in files_sorted)

pct_strings = [
f"{(f.get('percent') if f.get('percent') is not None else (f.get('size', 0) / total_size * 100 if total_size else 0)):.1f}%"
for f in files_sorted]
pct_width = 6
size_width = max(len("size"), *(len(str(f.get("size", 0))) for f in files_sorted), len(str(total_size)))
file_width = max(len("File"), *(len(f.get("file", "")) for f in files_sorted), len("TOTAL"))

# Build section totals on the fly from file data
sections_global = defaultdict(int)
for f in files_sorted:
for name, size in (f.get("sections") or {}).items():
sections_global[name] += size
# Display sections in reverse alphabetical order for stable column layout
section_names = sorted(sections_global.keys(), reverse=True)
section_widths = {}
for name in section_names:
max_val = max((f.get("sections", {}).get(name, 0) for f in files_sorted), default=0)
section_widths[name] = max(len(name), len(str(max_val)), 1)

if not section_names:
header = f"| {'File':<{file_width}} | {'size':>{size_width}} | {'%':>{pct_width}} |"
separator = f"| :{'-' * (file_width - 1)} | {'-' * (size_width - 1)}: | {'-' * (pct_width - 1)}: |"
else:
header_parts = [f"| {'File':<{file_width}} |"]
sep_parts = [f"| :{'-' * (file_width - 1)} |"]
for name in section_names:
header_parts.append(f" {name:>{section_widths[name]}} |")
sep_parts.append(f" {'-' * (section_widths[name] - 1)}: |")
header_parts.append(f" {'size':>{size_width}} | {'%':>{pct_width}} |")
sep_parts.append(f" {'-' * (size_width - 1)}: | {'-' * (pct_width - 1)}: |")
header = "".join(header_parts)
separator = "".join(sep_parts)

lines = [header, separator]

for f, pct_str in zip(files_sorted, pct_strings):
size_val = f.get("size", 0)
parts = [f"| {f.get('file', ''):<{file_width}} |"]
if section_names:
sections_map = f.get("sections") or {}
for name in section_names:
parts.append(f" {sections_map.get(name, 0):>{section_widths[name]}} |")
parts.append(f" {size_val:>{size_width}} | {pct_str:>{pct_width}} |")
lines.append("".join(parts))

total_parts = [f"| {'TOTAL':<{file_width}} |"]
if section_names:
for name in section_names:
total_parts.append(f" {sections_global.get(name, 0):>{section_widths[name]}} |")
total_parts.append(f" {total_size:>{size_width}} | {'100.0%':>{pct_width}} |")
lines.append("".join(total_parts))
return lines


def write_combine_markdown(json_data, path, sort_order='name+', title="TinyUSB Average Code Size Metrics"):
"""Write averaged size data to a markdown file."""

md_lines = [f"# {title}", ""]
md_lines.extend(render_combine_table(json_data, sort_order))
md_lines.append("")

if json_data.get("file_list"):
md_lines.extend(["<details>", "<summary>Input files</summary>", ""])
md_lines.extend([f"- {mf}" for mf in json_data["file_list"]])
md_lines.extend(["", "</details>", ""])

with open(path, "w", encoding="utf-8") as f:
f.write("\n".join(md_lines))


def write_compare_markdown(comparison, path, sort_order='size'):
"""Write comparison data to markdown file."""
sections = comparison["sections"]

md_lines = [
"# Size Difference Report",
"",
"Because TinyUSB code size varies by port and configuration, the metrics below represent the averaged totals across all example builds."
"Because TinyUSB code size varies by port and configuration, the metrics below represent the averaged totals across all example builds.",
"",
"Note: If there is no change, only one value is shown.",
"",
]

# Build header
header = "| File |"
separator = "|:-----|"
for s in sections:
header += f" {s} |"
separator += "-----:|"
header += " Total |"
separator += "------:|"
significant, minor, unchanged = _split_by_significance(comparison["files"], sort_order)

def is_significant(file_row):
for s in sections:
sd = file_row["sections"][s]
diff = abs(sd["diff"])
base = sd["base"]
if base == 0:
if diff != 0:
return True
else:
if (diff / base) * 100 > 1.0:
return True
return False

# Sort files based on sort_order
if sort_order == 'size-':
key_func = lambda x: abs(x["total"]["diff"])
reverse = True
elif sort_order in ('size', 'size+'):
key_func = lambda x: abs(x["total"]["diff"])
reverse = False
elif sort_order == 'name-':
key_func = lambda x: x['file']
reverse = True
else: # name or name+
key_func = lambda x: x['file']
reverse = False
sorted_files = sorted(comparison["files"], key=key_func, reverse=reverse)

significant = []
minor = []
unchanged = []
for f in sorted_files:
no_change = f["total"]["diff"] == 0 and all(f["sections"][s]["diff"] == 0 for s in sections)
if no_change:
unchanged.append(f)
else:
(significant if is_significant(f) else minor).append(f)

def render_table(title, rows, collapsed=False):
def render(title, rows, collapsed=False):
if collapsed:
md_lines.append(f"<details><summary>{title}</summary>")
md_lines.append("")
else:
md_lines.append(f"## {title}")

if not rows:
md_lines.append("No entries.")
md_lines.append("")
if collapsed:
md_lines.append("</details>")
md_lines.append("")
return

md_lines.append(header)
md_lines.append(separator)

sum_base = {s: 0 for s in sections}
sum_base["total"] = 0
sum_new = {s: 0 for s in sections}
sum_new["total"] = 0

for f in rows:
row = f"| {f['file']} |"
for s in sections:
sd = f["sections"][s]
sum_base[s] += sd["base"]
sum_new[s] += sd["new"]
row += f" {format_diff(sd['base'], sd['new'], sd['diff'])} |"

td = f["total"]
sum_base["total"] += td["base"]
sum_new["total"] += td["new"]
row += f" {format_diff(td['base'], td['new'], td['diff'])} |"

md_lines.append(row)

# Add sum row
sum_row = "| **SUM** |"
for s in sections:
diff = sum_new[s] - sum_base[s]
sum_row += f" {format_diff(sum_base[s], sum_new[s], diff)} |"
total_diff = sum_new["total"] - sum_base["total"]
sum_row += f" {format_diff(sum_base['total'], sum_new['total'], total_diff)} |"
md_lines.append(sum_row)
md_lines.extend(render_compare_table(_build_rows(rows, sort_order), include_sum=True))
md_lines.append("")

if collapsed:
md_lines.append("</details>")
md_lines.append("")

render_table("Changes >1% in any section", significant)
render_table("Changes <1% in all sections", minor)
render_table("No changes", unchanged, collapsed=True)
render("Changes >1% in size", significant)
render("Changes <1% in size", minor)
render("No changes", unchanged, collapsed=True)

with open(path, "w", encoding="utf-8") as f:
f.write("\n".join(md_lines))


def print_compare_summary(comparison, sort_order='name+'):
"""Print diff report to stdout in table form."""

files = comparison["files"]

rows = _build_rows(files, sort_order)
lines = render_compare_table(rows, include_sum=True)
for line in lines:
print(line)


def _build_rows(files, sort_order):
"""Sort files and prepare printable fields."""

def sort_key(file_row):
if sort_order == 'size-':
return abs(file_row["size"]["diff"])
if sort_order in ('size', 'size+'):
return abs(file_row["size"]["diff"])
if sort_order == 'name-':
return file_row['file']
return file_row['file']

reverse = sort_order in ('size-', 'name-')
files_sorted = sorted(files, key=sort_key, reverse=reverse)

rows = []
for f in files_sorted:
sd = f["size"]
diff_val = sd['new'] - sd['base']
if sd['base'] == 0:
pct_str = "n/a"
else:
pct_val = (diff_val / sd['base']) * 100
pct_str = f"{pct_val:+.1f}%"
rows.append({
"file": f['file'],
"base": sd['base'],
"new": sd['new'],
"diff": diff_val,
"pct": pct_str,
"sections": f.get("sections", {}),
})
return rows


def _split_by_significance(files, sort_order):
"""Split files into >1% changes, <1% changes, and no changes."""

def is_significant(file_row):
base = file_row["size"]["base"]
diff = abs(file_row["size"]["diff"])
if base == 0:
return diff != 0
return (diff / base) * 100 > 1.0

rows_sorted = sorted(
files,
key=lambda f: abs(f["size"]["diff"]) if sort_order.startswith("size") else f["file"],
reverse=sort_order in ('size-', 'name-'),
)

significant = []
minor = []
unchanged = []
for f in rows_sorted:
if f["size"]["diff"] == 0:
unchanged.append(f)
else:
(significant if is_significant(f) else minor).append(f)

return significant, minor, unchanged
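
(Concretely: against a 1000-byte base, a 15-byte change is 1.5% and lands in the significant bucket, a 5-byte change (0.5%) is minor, and a zero diff is unchanged; a file that appears only in the new build has base == 0, so any nonzero diff counts as significant.)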
def render_compare_table(rows, include_sum):
"""Return markdown table lines for given rows."""
if not rows:
return ["No entries.", ""]

# collect section columns (reverse alpha)
section_names = sorted(
{name for r in rows for name in (r.get("sections") or {})},
reverse=True,
)

def fmt_abs(val_old, val_new):
diff = val_new - val_old
if diff == 0:
return f"{val_new}"
sign = "+" if diff > 0 else ""
return f"{val_old} ➙ {val_new} ({sign}{diff})"

sum_base = sum(r["base"] for r in rows)
sum_new = sum(r["new"] for r in rows)
total_diff = sum_new - sum_base
total_pct = "n/a" if sum_base == 0 else f"{(total_diff / sum_base) * 100:+.1f}%"

file_width = max(len("file"), *(len(r["file"]) for r in rows), len("TOTAL"))
size_width = max(
len("size"),
*(len(fmt_abs(r["base"], r["new"])) for r in rows),
len(fmt_abs(sum_base, sum_new)),
)
pct_width = max(len("% diff"), *(len(r["pct"]) for r in rows), len(total_pct))
section_widths = {}
for name in section_names:
max_val_len = 0
for r in rows:
sec_entry = (r.get("sections") or {}).get(name, {"base": 0, "new": 0})
max_val_len = max(max_val_len, len(fmt_abs(sec_entry.get("base", 0), sec_entry.get("new", 0))))
section_widths[name] = max(len(name), max_val_len, 1)

header_parts = [f"| {'file':<{file_width}} |"]
sep_parts = [f"| :{'-' * (file_width - 1)} |"]
for name in section_names:
header_parts.append(f" {name:>{section_widths[name]}} |")
sep_parts.append(f" {'-' * (section_widths[name] - 1)}: |")
header_parts.append(f" {'size':>{size_width}} | {'% diff':>{pct_width}} |")
sep_parts.append(f" {'-' * (size_width - 1)}: | {'-' * (pct_width - 1)}: |")
header = "".join(header_parts)
separator = "".join(sep_parts)

lines = [header, separator]

for r in rows:
parts = [f"| {r['file']:<{file_width}} |"]
sections_map = r.get("sections") or {}
for name in section_names:
sec_entry = sections_map.get(name, {"base": 0, "new": 0})
parts.append(f" {fmt_abs(sec_entry.get('base', 0), sec_entry.get('new', 0)):>{section_widths[name]}} |")
parts.append(f" {fmt_abs(r['base'], r['new']):>{size_width}} | {r['pct']:>{pct_width}} |")
lines.append("".join(parts))

if include_sum:
total_parts = [f"| {'TOTAL':<{file_width}} |"]
for name in section_names:
total_base = sum((r.get("sections") or {}).get(name, {}).get("base", 0) for r in rows)
total_new = sum((r.get("sections") or {}).get(name, {}).get("new", 0) for r in rows)
total_parts.append(f" {fmt_abs(total_base, total_new):>{section_widths[name]}} |")
total_parts.append(f" {fmt_abs(sum_base, sum_new):>{size_width}} | {total_pct:>{pct_width}} |")
lines.append("".join(total_parts))
return lines
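
Examples of the fmt_abs cell format defined above:

# fmt_abs(100, 100) -> "100"              (no change: a single value, matching the report note)
# fmt_abs(100, 120) -> "100 ➙ 120 (+20)"
# fmt_abs(120, 100) -> "120 ➙ 100 (-20)"  (a negative diff carries its own sign)
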
def cmd_combine(args):
"""Handle combine subcommand."""
map_files = expand_files(args.files)
all_json_data = combine_maps(map_files, args.filters)
input_files = expand_files(args.files)
all_json_data = combine_files(input_files, args.filters)
json_average = compute_avg(all_json_data)

if json_average is None:
@@ -354,24 +592,29 @@ def cmd_combine(args):
sys.exit(1)

if not args.quiet:
linkermap.print_summary(json_average, False, args.sort)
for line in render_combine_table(json_average, sort_order=args.sort):
print(line)
if args.json_out:
linkermap.write_json(json_average, args.out + '.json')
write_json_output(json_average, args.out + '.json')
if args.markdown_out:
linkermap.write_markdown(json_average, args.out + '.md', sort_opt=args.sort,
title="TinyUSB Average Code Size Metrics")
write_combine_markdown(json_average, args.out + '.md', sort_order=args.sort,
title="TinyUSB Average Code Size Metrics")


def cmd_compare(args):
"""Handle compare subcommand."""
comparison = compare_maps(args.base, args.new, args.filters)
comparison = compare_files(args.base, args.new, args.filters)

if comparison is None:
print("Failed to compare files", file=sys.stderr)
sys.exit(1)

write_compare_markdown(comparison, args.out + '.md', args.sort)
print(f"Comparison written to {args.out}.md")
if not args.quiet:
print_compare_summary(comparison, args.sort)
if args.markdown_out:
write_compare_markdown(comparison, args.out + '.md', args.sort)
if not args.quiet:
print(f"Comparison written to {args.out}.md")


def main(argv=None):
@@ -379,10 +622,11 @@ def main(argv=None):
subparsers = parser.add_subparsers(dest='command', required=True, help='Available commands')

# Combine subcommand
combine_parser = subparsers.add_parser('combine', help='Combine and average multiple map files')
combine_parser.add_argument('files', nargs='+', help='Path to map file(s) or glob pattern(s)')
combine_parser = subparsers.add_parser('combine', help='Combine and average bloaty CSV outputs or metrics JSON files')
combine_parser.add_argument('files', nargs='+',
help='Path to bloaty CSV output or TinyUSB metrics JSON file(s) (including linkermap-generated) or glob pattern(s)')
combine_parser.add_argument('-f', '--filter', dest='filters', action='append', default=[],
help='Only include object files whose path contains this substring (can be repeated)')
help='Only include compile units whose path contains this substring (can be repeated)')
combine_parser.add_argument('-o', '--out', dest='out', default='metrics',
help='Output path basename for JSON and Markdown files (default: metrics)')
combine_parser.add_argument('-j', '--json', dest='json_out', action='store_true',
@@ -391,21 +635,25 @@ def main(argv=None):
help='Write Markdown output file')
combine_parser.add_argument('-q', '--quiet', dest='quiet', action='store_true',
help='Suppress summary output')
combine_parser.add_argument('-S', '--sort', dest='sort', default='name+',
combine_parser.add_argument('-S', '--sort', dest='sort', default='size-',
choices=['size', 'size-', 'size+', 'name', 'name-', 'name+'],
help='Sort order: size/size- (descending), size+ (ascending), name/name+ (ascending), name- (descending). Default: name+')
help='Sort order: size/size- (descending), size+ (ascending), name/name+ (ascending), name- (descending). Default: size-')

# Compare subcommand
compare_parser = subparsers.add_parser('compare', help='Compare two map files')
compare_parser.add_argument('base', help='Base map/json file')
compare_parser.add_argument('new', help='New map/json file')
compare_parser = subparsers.add_parser('compare', help='Compare two metrics inputs (bloaty CSV or metrics JSON)')
compare_parser.add_argument('base', help='Base CSV/metrics JSON file')
compare_parser.add_argument('new', help='New CSV/metrics JSON file')
compare_parser.add_argument('-f', '--filter', dest='filters', action='append', default=[],
help='Only include object files whose path contains this substring (can be repeated)')
help='Only include compile units whose path contains this substring (can be repeated)')
compare_parser.add_argument('-o', '--out', dest='out', default='metrics_compare',
help='Output path basename for Markdown file (default: metrics_compare)')
help='Output path basename for Markdown/JSON files (default: metrics_compare)')
compare_parser.add_argument('-m', '--markdown', dest='markdown_out', action='store_true',
help='Write Markdown output file')
compare_parser.add_argument('-S', '--sort', dest='sort', default='name+',
choices=['size', 'size-', 'size+', 'name', 'name-', 'name+'],
help='Sort order: size/size- (descending), size+ (ascending), name/name+ (ascending), name- (descending). Default: name+')
compare_parser.add_argument('-q', '--quiet', dest='quiet', action='store_true',
help='Suppress stdout summary output')

args = parser.parse_args(argv)
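
Typical invocations, matching what the updated CI workflow above runs:

python tools/metrics.py combine -j -m -f tinyusb/src cmake-build/*/metrics.json
python tools/metrics.py compare -m -f tinyusb/src base-metrics/metrics.json metrics.json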