Mirror of https://github.com/littlefs-project/littlefs.git, synced 2025-10-29 19:47:49 +00:00

Reworked scripts to move field details into classes

These scripts can't easily share the common logic, but separating field details from the print/merge/csv logic should make the common part of these scripts much easier to create/modify going forward. This also tweaked the behavior of summary.py slightly.

parent 4a7e94fb15
commit 5b0a6d4747
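The reworked pattern is easiest to see in isolation. Below is a minimal, illustrative sketch (not code from the repository) of the Result/Diff shape each script now defines for its own fields; the field name `size` is a placeholder:

    import collections as co

    class Result(co.namedtuple('Result', 'size')):
        __slots__ = ()
        def __new__(cls, size=0):
            # normalize csv strings to ints on construction
            return super().__new__(cls, int(size))

        def __add__(self, other):
            # merging two measurements just sums the field
            return self.__class__(self.size + other.size)

        def __sub__(self, other):
            # subtraction produces a Diff holding (old, new)
            return Diff(other, self)

    class Diff(co.namedtuple('Diff', 'old,new')):
        __slots__ = ()
        def __str__(self):
            old = self.old.size if self.old is not None else 0
            new = self.new.size if self.new is not None else 0
            return '%7s %7s %+7d' % (old or '-', new or '-', new-old)

    print(Result('120') + Result(8))   # merged result, csv strings normalize
    print(Result(128) - Result(120))   # rendered as an old/new/diff row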
.gitignore (vendored, 2 lines changed)

@@ -6,6 +6,8 @@
*.csv
*.t.c
*.a.c
*.gcno
*.gcda

# Testing things
blocks/
Makefile (6 lines changed)

@@ -74,9 +74,6 @@ endif
ifdef EXEC
override TESTFLAGS += --exec="$(EXEC)"
endif
ifdef COVERAGE
override TESTFLAGS += --coverage
endif
ifdef BUILDDIR
override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
@@ -164,9 +161,6 @@ $(BUILDDIR)lfs.csv: $(OBJ) $(CGI)
	./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@
	./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@
	./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@
	$(if $(COVERAGE),\
		./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \
			-q -m $@ $(COVERAGEFLAGS) -o $@)

$(BUILDDIR)runners/test_runner: $(TEST_TAOBJ)
	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
scripts/code.py (256 lines changed)

@@ -17,6 +17,61 @@ import collections as co

OBJ_PATHS = ['*.o']

class CodeResult(co.namedtuple('CodeResult', 'code_size')):
    __slots__ = ()
    def __new__(cls, code_size=0):
        return super().__new__(cls, int(code_size))

    def __add__(self, other):
        return self.__class__(self.code_size + other.code_size)

    def __sub__(self, other):
        return CodeDiff(other, self)

    def __rsub__(self, other):
        return self.__class__.__sub__(other, self)

    def key(self, **args):
        if args.get('size_sort'):
            return -self.code_size
        elif args.get('reverse_size_sort'):
            return +self.code_size
        else:
            return None

    _header = '%7s' % 'size'
    def __str__(self):
        return '%7d' % self.code_size

class CodeDiff(co.namedtuple('CodeDiff', 'old,new')):
    __slots__ = ()

    def ratio(self):
        old = self.old.code_size if self.old is not None else 0
        new = self.new.code_size if self.new is not None else 0
        return (new-old) / old if old else 1.0

    def key(self, **args):
        return (
            self.new.key(**args) if self.new is not None else 0,
            -self.ratio())

    def __bool__(self):
        return bool(self.ratio())

    _header = '%7s %7s %7s' % ('old', 'new', 'diff')
    def __str__(self):
        old = self.old.code_size if self.old is not None else 0
        new = self.new.code_size if self.new is not None else 0
        diff = new - old
        ratio = self.ratio()
        return '%7s %7s %+7d%s' % (
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else '')


def openio(path, mode='r'):
    if path == '-':
        if 'r' in mode:
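For orientation, the class above folds what used to be bare integers and ad-hoc tuple math into one type. A quick illustration of how it behaves when run against these definitions (the sizes are made up):

    r = CodeResult('120') + CodeResult(8)   # csv strings and ints both normalize
    print(CodeResult._header)               # ->    size
    print(r)                                # ->     128
    d = r - CodeResult(120)                 # subtraction yields a CodeDiff
    print(CodeDiff._header)                 # ->     old     new    diff
    print(d)                                # ->     120     128      +8 (+6.7%)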
@@ -27,12 +82,17 @@ def openio(path, mode='r'):
        return open(path, mode)

def collect(paths, **args):
    results = co.defaultdict(lambda: 0)
    results = co.defaultdict(lambda: CodeResult())
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(args['type']) +
        ' (?P<func>.+?)$')
    for path in paths:
        # map to source file
        src_path = re.sub('\.o$', '.c', path)
        if args.get('build_dir'):
            src_path = re.sub('%s/*' % re.escape(args['build_dir']), '',
                src_path)
        # note nm-tool may contain extra args
        cmd = args['nm_tool'] + ['--size-sort', path]
        if args.get('verbose'):
@@ -45,7 +105,14 @@ def collect(paths, **args):
        for line in proc.stdout:
            m = pattern.match(line)
            if m:
                results[(path, m.group('func'))] += int(m.group('size'), 16)
                func = m.group('func')
                # discard internal functions
                if not args.get('everything') and func.startswith('__'):
                    continue
                # discard .8449 suffixes created by optimizer
                func = re.sub('\.[0-9]+', '', func)
                results[(src_path, func)] += CodeResult(
                    int(m.group('size'), 16))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
@@ -53,24 +120,7 @@ def collect(paths, **args):
                sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, func), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        file = re.sub('\.o$', '.c', file)
        # discard internal functions
        if not args.get('everything'):
            if func.startswith('__'):
                continue
        # discard .8449 suffixes created by optimizer
        func = re.sub('\.[0-9]+', '', func)

        flat_results.append((file, func, size))

    return flat_results
    return results
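The regex in collect() is keyed to the output of `nm --size-sort`, which prints one symbol per line as a hex size, a one-letter symbol type, and the symbol name. A standalone illustration with an invented sample line (the character class here is illustrative; code.py builds it from args['type']):

    import re

    sample = '00000184 T lfs_file_open\n'   # invented nm --size-sort output

    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)'
        ' (?P<type>[tTrRdD])'
        ' (?P<func>.+?)$')
    m = pattern.match(sample)
    print(m.group('func'), int(m.group('size'), 16))   # lfs_file_open 388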
def main(**args):
    # find sizes
@@ -92,35 +142,27 @@ def main(**args):
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                ( result['file'],
                    result['name'],
                    int(result['code_size']))
            results = {
                (result['file'], result['name']): CodeResult(
                    *(result[f] for f in CodeResult._fields))
                for result in r
                if result.get('code_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size
                if all(result.get(f) not in {None, ''}
                    for f in CodeResult._fields)}

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    ( result['file'],
                        result['name'],
                        int(result['code_size']))
                prev_results = {
                    (result['file'], result['name']): CodeResult(
                        *(result[f] for f in CodeResult._fields))
                    for result in r
                    if result.get('code_size') not in {None, ''}]
                    if all(result.get(f) not in {None, ''}
                        for f in CodeResult._fields)}
        except FileNotFoundError:
            prev_results = []

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
@@ -134,111 +176,87 @@ def main(**args):
                for result in r:
                    file = result.pop('file', '')
                    func = result.pop('name', '')
                    result.pop('code_size', None)
                    for f in CodeResult._fields:
                        result.pop(f, None)
                    merged_results[(file, func)] = result
                    other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, size in results:
            merged_results[(file, func)]['code_size'] = size
        for (file, func), result in results.items():
            merged_results[(file, func)] |= result._asdict()

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
            w = csv.DictWriter(f, ['file', 'name',
                *other_fields, *CodeResult._fields])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})
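The merge step above leans on `namedtuple._asdict()` plus the dict union-assignment operator, which requires Python 3.9 or newer. A small standalone illustration with an invented row from another script's csv:

    row = {'stack_limit': '64'}            # fields left over from another script
    row |= CodeResult(128)._asdict()       # overlay this script's fields
    print(row)                             # {'stack_limit': '64', 'code_size': 128}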
    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: 0)
        for file, func, size in results:
            entry = (file if by == 'file' else func)
            entries[entry] += size
        return entries

    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def sorted_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1], x))
    def print_header(by):
        if by == 'total':
            entry = lambda k: 'TOTAL'
        elif by == 'file':
            entry = lambda k: k[0]
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        else:
            return sorted(entries, key=lambda x: (-x[1][3], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, size):
        print("%-36s %7d" % (name, size))

    def print_diff_entry(name, old, new, diff, ratio):
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)
        entry = lambda k: k[1]

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
            print('%-36s %s' % (by, CodeResult._header))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)
            old = {entry(k) for k in results.keys()}
            new = {entry(k) for k in prev_results.keys()}
            print('%-36s %s' % (
                '%s (%d added, %d removed)' % (by,
                    sum(1 for k in new if k not in old),
                    sum(1 for k in old if k not in new))
                if by else '',
                CodeDiff._header))

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
    def print_entries(by):
        if by == 'total':
            entry = lambda k: 'TOTAL'
        elif by == 'file':
            entry = lambda k: k[0]
        else:
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)
            entry = lambda k: k[1]

        entries = co.defaultdict(lambda: CodeResult())
        for k, result in results.items():
            entries[entry(k)] += result

        if not args.get('diff'):
            for name, result in sorted(entries.items(),
                    key=lambda p: (p[1].key(**args), p)):
                print('%-36s %s' % (name, result))
        else:
            prev_entries = co.defaultdict(lambda: CodeResult())
            for k, result in prev_results.items():
                prev_entries[entry(k)] += result

            diff_entries = {name: entries.get(name) - prev_entries.get(name)
                for name in (entries.keys() | prev_entries.keys())}

            for name, diff in sorted(diff_entries.items(),
                    key=lambda p: (p[1].key(**args), p)):
                if diff or args.get('all'):
                    print('%-36s %s' % (name, diff))

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
        print_header('')
        print_entries('total')
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
        print_header('file')
        print_entries('file')
        print_entries('total')
    else:
        print_entries(by='name')
        print_totals()
        print_header('function')
        print_entries('function')
        print_entries('total')


if __name__ == "__main__":
    import argparse
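The new print path accumulates CodeResult values per entry and then sorts on the tuple (result.key(**args), pair), so a None key from key() falls back to name order while --size-sort flips to size order. A small illustration run against the classes above, with invented sizes:

    entries = co.defaultdict(lambda: CodeResult())
    for (file, func), result in {
            ('lfs.c', 'lfs_file_open'): CodeResult(388),
            ('lfs.c', 'lfs_file_close'): CodeResult(54),
            ('lfs_util.c', 'lfs_crc'): CodeResult(96)}.items():
        entries[file] += result                       # the by='file' view

    for name, result in sorted(entries.items(),
            key=lambda p: (p[1].key(size_sort=True), p)):
        print('%-36s %s' % (name, result))            # largest file first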
scripts/coverage.py

@@ -21,6 +21,140 @@ import subprocess as sp

GCDA_PATHS = ['*.gcda']

class CoverageResult(co.namedtuple('CoverageResult',
        'coverage_line_hits,coverage_line_count,'
        'coverage_branch_hits,coverage_branch_count')):
    __slots__ = ()
    def __new__(cls,
            coverage_line_hits=0, coverage_line_count=0,
            coverage_branch_hits=0, coverage_branch_count=0):
        return super().__new__(cls,
            int(coverage_line_hits),
            int(coverage_line_count),
            int(coverage_branch_hits),
            int(coverage_branch_count))

    def __add__(self, other):
        return self.__class__(
            self.coverage_line_hits + other.coverage_line_hits,
            self.coverage_line_count + other.coverage_line_count,
            self.coverage_branch_hits + other.coverage_branch_hits,
            self.coverage_branch_count + other.coverage_branch_count)

    def __sub__(self, other):
        return CoverageDiff(other, self)

    def __rsub__(self, other):
        return self.__class__.__sub__(other, self)

    def key(self, **args):
        ratio_line = (self.coverage_line_hits/self.coverage_line_count
            if self.coverage_line_count else -1)
        ratio_branch = (self.coverage_branch_hits/self.coverage_branch_count
            if self.coverage_branch_count else -1)

        if args.get('line_sort'):
            return (-ratio_line, -ratio_branch)
        elif args.get('reverse_line_sort'):
            return (+ratio_line, +ratio_branch)
        elif args.get('branch_sort'):
            return (-ratio_branch, -ratio_line)
        elif args.get('reverse_branch_sort'):
            return (+ratio_branch, +ratio_line)
        else:
            return None

    _header = '%19s %19s' % ('hits/line', 'hits/branch')
    def __str__(self):
        line_hits = self.coverage_line_hits
        line_count = self.coverage_line_count
        branch_hits = self.coverage_branch_hits
        branch_count = self.coverage_branch_count
        return '%11s %7s %11s %7s' % (
            '%d/%d' % (line_hits, line_count)
                if line_count else '-',
            '%.1f%%' % (100*line_hits/line_count)
                if line_count else '-',
            '%d/%d' % (branch_hits, branch_count)
                if branch_count else '-',
            '%.1f%%' % (100*branch_hits/branch_count)
                if branch_count else '-')

class CoverageDiff(co.namedtuple('CoverageDiff', 'old,new')):
    __slots__ = ()

    def ratio_line(self):
        old_line_hits = (self.old.coverage_line_hits
            if self.old is not None else 0)
        old_line_count = (self.old.coverage_line_count
            if self.old is not None else 0)
        new_line_hits = (self.new.coverage_line_hits
            if self.new is not None else 0)
        new_line_count = (self.new.coverage_line_count
            if self.new is not None else 0)
        return ((new_line_hits/new_line_count if new_line_count else 1.0)
            - (old_line_hits/old_line_count if old_line_count else 1.0))

    def ratio_branch(self):
        old_branch_hits = (self.old.coverage_branch_hits
            if self.old is not None else 0)
        old_branch_count = (self.old.coverage_branch_count
            if self.old is not None else 0)
        new_branch_hits = (self.new.coverage_branch_hits
            if self.new is not None else 0)
        new_branch_count = (self.new.coverage_branch_count
            if self.new is not None else 0)
        return ((new_branch_hits/new_branch_count if new_branch_count else 1.0)
            - (old_branch_hits/old_branch_count if old_branch_count else 1.0))

    def key(self, **args):
        return (
            self.new.key(**args) if self.new is not None else 0,
            -self.ratio_line(),
            -self.ratio_branch())

    def __bool__(self):
        return bool(self.ratio_line() or self.ratio_branch())

    _header = '%23s %23s %23s' % ('old', 'new', 'diff')
    def __str__(self):
        old_line_hits = (self.old.coverage_line_hits
            if self.old is not None else 0)
        old_line_count = (self.old.coverage_line_count
            if self.old is not None else 0)
        old_branch_hits = (self.old.coverage_branch_hits
            if self.old is not None else 0)
        old_branch_count = (self.old.coverage_branch_count
            if self.old is not None else 0)
        new_line_hits = (self.new.coverage_line_hits
            if self.new is not None else 0)
        new_line_count = (self.new.coverage_line_count
            if self.new is not None else 0)
        new_branch_hits = (self.new.coverage_branch_hits
            if self.new is not None else 0)
        new_branch_count = (self.new.coverage_branch_count
            if self.new is not None else 0)
        diff_line_hits = new_line_hits - old_line_hits
        diff_line_count = new_line_count - old_line_count
        diff_branch_hits = new_branch_hits - old_branch_hits
        diff_branch_count = new_branch_count - old_branch_count
        ratio_line = self.ratio_line()
        ratio_branch = self.ratio_branch()
        return '%11s %11s %11s %11s %11s %11s%-10s%s' % (
            '%d/%d' % (old_line_hits, old_line_count)
                if old_line_count else '-',
            '%d/%d' % (old_branch_hits, old_branch_count)
                if old_branch_count else '-',
            '%d/%d' % (new_line_hits, new_line_count)
                if new_line_count else '-',
            '%d/%d' % (new_branch_hits, new_branch_count)
                if new_branch_count else '-',
            '%+d/%+d' % (diff_line_hits, diff_line_count),
            '%+d/%+d' % (diff_branch_hits, diff_branch_count),
            ' (%+.1f%%)' % (100*ratio_line) if ratio_line else '',
            ' (%+.1f%%)' % (100*ratio_branch) if ratio_branch else '')


def openio(path, mode='r'):
    if path == '-':
        if 'r' in mode:
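Because each CoverageResult carries raw hit/count pairs rather than precomputed percentages, ratios stay exact when results are merged across functions. A short illustration against the classes above, with invented counts:

    a = CoverageResult(coverage_line_hits=45, coverage_line_count=50,
        coverage_branch_hits=12, coverage_branch_count=20)
    b = CoverageResult(coverage_line_hits=5, coverage_line_count=10)
    print(CoverageResult._header)
    print(a + b)        # 50/60 lines (83.3%), 12/20 branches (60.0%)
    print(a - b)        # a CoverageDiff row comparing old=b against new=a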
@@ -30,113 +164,6 @@ def openio(path, mode='r'):
    else:
        return open(path, mode)

class CoverageResult(co.namedtuple('CoverageResult',
        'line_hits,line_count,branch_hits,branch_count')):
    __slots__ = ()
    def __new__(cls, line_hits=0, line_count=0, branch_hits=0, branch_count=0):
        return super().__new__(cls,
            int(line_hits),
            int(line_count),
            int(branch_hits),
            int(branch_count))

    def __add__(self, other):
        return self.__class__(
            self.line_hits + other.line_hits,
            self.line_count + other.line_count,
            self.branch_hits + other.branch_hits,
            self.branch_count + other.branch_count)

    def __sub__(self, other):
        return CoverageDiff(other, self)

    def key(self, **args):
        line_ratio = (self.line_hits/self.line_count
            if self.line_count else -1)
        branch_ratio = (self.branch_hits/self.branch_count
            if self.branch_count else -1)

        if args.get('line_sort'):
            return (-line_ratio, -branch_ratio)
        elif args.get('reverse_line_sort'):
            return (+line_ratio, +branch_ratio)
        elif args.get('branch_sort'):
            return (-branch_ratio, -line_ratio)
        elif args.get('reverse_branch_sort'):
            return (+branch_ratio, +line_ratio)
        else:
            return None

    _header = '%19s %19s' % ('hits/line', 'hits/branch')
    def __str__(self):
        return '%11s %7s %11s %7s' % (
            '%d/%d' % (self.line_hits, self.line_count)
                if self.line_count else '-',
            '%.1f%%' % (100*self.line_hits/self.line_count)
                if self.line_count else '-',
            '%d/%d' % (self.branch_hits, self.branch_count)
                if self.branch_count else '-',
            '%.1f%%' % (100*self.branch_hits/self.branch_count)
                if self.branch_count else '-')

class CoverageDiff(co.namedtuple('CoverageDiff', 'old,new')):
    __slots__ = ()

    def line_hits_diff(self):
        return self.new.line_hits - self.old.line_hits

    def line_count_diff(self):
        return self.new.line_count - self.old.line_count

    def line_ratio(self):
        return ((self.new.line_hits/self.new.line_count
                if self.new.line_count else 1.0)
            - (self.old.line_hits / self.old.line_count
                if self.old.line_count else 1.0))

    def branch_hits_diff(self):
        return self.new.branch_hits - self.old.branch_hits

    def branch_count_diff(self):
        return self.new.branch_count - self.old.branch_count

    def branch_ratio(self):
        return ((self.new.branch_hits/self.new.branch_count
                if self.new.branch_count else 1.0)
            - (self.old.branch_hits / self.old.branch_count
                if self.old.branch_count else 1.0))

    def key(self, **args):
        new_key = self.new.key(**args)
        line_ratio = self.line_ratio()
        branch_ratio = self.branch_ratio()
        if new_key is not None:
            return new_key
        else:
            return (-line_ratio, -branch_ratio)

    def __bool__(self):
        return bool(self.line_ratio() or self.branch_ratio())

    _header = '%23s %23s %23s' % ('old', 'new', 'diff')
    def __str__(self):
        line_ratio = self.line_ratio()
        branch_ratio = self.branch_ratio()
        return '%11s %11s %11s %11s %11s %11s%-10s%s' % (
            '%d/%d' % (self.old.line_hits, self.old.line_count)
                if self.old.line_count else '-',
            '%d/%d' % (self.old.branch_hits, self.old.branch_count)
                if self.old.branch_count else '-',
            '%d/%d' % (self.new.line_hits, self.new.line_count)
                if self.new.line_count else '-',
            '%d/%d' % (self.new.branch_hits, self.new.branch_count)
                if self.new.branch_count else '-',
            '%+d/%+d' % (self.line_hits_diff(), self.line_count_diff()),
            '%+d/%+d' % (self.branch_hits_diff(), self.branch_count_diff()),
            ' (%+.1f%%)' % (100*line_ratio) if line_ratio else '',
            ' (%+.1f%%)' % (100*branch_ratio) if branch_ratio else '')


def collect(paths, **args):
    results = {}
    for path in paths:
@@ -180,12 +207,12 @@ def collect(paths, **args):
            results[(src_path, func, line['line_number'])] = (
                line['count'],
                CoverageResult(
                    line_hits=1 if line['count'] > 0 else 0,
                    line_count=1,
                    branch_hits=sum(
                    coverage_line_hits=1 if line['count'] > 0 else 0,
                    coverage_line_count=1,
                    coverage_branch_hits=sum(
                        1 if branch['count'] > 0 else 0
                        for branch in line['branches']),
                    branch_count=len(line['branches'])))
                    coverage_branch_count=len(line['branches'])))

    # merge into functions, since this is what other scripts use
    func_results = co.defaultdict(lambda: CoverageResult())
@@ -210,15 +237,13 @@ def main(**args):
            print('no .gcda files found in %r?' % args['gcda_paths'])
            sys.exit(-1)

        # TODO consistent behavior between this and stack.py for deps?
        results, line_results = collect(paths, **args)
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = {
                (result['file'], result['name']): CoverageResult(**{
                    k: v for k, v in result.items()
                    if k in CoverageResult._fields})
                (result['file'], result['name']): CoverageResult(
                    *(result[f] for f in CoverageResult._fields))
                for result in r
                if all(result.get(f) not in {None, ''}
                    for f in CoverageResult._fields)}
@@ -229,9 +254,8 @@ def main(**args):
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = {
                    (result['file'], result['name']): CoverageResult(**{
                        k: v for k, v in result.items()
                        if k in CoverageResult._fields})
                    (result['file'], result['name']): CoverageResult(
                        *(result[f] for f in CoverageResult._fields))
                    for result in r
                    if all(result.get(f) not in {None, ''}
                        for f in CoverageResult._fields)}
@@ -259,8 +283,7 @@ def main(**args):
                pass

        for (file, func), result in results.items():
            for f in CoverageResult._fields:
                merged_results[(file, func)][f] = getattr(result, f)
            merged_results[(file, func)] |= result._asdict()

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name',
@@ -311,7 +334,7 @@ def main(**args):
            for k, result in prev_results.items():
                prev_entries[entry(k)] += result

            diff_entries = {name: entries[name] - prev_entries[name]
            diff_entries = {name: entries.get(name) - prev_entries.get(name)
                for name in (entries.keys() | prev_entries.keys())}

            for name, diff in sorted(diff_entries.items(),
@@ -335,10 +358,12 @@ def main(**args):

    # catch lack of coverage
    if args.get('error_on_lines') and any(
            r.line_hits < r.line_count for r in results.values()):
            r.coverage_line_hits < r.coverage_line_count
            for r in results.values()):
        sys.exit(2)
    elif args.get('error_on_branches') and any(
            r.branch_hits < r.branch_count for r in results.values()):
            r.coverage_branch_hits < r.coverage_branch_count
            for r in results.values()):
        sys.exit(3)
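One subtle change above: diff_entries now uses .get(), so one side can be None when a function only exists in the old or the new results. The __sub__/__rsub__ pair on the Result classes is what keeps that working; a small illustration against the classes above:

    new_only = CoverageResult(coverage_line_hits=3, coverage_line_count=4) - None
    old_only = None - CoverageResult(coverage_line_hits=3, coverage_line_count=4)
    print(new_only.old, new_only.new is not None)   # None True  (added function)
    print(old_only.new, old_only.old is not None)   # None True  (removed function)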
scripts/data.py (255 lines changed)

@@ -17,6 +17,61 @@ import collections as co

OBJ_PATHS = ['*.o']

class DataResult(co.namedtuple('DataResult', 'data_size')):
    __slots__ = ()
    def __new__(cls, data_size=0):
        return super().__new__(cls, int(data_size))

    def __add__(self, other):
        return self.__class__(self.data_size + other.data_size)

    def __sub__(self, other):
        return DataDiff(other, self)

    def __rsub__(self, other):
        return self.__class__.__sub__(other, self)

    def key(self, **args):
        if args.get('size_sort'):
            return -self.data_size
        elif args.get('reverse_size_sort'):
            return +self.data_size
        else:
            return None

    _header = '%7s' % 'size'
    def __str__(self):
        return '%7d' % self.data_size

class DataDiff(co.namedtuple('DataDiff', 'old,new')):
    __slots__ = ()

    def ratio(self):
        old = self.old.data_size if self.old is not None else 0
        new = self.new.data_size if self.new is not None else 0
        return (new-old) / old if old else 1.0

    def key(self, **args):
        return (
            self.new.key(**args) if self.new is not None else 0,
            -self.ratio())

    def __bool__(self):
        return bool(self.ratio())

    _header = '%7s %7s %7s' % ('old', 'new', 'diff')
    def __str__(self):
        old = self.old.data_size if self.old is not None else 0
        new = self.new.data_size if self.new is not None else 0
        diff = new - old
        ratio = self.ratio()
        return '%7s %7s %+7d%s' % (
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else '')


def openio(path, mode='r'):
    if path == '-':
        if 'r' in mode:
@@ -27,12 +82,17 @@ def openio(path, mode='r'):
        return open(path, mode)

def collect(paths, **args):
    results = co.defaultdict(lambda: 0)
    results = co.defaultdict(lambda: DataResult())
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(args['type']) +
        ' (?P<func>.+?)$')
    for path in paths:
        # map to source file
        src_path = re.sub('\.o$', '.c', path)
        if args.get('build_dir'):
            src_path = re.sub('%s/*' % re.escape(args['build_dir']), '',
                src_path)
        # note nm-tool may contain extra args
        cmd = args['nm_tool'] + ['--size-sort', path]
        if args.get('verbose'):
@@ -45,7 +105,14 @@ def collect(paths, **args):
        for line in proc.stdout:
            m = pattern.match(line)
            if m:
                results[(path, m.group('func'))] += int(m.group('size'), 16)
                func = m.group('func')
                # discard internal functions
                if not args.get('everything') and func.startswith('__'):
                    continue
                # discard .8449 suffixes created by optimizer
                func = re.sub('\.[0-9]+', '', func)
                results[(src_path, func)] += DataResult(
                    int(m.group('size'), 16))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
@@ -53,23 +120,7 @@ def collect(paths, **args):
                sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, func), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        file = re.sub('\.o$', '.c', file)
        # discard internal functions
        if not args.get('everything'):
            if func.startswith('__'):
                continue
        # discard .8449 suffixes created by optimizer
        func = re.sub('\.[0-9]+', '', func)
        flat_results.append((file, func, size))

    return flat_results
    return results

def main(**args):
    # find sizes
@@ -91,35 +142,27 @@ def main(**args):
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                ( result['file'],
                    result['name'],
                    int(result['data_size']))
            results = {
                (result['file'], result['name']): DataResult(
                    *(result[f] for f in DataResult._fields))
                for result in r
                if result.get('data_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size
                if all(result.get(f) not in {None, ''}
                    for f in DataResult._fields)}

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    ( result['file'],
                        result['name'],
                        int(result['data_size']))
                prev_results = {
                    (result['file'], result['name']): DataResult(
                        *(result[f] for f in DataResult._fields))
                    for result in r
                    if result.get('data_size') not in {None, ''}]
                    if all(result.get(f) not in {None, ''}
                        for f in DataResult._fields)}
        except FileNotFoundError:
            prev_results = []

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
@@ -133,111 +176,87 @@ def main(**args):
                for result in r:
                    file = result.pop('file', '')
                    func = result.pop('name', '')
                    result.pop('data_size', None)
                    for f in DataResult._fields:
                        result.pop(f, None)
                    merged_results[(file, func)] = result
                    other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, size in results:
            merged_results[(file, func)]['data_size'] = size
        for (file, func), result in results.items():
            merged_results[(file, func)] |= result._asdict()

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
            w = csv.DictWriter(f, ['file', 'name',
                *other_fields, *DataResult._fields])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: 0)
        for file, func, size in results:
            entry = (file if by == 'file' else func)
            entries[entry] += size
        return entries

    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def sorted_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1], x))
    def print_header(by):
        if by == 'total':
            entry = lambda k: 'TOTAL'
        elif by == 'file':
            entry = lambda k: k[0]
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        else:
            return sorted(entries, key=lambda x: (-x[1][3], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, size):
        print("%-36s %7d" % (name, size))

    def print_diff_entry(name, old, new, diff, ratio):
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)
        entry = lambda k: k[1]

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
            print('%-36s %s' % (by, DataResult._header))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)
            old = {entry(k) for k in results.keys()}
            new = {entry(k) for k in prev_results.keys()}
            print('%-36s %s' % (
                '%s (%d added, %d removed)' % (by,
                    sum(1 for k in new if k not in old),
                    sum(1 for k in old if k not in new))
                if by else '',
                DataDiff._header))

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
    def print_entries(by):
        if by == 'total':
            entry = lambda k: 'TOTAL'
        elif by == 'file':
            entry = lambda k: k[0]
        else:
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)
            entry = lambda k: k[1]

        entries = co.defaultdict(lambda: DataResult())
        for k, result in results.items():
            entries[entry(k)] += result

        if not args.get('diff'):
            for name, result in sorted(entries.items(),
                    key=lambda p: (p[1].key(**args), p)):
                print('%-36s %s' % (name, result))
        else:
            prev_entries = co.defaultdict(lambda: DataResult())
            for k, result in prev_results.items():
                prev_entries[entry(k)] += result

            diff_entries = {name: entries.get(name) - prev_entries.get(name)
                for name in (entries.keys() | prev_entries.keys())}

            for name, diff in sorted(diff_entries.items(),
                    key=lambda p: (p[1].key(**args), p)):
                if diff or args.get('all'):
                    print('%-36s %s' % (name, diff))

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
        print_header('')
        print_entries('total')
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
        print_header('file')
        print_entries('file')
        print_entries('total')
    else:
        print_entries(by='name')
        print_totals()
        print_header('function')
        print_entries('function')
        print_entries('total')


if __name__ == "__main__":
    import argparse
scripts/stack.py (386 lines changed)

@@ -24,6 +24,90 @@ def openio(path, mode='r'):
    else:
        return open(path, mode)

class StackResult(co.namedtuple('StackResult', 'stack_frame,stack_limit')):
    __slots__ = ()
    def __new__(cls, stack_frame=0, stack_limit=0):
        return super().__new__(cls,
            int(stack_frame),
            float(stack_limit))

    def __add__(self, other):
        return self.__class__(
            self.stack_frame + other.stack_frame,
            max(self.stack_limit, other.stack_limit))

    def __sub__(self, other):
        return StackDiff(other, self)

    def __rsub__(self, other):
        return self.__class__.__sub__(other, self)

    def key(self, **args):
        if args.get('limit_sort'):
            return -self.stack_limit
        elif args.get('reverse_limit_sort'):
            return +self.stack_limit
        elif args.get('frame_sort'):
            return -self.stack_frame
        elif args.get('reverse_frame_sort'):
            return +self.stack_frame
        else:
            return None

    _header = '%7s %7s' % ('frame', 'limit')
    def __str__(self):
        return '%7d %7s' % (
            self.stack_frame,
            '∞' if m.isinf(self.stack_limit) else int(self.stack_limit))

class StackDiff(co.namedtuple('StackDiff', 'old,new')):
    __slots__ = ()

    def ratio(self):
        old_limit = self.old.stack_limit if self.old is not None else 0
        new_limit = self.new.stack_limit if self.new is not None else 0
        return (0.0 if m.isinf(new_limit) and m.isinf(old_limit)
            else +float('inf') if m.isinf(new_limit)
            else -float('inf') if m.isinf(old_limit)
            else 0.0 if not old_limit and not new_limit
            else 1.0 if not old_limit
            else (new_limit-old_limit) / old_limit)

    def key(self, **args):
        return (
            self.new.key(**args) if self.new is not None else 0,
            -self.ratio())

    def __bool__(self):
        return bool(self.ratio())

    _header = '%15s %15s %15s' % ('old', 'new', 'diff')
    def __str__(self):
        old_frame = self.old.stack_frame if self.old is not None else 0
        old_limit = self.old.stack_limit if self.old is not None else 0
        new_frame = self.new.stack_frame if self.new is not None else 0
        new_limit = self.new.stack_limit if self.new is not None else 0
        diff_frame = new_frame - old_frame
        diff_limit = (0 if m.isinf(new_limit) and m.isinf(old_limit)
            else new_limit - old_limit)
        ratio = self.ratio()
        return '%7s %7s %7s %7s %+7d %7s%s' % (
            old_frame if self.old is not None else '-',
            ('∞' if m.isinf(old_limit) else int(old_limit))
                if self.old is not None else '-',
            new_frame if self.new is not None else '-',
            ('∞' if m.isinf(new_limit) else int(new_limit))
                if self.new is not None else '-',
            diff_frame,
            '+∞' if diff_limit > 0 and m.isinf(diff_limit)
                else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
                else '%+d' % diff_limit,
            '' if not ratio
                else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
                else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
                else ' (%+.1f%%)' % (100*ratio))

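Note that StackResult merges differently from the size results: frames add, but limits take the max, and recursion is represented with an infinite limit rendered as '∞'. An illustration run in the script's context (values invented):

    a = StackResult(stack_frame=32, stack_limit=96)
    b = StackResult(stack_frame=48, stack_limit=float('inf'))   # recursion below
    print(StackResult._header)
    print(a + b)          #      80       ∞
    print(b - a)          # a StackDiff row, rendered with the +∞ markers above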
def collect(paths, **args):
    # parse the vcg format
    k_pattern = re.compile('([a-z]+)\s*:', re.DOTALL)
@@ -55,7 +139,7 @@ def collect(paths, **args):
        return node

    # collect into functions
    results = co.defaultdict(lambda: (None, None, 0, set()))
    callgraph = co.defaultdict(lambda: (None, None, 0, set()))
    f_pattern = re.compile(
        r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')
    for path in paths:
@@ -73,29 +157,29 @@ def collect(paths, **args):
                if not args.get('quiet') and type != 'static':
                    print('warning: found non-static stack for %s (%s)'
                        % (function, type))
                _, _, _, targets = results[info['title']]
                results[info['title']] = (
                _, _, _, targets = callgraph[info['title']]
                callgraph[info['title']] = (
                    file, function, int(size), targets)
            elif k == 'edge':
                info = dict(info)
                _, _, _, targets = results[info['sourcename']]
                _, _, _, targets = callgraph[info['sourcename']]
                targets.add(info['targetname'])
            else:
                continue

    if not args.get('everything'):
        for source, (s_file, s_function, _, _) in list(results.items()):
        for source, (s_file, s_function, _, _) in list(callgraph.items()):
            # discard internal functions
            if s_file.startswith('<') or s_file.startswith('/usr/include'):
                del results[source]
                del callgraph[source]

    # find maximum stack size recursively, this requires also detecting cycles
    # (in case of recursion)
    def find_limit(source, seen=None):
        seen = seen or set()
        if source not in results:
        if source not in callgraph:
            return 0
        _, _, frame, targets = results[source]
        _, _, frame, targets = callgraph[source]

        limit = 0
        for target in targets:
@@ -107,22 +191,24 @@ def collect(paths, **args):

        return frame + limit

    def find_deps(targets):
        deps = set()
    def find_calls(targets):
        calls = set()
        for target in targets:
            if target in results:
                t_file, t_function, _, _ = results[target]
                deps.add((t_file, t_function))
        return deps
            if target in callgraph:
                t_file, t_function, _, _ = callgraph[target]
                calls.add((t_file, t_function))
        return calls

    # flatten into a list
    flat_results = []
    for source, (s_file, s_function, frame, targets) in results.items():
    # build results
    results = {}
    result_calls = {}
    for source, (s_file, s_function, frame, targets) in callgraph.items():
        limit = find_limit(source)
        deps = find_deps(targets)
        flat_results.append((s_file, s_function, frame, limit, deps))
        calls = find_calls(targets)
        results[(s_file, s_function)] = StackResult(frame, limit)
        result_calls[(s_file, s_function)] = calls

    return flat_results
    return results, result_calls
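The body of find_limit is cut off by the hunk above, but the idea it implements, per the comment, is a cycle-aware walk of the call graph where recursion has no static bound. One plausible way to complete the loop, kept as a simplified sketch rather than the exact code:

    limit = 0
    for target in targets:
        if target in seen or target == source:
            # cycle: the stack has no static bound, so the limit becomes infinite
            limit = float('inf')
        else:
            limit = max(limit, find_limit(target, seen | {source}))
    return frame + limit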
def main(**args):
    # find sizes
@@ -140,49 +226,33 @@ def main(**args):
            print('no .ci files found in %r?' % args['ci_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
        results, result_calls = collect(paths, **args)
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                ( result['file'],
                    result['name'],
                    int(result['stack_frame']),
                    float(result['stack_limit']), # note limit can be inf
                    set())
            results = {
                (result['file'], result['name']): StackResult(
                    *(result[f] for f in StackResult._fields))
                for result in r
                if result.get('stack_frame') not in {None, ''}
                if result.get('stack_limit') not in {None, ''}]
                if all(result.get(f) not in {None, ''}
                    for f in StackResult._fields)}

    total_frame = 0
    total_limit = 0
    for _, _, frame, limit, _ in results:
        total_frame += frame
        total_limit = max(total_limit, limit)
        result_calls = {}

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    ( result['file'],
                        result['name'],
                        int(result['stack_frame']),
                        float(result['stack_limit']),
                        set())
                prev_results = {
                    (result['file'], result['name']): StackResult(
                        *(result[f] for f in StackResult._fields))
                    for result in r
                    if result.get('stack_frame') not in {None, ''}
                    if result.get('stack_limit') not in {None, ''}]
                    if all(result.get(f) not in {None, ''}
                        for f in StackResult._fields)}
        except FileNotFoundError:
            prev_results = []

        prev_total_frame = 0
        prev_total_limit = 0
        for _, _, frame, limit, _ in prev_results:
            prev_total_frame += frame
            prev_total_limit = max(prev_total_limit, limit)

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
@@ -196,193 +266,113 @@ def main(**args):
                for result in r:
                    file = result.pop('file', '')
                    func = result.pop('name', '')
                    result.pop('stack_frame', None)
                    result.pop('stack_limit', None)
                    for f in StackResult._fields:
                        result.pop(f, None)
                    merged_results[(file, func)] = result
                    other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, frame, limit, _ in results:
            merged_results[(file, func)]['stack_frame'] = frame
            merged_results[(file, func)]['stack_limit'] = limit
        for (file, func), result in results.items():
            merged_results[(file, func)] |= result._asdict()

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit'])
            w = csv.DictWriter(f, ['file', 'name',
                *other_fields, *StackResult._fields])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: (0, 0, set()))
        for file, func, frame, limit, deps in results:
            entry = (file if by == 'file' else func)
            entry_frame, entry_limit, entry_deps = entries[entry]
            entries[entry] = (
                entry_frame + frame,
                max(entry_limit, limit),
                entry_deps | {file if by == 'file' else func
                    for file, func in deps})
        return entries

    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set()))
        for name, (new_frame, new_limit, deps) in news.items():
            diff[name] = (
                None, None,
                new_frame, new_limit,
                new_frame, new_limit,
                1.0,
                deps)
        for name, (old_frame, old_limit, _) in olds.items():
            _, _, new_frame, new_limit, _, _, _, deps = diff[name]
            diff[name] = (
                old_frame, old_limit,
                new_frame, new_limit,
                (new_frame or 0) - (old_frame or 0),
                0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
                    else (new_limit or 0) - (old_limit or 0),
                0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
                    else +float('inf') if m.isinf(new_limit or 0)
                    else -float('inf') if m.isinf(old_limit or 0)
                    else +0.0 if not old_limit and not new_limit
                    else +1.0 if not old_limit
                    else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0),
                deps)
        return diff

    def sorted_entries(entries):
        if args.get('limit_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_limit_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        elif args.get('frame_sort'):
            return sorted(entries, key=lambda x: (-x[1][0], x))
        elif args.get('reverse_frame_sort'):
            return sorted(entries, key=lambda x: (+x[1][0], x))
    def print_header(by):
        if by == 'total':
            entry = lambda k: 'TOTAL'
        elif by == 'file':
            entry = lambda k: k[0]
        else:
            return sorted(entries)
            entry = lambda k: k[1]

    def sorted_diff_entries(entries):
        if args.get('limit_sort'):
            return sorted(entries, key=lambda x: (-(x[1][3] or 0), x))
        elif args.get('reverse_limit_sort'):
            return sorted(entries, key=lambda x: (+(x[1][3] or 0), x))
        elif args.get('frame_sort'):
            return sorted(entries, key=lambda x: (-(x[1][2] or 0), x))
        elif args.get('reverse_frame_sort'):
            return sorted(entries, key=lambda x: (+(x[1][2] or 0), x))
        else:
            return sorted(entries, key=lambda x: (-x[1][6], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s %7s' % (by, 'frame', 'limit'))
            print('%-36s %s' % (by, StackResult._header))
        else:
            print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff'))
            old = {entry(k) for k in results.keys()}
            new = {entry(k) for k in prev_results.keys()}
            print('%-36s %s' % (
                '%s (%d added, %d removed)' % (by,
                    sum(1 for k in new if k not in old),
                    sum(1 for k in old if k not in new))
                if by else '',
                StackDiff._header))

    def print_entry(name, frame, limit):
        print("%-36s %7d %7s" % (name,
            frame, '∞' if m.isinf(limit) else int(limit)))

    def print_diff_entry(name,
            old_frame, old_limit,
            new_frame, new_limit,
            diff_frame, diff_limit,
            ratio):
        print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name,
            old_frame if old_frame is not None else "-",
            ('∞' if m.isinf(old_limit) else int(old_limit))
                if old_limit is not None else "-",
            new_frame if new_frame is not None else "-",
            ('∞' if m.isinf(new_limit) else int(new_limit))
                if new_limit is not None else "-",
            diff_frame,
            ('+∞' if diff_limit > 0 and m.isinf(diff_limit)
                else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
                else '%+d' % diff_limit),
            '' if not ratio
                else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
                else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
                else ' (%+.1f%%)' % (100*ratio)))

    def print_entries(by='name'):
        # build optional tree of dependencies
        def print_deps(entries, depth, print,
    def print_entries(by):
        # print optional tree of dependencies
        def print_calls(entries, entry_calls, depth,
                filter=lambda _: True,
                prefixes=('', '', '', '')):
            entries = entries if isinstance(entries, list) else list(entries)
            filtered_entries = [(name, entry)
                for name, entry in entries
                if filter(name)]
            for i, (name, entry) in enumerate(filtered_entries):
            filtered_entries = {
                name: result for name, result in entries.items()
                if filter(name)}
            for i, (name, result) in enumerate(sorted(filtered_entries.items(),
                    key=lambda p: (p[1].key(**args), p))):
                last = (i == len(filtered_entries)-1)
                print(prefixes[0+last] + name, entry)
                print('%-36s %s' % (prefixes[0+last] + name, result))

                if depth > 0:
                    deps = entry[-1]
                    print_deps(entries, depth-1, print,
                        lambda name: name in deps,
                if depth > 0 and by != 'total':
                    calls = entry_calls.get(name, set())
                    print_calls(entries, entry_calls, depth-1,
                        lambda name: name in calls,
                        ( prefixes[2+last] + "|-> ",
                            prefixes[2+last] + "'-> ",
                            prefixes[2+last] + "| ",
                            prefixes[2+last] + " "))

        entries = dedup_entries(results, by=by)
        if by == 'total':
            entry = lambda k: 'TOTAL'
        elif by == 'file':
            entry = lambda k: k[0]
        else:
            entry = lambda k: k[1]

        entries = co.defaultdict(lambda: StackResult())
        for k, result in results.items():
            entries[entry(k)] += result

        entry_calls = co.defaultdict(lambda: set())
        for k, calls in result_calls.items():
            entry_calls[entry(k)] |= {entry(c) for c in calls}

        if not args.get('diff'):
            print_header(by=by)
            print_deps(
                sorted_entries(entries.items()),
                args.get('depth') or 0,
                lambda name, entry: print_entry(name, *entry[:-1]))
            print_calls(
                entries,
                entry_calls,
                args.get('depth', 0))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            prev_entries = co.defaultdict(lambda: StackResult())
            for k, result in prev_results.items():
                prev_entries[entry(k)] += result

            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
                sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
            print_deps(
                filter(
                    lambda x: x[1][6] or args.get('all'),
                    sorted_diff_entries(diff.items())),
                args.get('depth') or 0,
                lambda name, entry: print_diff_entry(name, *entry[:-1]))
            diff_entries = {name: entries.get(name) - prev_entries.get(name)
                for name in (entries.keys() | prev_entries.keys())}

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total_frame, total_limit)
        else:
            diff_frame = total_frame - prev_total_frame
            diff_limit = (
                0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
                else (total_limit or 0) - (prev_total_limit or 0))
            ratio = (
                0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
                else +float('inf') if m.isinf(total_limit or 0)
                else -float('inf') if m.isinf(prev_total_limit or 0)
                else 0.0 if not prev_total_limit and not total_limit
                else 1.0 if not prev_total_limit
                else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
            print_diff_entry('TOTAL',
                prev_total_frame, prev_total_limit,
                total_frame, total_limit,
                diff_frame, diff_limit,
                ratio)
            print_calls(
                {name: diff for name, diff in diff_entries.items()
                    if diff or args.get('all')},
                entry_calls,
                args.get('depth', 0))

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
        print_header('')
        print_entries('total')
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
        print_header('file')
        print_entries('file')
        print_entries('total')
    else:
        print_entries(by='name')
        print_totals()
        print_header('function')
        print_entries('function')
        print_entries('total')

    # catch recursion
    if args.get('error_on_recursion') and any(
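With a nonzero depth, print_calls renders each entry's callees beneath it using the four prefixes above, so the per-function stack results come out as an ASCII call tree. Purely illustrative output (names, numbers, and column layout here are made up; the real columns come from StackResult.__str__):

    lfs.c                                   1496    2240
    |-> lfs_dir_commit                       320    1024
    |   '-> lfs_bd_prog                       64     128
    '-> lfs_bd_read                           72      96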
@ -24,6 +24,60 @@ def openio(path, mode='r'):
|
||||
else:
|
||||
return open(path, mode)
|
||||
|
||||
class StructsResult(co.namedtuple('StructsResult', 'struct_size')):
|
||||
__slots__ = ()
|
||||
def __new__(cls, struct_size=0):
|
||||
return super().__new__(cls, int(struct_size))
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(self.struct_size + other.struct_size)
|
||||
|
||||
def __sub__(self, other):
|
||||
return StructsDiff(other, self)
|
||||
|
||||
def __rsub__(self, other):
|
||||
return self.__class__.__sub__(other, self)
|
||||
|
||||
def key(self, **args):
|
||||
if args.get('size_sort'):
|
||||
return -self.struct_size
|
||||
elif args.get('reverse_size_sort'):
|
||||
return +self.struct_size
|
||||
else:
|
||||
return None
|
||||
|
||||
_header = '%7s' % 'size'
|
||||
def __str__(self):
|
||||
return '%7d' % self.struct_size
|
||||
|
||||
class StructsDiff(co.namedtuple('StructsDiff', 'old,new')):
__slots__ = ()

def ratio(self):
old = self.old.struct_size if self.old is not None else 0
new = self.new.struct_size if self.new is not None else 0
return (new-old) / old if old else 1.0

def key(self, **args):
return (
self.new.key(**args) if self.new is not None else 0,
-self.ratio())

def __bool__(self):
return bool(self.ratio())

_header = '%7s %7s %7s' % ('old', 'new', 'diff')
def __str__(self):
old = self.old.struct_size if self.old is not None else 0
new = self.new.struct_size if self.new is not None else 0
diff = new - old
ratio = self.ratio()
return '%7s %7s %+7d%s' % (
old or "-",
new or "-",
diff,
' (%+.1f%%)' % (100*ratio) if ratio else '')

def collect(paths, **args):
decl_pattern = re.compile(
'^\s+(?P<no>[0-9]+)'
@ -36,7 +90,7 @@ def collect(paths, **args):
'|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
'|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')

results = co.defaultdict(lambda: 0)
results = {}
for path in paths:
# find decl, we want to filter by structs in .h files
decls = {}
@ -84,8 +138,18 @@ def collect(paths, **args):
if (name is not None
and decl is not None
and size is not None):
decl = decls.get(decl, '?')
results[(decl, name)] = size
file = decls.get(decl, '?')
# map to source file
file = re.sub('\.o$', '.c', file)
if args.get('build_dir'):
file = re.sub(
'%s/*' % re.escape(args['build_dir']), '',
file)
# only include structs declared in header files in the
# current directory, ignore internal-only structs (
# these are represented in other measurements)
if args.get('everything') or file.endswith('.h'):
results[(file, name)] = StructsResult(size)
found = (m.group('tag') == 'structure_type')
name = None
decl = None
@ -103,24 +167,7 @@ def collect(paths, **args):
sys.stdout.write(line)
sys.exit(-1)

flat_results = []
for (file, struct), size in results.items():
# map to source files
if args.get('build_dir'):
file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
# only include structs declared in header files in the current
# directory, ignore internal-only # structs (these are represented
# in other measurements)
if not args.get('everything'):
if not file.endswith('.h'):
continue
# replace .o with .c, different scripts report .o/.c, we need to
# choose one if we want to deduplicate csv files
file = re.sub('\.o$', '.c', file)

flat_results.append((file, struct, size))

return flat_results
return results


def main(**args):
@ -143,35 +190,27 @@ def main(**args):
else:
with openio(args['use']) as f:
r = csv.DictReader(f)
results = [
( result['file'],
result['name'],
int(result['struct_size']))
results = {
(result['file'], result['name']): StructsResult(
*(result[f] for f in StructsResult._fields))
for result in r
if result.get('struct_size') not in {None, ''}]

total = 0
for _, _, size in results:
total += size
if all(result.get(f) not in {None, ''}
for f in StructsResult._fields)}

# find previous results?
if args.get('diff'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
prev_results = [
( result['file'],
result['name'],
int(result['struct_size']))
prev_results = {
(result['file'], result['name']): StructsResult(
*(result[f] for f in StructsResult._fields))
for result in r
if result.get('struct_size') not in {None, ''}]
if all(result.get(f) not in {None, ''}
for f in StructsResult._fields)}
except FileNotFoundError:
prev_results = []

prev_total = 0
for _, _, size in prev_results:
prev_total += size

# write results to CSV
if args.get('output'):
merged_results = co.defaultdict(lambda: {})
@ -184,112 +223,88 @@ def main(**args):
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
struct = result.pop('name', '')
result.pop('struct_size', None)
merged_results[(file, struct)] = result
func = result.pop('name', '')
for f in StructsResult._fields:
result.pop(f, None)
merged_results[(file, func)] = result
other_fields = result.keys()
except FileNotFoundError:
pass

for file, struct, size in results:
merged_results[(file, struct)]['struct_size'] = size
for (file, func), result in results.items():
merged_results[(file, func)] |= result._asdict()

with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
w = csv.DictWriter(f, ['file', 'name',
*other_fields, *StructsResult._fields])
w.writeheader()
for (file, struct), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': struct, **result})
for (file, func), result in sorted(merged_results.items()):
w.writerow({'file': file, 'name': func, **result})

# print results
def dedup_entries(results, by='name'):
entries = co.defaultdict(lambda: 0)
for file, struct, size in results:
entry = (file if by == 'file' else struct)
entries[entry] += size
return entries

def diff_entries(olds, news):
diff = co.defaultdict(lambda: (0, 0, 0, 0))
for name, new in news.items():
diff[name] = (0, new, new, 1.0)
for name, old in olds.items():
_, new, _, _ = diff[name]
diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
return diff

def sorted_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1], x))
def print_header(by):
if by == 'total':
entry = lambda k: 'TOTAL'
elif by == 'file':
entry = lambda k: k[0]
else:
return sorted(entries)

def sorted_diff_entries(entries):
if args.get('size_sort'):
return sorted(entries, key=lambda x: (-x[1][1], x))
elif args.get('reverse_size_sort'):
return sorted(entries, key=lambda x: (+x[1][1], x))
else:
return sorted(entries, key=lambda x: (-x[1][3], x))

def print_header(by=''):
if not args.get('diff'):
print('%-36s %7s' % (by, 'size'))
else:
print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

def print_entry(name, size):
print("%-36s %7d" % (name, size))

def print_diff_entry(name, old, new, diff, ratio):
print("%-36s %7s %7s %+7d%s" % (name,
old or "-",
new or "-",
diff,
' (%+.1f%%)' % (100*ratio) if ratio else ''))

def print_entries(by='name'):
entries = dedup_entries(results, by=by)
entry = lambda k: k[1]

if not args.get('diff'):
print_header(by=by)
for name, size in sorted_entries(entries.items()):
print_entry(name, size)
print('%-36s %s' % (by, StructsResult._header))
else:
prev_entries = dedup_entries(prev_results, by=by)
diff = diff_entries(prev_entries, entries)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for old, _, _, _ in diff.values() if not old),
sum(1 for _, new, _, _ in diff.values() if not new)))
for name, (old, new, diff, ratio) in sorted_diff_entries(
diff.items()):
if ratio or args.get('all'):
print_diff_entry(name, old, new, diff, ratio)
old = {entry(k) for k in results.keys()}
new = {entry(k) for k in prev_results.keys()}
print('%-36s %s' % (
'%s (%d added, %d removed)' % (by,
sum(1 for k in new if k not in old),
sum(1 for k in old if k not in new))
if by else '',
StructsDiff._header))

def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
def print_entries(by):
if by == 'total':
entry = lambda k: 'TOTAL'
elif by == 'file':
entry = lambda k: k[0]
else:
ratio = (0.0 if not prev_total and not total
else 1.0 if not prev_total
else (total-prev_total)/prev_total)
print_diff_entry('TOTAL',
prev_total, total,
total-prev_total,
ratio)
entry = lambda k: k[1]

entries = co.defaultdict(lambda: StructsResult())
for k, result in results.items():
entries[entry(k)] += result

if not args.get('diff'):
for name, result in sorted(entries.items(),
key=lambda p: (p[1].key(**args), p)):
print('%-36s %s' % (name, result))
else:
prev_entries = co.defaultdict(lambda: StructsResult())
for k, result in prev_results.items():
prev_entries[entry(k)] += result

diff_entries = {name: entries.get(name) - prev_entries.get(name)
for name in (entries.keys() | prev_entries.keys())}

for name, diff in sorted(diff_entries.items(),
key=lambda p: (p[1].key(**args), p)):
if diff or args.get('all'):
print('%-36s %s' % (name, diff))

if args.get('quiet'):
pass
elif args.get('summary'):
print_header()
print_totals()
print_header('')
print_entries('total')
elif args.get('files'):
print_entries(by='file')
print_totals()
print_header('file')
print_entries('file')
print_entries('total')
else:
print_entries(by='name')
print_totals()
print_header('struct')
print_entries('struct')
print_entries('total')


if __name__ == "__main__":
import argparse
@ -312,7 +327,7 @@ if __name__ == "__main__":
parser.add_argument('-m', '--merge',
help="Merge with an existing CSV file when writing to output.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all functions, not just the ones that changed.")
help="Show all structs, not just the ones that changed.")
parser.add_argument('-A', '--everything', action='store_true',
help="Include builtin and libc specific symbols.")
parser.add_argument('-s', '--size-sort', action='store_true',

@ -3,58 +3,202 @@
# Script to summarize the outputs of other scripts. Operates on CSV files.
#

import functools as ft
import collections as co
import os
import csv
import re
import functools as ft
import math as m
import os
import re

# displayable fields
Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
FIELDS = [
# name, parse, accumulate, fmt, print, null
Field('code',
lambda r: int(r['code_size']),
sum,
lambda r: r,
'%7s',
lambda r: r,
'-',
lambda old, new: (new-old)/old),
Field('data',
lambda r: int(r['data_size']),
sum,
lambda r: r,
'%7s',
lambda r: r,
'-',
lambda old, new: (new-old)/old),
Field('stack',
lambda r: float(r['stack_limit']),
max,
lambda r: r,
'%7s',
lambda r: '∞' if m.isinf(r) else int(r),
'-',
lambda old, new: (new-old)/old),
Field('structs',
lambda r: int(r['struct_size']),
sum,
lambda r: r,
'%8s',
lambda r: r,
'-',
lambda old, new: (new-old)/old),
Field('coverage',
lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
lambda r: r[0]/r[1],
'%19s',
lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
'%11s %7s' % ('-', '-'),
lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
]
# each result is a type generated by another script
RESULTS = []
FIELDS = 'code,data,stack,structs'
def result(cls):
RESULTS.append(cls)
return cls

@result
class CodeResult(co.namedtuple('CodeResult', 'code_size')):
__slots__ = ()
def __new__(cls, code_size=0):
return super().__new__(cls, int(code_size))

def __add__(self, other):
return self.__class__(self.code_size + other.code_size)

def __sub__(self, other):
old = other.code_size if other is not None else 0
new = self.code_size if self is not None else 0
return (new-old) / old if old else 1.0

def __rsub__(self, other):
return self.__class__.__sub__(other, self)

def key(self):
return -self.code_size

_header = '%7s' % 'code'
_nil = '%7s' % '-'
def __str__(self):
return '%7s' % self.code_size

@result
class DataResult(co.namedtuple('DataResult', 'data_size')):
__slots__ = ()
def __new__(cls, data_size=0):
return super().__new__(cls, int(data_size))

def __add__(self, other):
return self.__class__(self.data_size + other.data_size)

def __sub__(self, other):
old = other.data_size if other is not None else 0
new = self.data_size if self is not None else 0
return (new-old) / old if old else 1.0

def __rsub__(self, other):
return self.__class__.__sub__(other, self)

def key(self):
return -self.data_size

_header = '%7s' % 'data'
_nil = '%7s' % '-'
def __str__(self):
return '%7s' % self.data_size

@result
class StackResult(co.namedtuple('StackResult', 'stack_limit')):
__slots__ = ()
def __new__(cls, stack_limit=0):
return super().__new__(cls, float(stack_limit))

def __add__(self, other):
return self.__class__(max(self.stack_limit, other.stack_limit))

def __sub__(self, other):
old_limit = other.stack_limit if other is not None else 0
new_limit = self.stack_limit if self is not None else 0
return (0.0 if m.isinf(new_limit) and m.isinf(old_limit)
else +float('inf') if m.isinf(new_limit)
else -float('inf') if m.isinf(old_limit)
else 0.0 if not old_limit and not new_limit
else 1.0 if not old_limit
else (new_limit-old_limit) / old_limit)

def __rsub__(self, other):
return self.__class__.__sub__(other, self)

def key(self):
return -self.stack_limit

_header = '%7s' % 'stack'
_nil = '%7s' % '-'
def __str__(self):
return '%7s' % (
'∞' if m.isinf(self.stack_limit)
else int(self.stack_limit))

@result
class StructsResult(co.namedtuple('StructsResult', 'struct_size')):
__slots__ = ()
def __new__(cls, struct_size=0):
return super().__new__(cls, int(struct_size))

def __add__(self, other):
return self.__class__(self.struct_size + other.struct_size)

def __sub__(self, other):
old = other.struct_size if other is not None else 0
new = self.struct_size if self is not None else 0
return (new-old) / old if old else 1.0

def __rsub__(self, other):
return self.__class__.__sub__(other, self)

def key(self):
return -self.struct_size

_header = '%7s' % 'structs'
_nil = '%7s' % '-'
def __str__(self):
return '%7s' % self.struct_size

@result
class CoverageLineResult(co.namedtuple('CoverageResult',
'coverage_line_hits,coverage_line_count')):
__slots__ = ()
def __new__(cls, coverage_line_hits=0, coverage_line_count=0):
return super().__new__(cls,
int(coverage_line_hits),
int(coverage_line_count))

def __add__(self, other):
return self.__class__(
self.coverage_line_hits + other.coverage_line_hits,
self.coverage_line_count + other.coverage_line_count)

def __sub__(self, other):
old_hits = other.coverage_line_hits if other is not None else 0
old_count = other.coverage_line_count if other is not None else 0
new_hits = self.coverage_line_hits if self is not None else 0
new_count = self.coverage_line_count if self is not None else 0
return ((new_hits/new_count if new_count else 1.0)
- (old_hits/old_count if old_count else 1.0))

def __rsub__(self, other):
return self.__class__.__sub__(other, self)

def key(self):
return -(self.coverage_line_hits/self.coverage_line_count
if self.coverage_line_count else -1)

_header = '%19s' % 'coverage/line'
_nil = '%11s %7s' % ('-', '-')
def __str__(self):
return '%11s %7s' % (
'%d/%d' % (self.coverage_line_hits, self.coverage_line_count)
if self.coverage_line_count else '-',
'%.1f%%' % (100*self.coverage_line_hits/self.coverage_line_count)
if self.coverage_line_count else '-')

@result
class CoverageBranchResult(co.namedtuple('CoverageResult',
'coverage_branch_hits,coverage_branch_count')):
__slots__ = ()
def __new__(cls, coverage_branch_hits=0, coverage_branch_count=0):
return super().__new__(cls,
int(coverage_branch_hits),
int(coverage_branch_count))

def __add__(self, other):
return self.__class__(
self.coverage_branch_hits + other.coverage_branch_hits,
self.coverage_branch_count + other.coverage_branch_count)

def __sub__(self, other):
old_hits = other.coverage_branch_hits if other is not None else 0
old_count = other.coverage_branch_count if other is not None else 0
new_hits = self.coverage_branch_hits if self is not None else 0
new_count = self.coverage_branch_count if self is not None else 0
return ((new_hits/new_count if new_count else 1.0)
- (old_hits/old_count if old_count else 1.0))

def __rsub__(self, other):
return self.__class__.__sub__(other, self)

def key(self):
return -(self.coverage_branch_hits/self.coverage_branch_count
if self.coverage_branch_count else -1)

_header = '%19s' % 'coverage/branch'
_nil = '%11s %7s' % ('-', '-')
def __str__(self):
return '%11s %7s' % (
'%d/%d' % (self.coverage_branch_hits, self.coverage_branch_count)
if self.coverage_branch_count else '-',
'%.1f%%' % (100*self.coverage_branch_hits/self.coverage_branch_count)
if self.coverage_branch_count else '-')


def openio(path, mode='r'):
@ -76,178 +220,171 @@ def main(**args):
for result in r:
file = result.pop('file', '')
name = result.pop('name', '')
prev = results[(file, name)]
for field in FIELDS:
try:
r = field.parse(result)
if field.name in prev:
results[(file, name)][field.name] = field.acc(
[prev[field.name], r])
else:
results[(file, name)][field.name] = r
except (KeyError, ValueError):
pass
for Result in RESULTS:
if all(result.get(f) not in {None, ''}
for f in Result._fields):
results[(file, name)][Result.__name__] = (
results[(file, name)].get(
Result.__name__, Result())
+ Result(*(result[f]
for f in Result._fields)))
except FileNotFoundError:
pass

# find fields
if args.get('all_fields'):
fields = FIELDS
elif args.get('fields') is not None:
fields_dict = {field.name: field for field in FIELDS}
fields = [fields_dict[f] for f in args['fields']]
else:
fields = []
for field in FIELDS:
if any(field.name in result for result in results.values()):
fields.append(field)

# find total for every field
total = {}
for result in results.values():
for field in fields:
if field.name in result and field.name in total:
total[field.name] = field.acc(
[total[field.name], result[field.name]])
elif field.name in result:
total[field.name] = result[field.name]

# find previous results?
if args.get('diff'):
prev_results = co.defaultdict(lambda: {})
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
name = result.pop('name', '')
prev = prev_results[(file, name)]
for field in FIELDS:
try:
r = field.parse(result)
if field.name in prev:
prev_results[(file, name)][field.name] = field.acc(
[prev[field.name], r])
else:
prev_results[(file, name)][field.name] = r
except (KeyError, ValueError):
pass
except FileNotFoundError:
pass
for path in args.get('csv_paths', '-'):
try:
with openio(args['diff']) as f:
r = csv.DictReader(f)
for result in r:
file = result.pop('file', '')
name = result.pop('name', '')
for Result in RESULTS:
if all(result.get(f) not in {None, ''}
for f in Result._fields):
prev_results[(file, name)][Result.__name__] = (
prev_results[(file, name)].get(
Result.__name__, Result())
+ Result(*(result[f]
for f in Result._fields)))
except FileNotFoundError:
pass

prev_total = {}
for result in prev_results.values():
for field in fields:
if field.name in result and field.name in prev_total:
prev_total[field.name] = field.acc(
[prev_total[field.name], result[field.name]])
elif field.name in result:
prev_total[field.name] = result[field.name]
# filter our result types by results that are present
if 'all' in args['fields']:
filtered_results = RESULTS
else:
filtered_results = [
Result for Result in RESULTS
if (any(f.startswith(r)
for r in args['fields']
for f in Result._fields)
or any(Result._header.strip().startswith(r)
for r in args['fields']))]

# figure out a sort key
if args.get('sort'):
key_Result = next(
Result for Result in RESULTS
if (any(f.startswith(args['sort'])
for f in Result._fields)
or Result._header.strip().startswith(args['sort'])))
key = lambda result: result.get(key_Result.__name__, key_Result()).key()
reverse = False
elif args.get('reverse_sort'):
key_Result = next(
Result for Result in RESULTS
if (any(f.startswith(args['reverse_sort'])
for f in Result._fields)
or Result._header.strip().startswith(args['reverse_sort'])))
key = lambda result: result.get(key_Result.__name__, key_Result()).key()
reverse = True
else:
key = lambda _: None
reverse = False

# write merged results to CSV
if args.get('output'):
with openio(args['output'], 'w') as f:
w = csv.DictWriter(f, sum(
(Result._fields for Result in filtered_results),
('file', 'name')))
w.writeheader()
for (file, name), result in sorted(results.items()):
w.writerow(ft.reduce(dict.__or__,
(r._asdict() for r in result.values()),
{'file': file, 'name': name}))

# print results
def dedup_entries(results, by='name'):
def print_header(by):
if by == 'total':
entry = lambda k: 'TOTAL'
elif by == 'file':
entry = lambda k: k[0]
else:
entry = lambda k: k[1]

if not args.get('diff'):
print('%-36s %s' % (by,
' '.join(Result._header for Result in filtered_results)))
else:
old = {entry(k) for k in results.keys()}
new = {entry(k) for k in prev_results.keys()}
print('%-36s %s' % (
'%s (%d added, %d removed)' % (by,
sum(1 for k in new if k not in old),
sum(1 for k in old if k not in new))
if by else '',
' '.join('%s%-10s' % (Result._header, '')
for Result in filtered_results)))

def print_entries(by):
if by == 'total':
entry = lambda k: 'TOTAL'
elif by == 'file':
entry = lambda k: k[0]
else:
entry = lambda k: k[1]

entries = co.defaultdict(lambda: {})
for (file, func), result in results.items():
entry = (file if by == 'file' else func)
prev = entries[entry]
for field in fields:
if field.name in result and field.name in prev:
entries[entry][field.name] = field.acc(
[prev[field.name], result[field.name]])
elif field.name in result:
entries[entry][field.name] = result[field.name]
return entries

def sorted_entries(entries):
if args.get('sort') is not None:
field = {field.name: field for field in FIELDS}[args['sort']]
return sorted(entries, key=lambda x: (
-(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
elif args.get('reverse_sort') is not None:
field = {field.name: field for field in FIELDS}[args['reverse_sort']]
return sorted(entries, key=lambda x: (
+(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
else:
return sorted(entries)

def print_header(by=''):
if not args.get('diff'):
print('%-36s' % by, end='')
for field in fields:
print((' '+field.fmt) % field.name, end='')
print()
else:
print('%-36s' % by, end='')
for field in fields:
print((' '+field.fmt) % field.name, end='')
print(' %-9s' % '', end='')
print()

def print_entry(name, result):
print('%-36s' % name, end='')
for field in fields:
r = result.get(field.name)
if r is not None:
print((' '+field.fmt) % field.repr(r), end='')
else:
print((' '+field.fmt) % '-', end='')
print()

def print_diff_entry(name, old, new):
print('%-36s' % name, end='')
for field in fields:
n = new.get(field.name)
if n is not None:
print((' '+field.fmt) % field.repr(n), end='')
else:
print((' '+field.fmt) % '-', end='')
o = old.get(field.name)
ratio = (
0.0 if m.isinf(o or 0) and m.isinf(n or 0)
else +float('inf') if m.isinf(n or 0)
else -float('inf') if m.isinf(o or 0)
else 0.0 if not o and not n
else +1.0 if not o
else -1.0 if not n
else field.ratio(o, n))
print(' %-9s' % (
'' if not ratio
else '(+∞%)' if ratio > 0 and m.isinf(ratio)
else '(-∞%)' if ratio < 0 and m.isinf(ratio)
else '(%+.1f%%)' % (100*ratio)), end='')
print()

def print_entries(by='name'):
entries = dedup_entries(results, by=by)
for k, result in results.items():
entries[entry(k)] |= {
r.__class__.__name__: entries[entry(k)].get(
r.__class__.__name__, r.__class__()) + r
for r in result.values()}

if not args.get('diff'):
print_header(by=by)
for name, result in sorted_entries(entries.items()):
print_entry(name, result)
for name, result in sorted(entries.items(),
key=lambda p: (key(p[1]), p),
reverse=reverse):
print('%-36s %s' % (name, ' '.join(
str(result.get(Result.__name__, Result._nil))
for Result in filtered_results)))
else:
prev_entries = dedup_entries(prev_results, by=by)
print_header(by='%s (%d added, %d removed)' % (by,
sum(1 for name in entries if name not in prev_entries),
sum(1 for name in prev_entries if name not in entries)))
for name, result in sorted_entries(entries.items()):
if args.get('all') or result != prev_entries.get(name, {}):
print_diff_entry(name, prev_entries.get(name, {}), result)
prev_entries = co.defaultdict(lambda: {})
for k, result in prev_results.items():
prev_entries[entry(k)] |= {
r.__class__.__name__: prev_entries[entry(k)].get(
r.__class__.__name__, r.__class__()) + r
for r in result.values()}

def print_totals():
if not args.get('diff'):
print_entry('TOTAL', total)
else:
print_diff_entry('TOTAL', prev_total, total)
diff_entries = {
name: (prev_entries.get(name), entries.get(name))
for name in (entries.keys() | prev_entries.keys())}

if args.get('summary'):
print_header()
print_totals()
for name, (old, new) in sorted(diff_entries.items(),
key=lambda p: (key(p[1][1]), p)):
fields = []
changed = False
for Result in filtered_results:
o = old.get(Result.__name__) if old is not None else None
n = new.get(Result.__name__) if new is not None else None
ratio = n - o if n is not None or o is not None else 0
changed = changed or ratio
fields.append('%s%-10s' % (
n if n is not None else Result._nil,
'' if not ratio
else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
else ' (%+.1f%%)' % (100*ratio)))
if changed or args.get('all'):
print('%-36s %s' % (name, ' '.join(fields)))

if args.get('quiet'):
pass
elif args.get('summary'):
print_header('')
print_entries('total')
elif args.get('files'):
print_entries(by='file')
print_totals()
print_header('file')
print_entries('file')
print_entries('total')
else:
print_entries(by='name')
print_totals()
print_header('name')
print_entries('name')
print_entries('total')


if __name__ == "__main__":
@ -257,17 +394,21 @@ if __name__ == "__main__":
description="Summarize measurements")
parser.add_argument('csv_paths', nargs='*', default='-',
help="Description of where to find *.csv files. May be a directory \
or list of paths. *.csv files will be merged to show the total \
coverage.")
or list of paths.")
parser.add_argument('-q', '--quiet', action='store_true',
help="Don't show anything, useful with -o.")
parser.add_argument('-o', '--output',
help="Specify CSV file to store results.")
parser.add_argument('-d', '--diff',
help="Specify CSV file to diff against.")
parser.add_argument('-a', '--all', action='store_true',
help="Show all objects, not just the ones that changed.")
parser.add_argument('-e', '--all-fields', action='store_true',
help="Show all fields, even those with no results.")
parser.add_argument('-f', '--fields', type=lambda x: re.split('\s*,\s*', x),
parser.add_argument('-f', '--fields',
type=lambda x: set(re.split('\s*,\s*', x)),
default=FIELDS,
help="Comma separated list of fields to print, by default all fields \
that are found in the CSV files are printed.")
that are found in the CSV files are printed. \"all\" prints all \
fields this script knows. Defaults to %r." % FIELDS)
parser.add_argument('-s', '--sort',
help="Sort by this field.")
parser.add_argument('-S', '--reverse-sort',