diff --git a/sz.py b/sz.py
index bc880c80..20e0dd71 100755
--- a/sz.py
+++ b/sz.py
@@ -4,7 +4,6 @@
 import token
 import tokenize
 import itertools
 from tabulate import tabulate
-from tinygrad.helpers import getenv
 
 TOKEN_WHITELIST = [token.OP, token.NAME, token.NUMBER, token.STRING]
@@ -17,10 +16,8 @@ def gen_stats(base_path="."):
       relfilepath = os.path.relpath(filepath, base_path)
       with tokenize.open(filepath) as file_:
         tokens = [t for t in tokenize.generate_tokens(file_.readline) if t.type in TOKEN_WHITELIST]
-        if getenv("REALCOUNT"):
-          token_count, line_count = len(tokens), len(set([x for t in tokens for x in range(t.start[0], t.end[0]+1)]))
-        else:
-          token_count, line_count = len(tokens), len(set([t.start[0] for t in tokens]))
+        #token_count, line_count = len(tokens), len(set([x for t in tokens for x in range(t.start[0], t.end[0]+1)]))
+        token_count, line_count = len(tokens), len(set([t.start[0] for t in tokens]))
         table.append([relfilepath, line_count, token_count/line_count])
   return table
 
@@ -71,5 +68,5 @@ if __name__ == "__main__":
     print(f"{dir_name:30s} : {sum([x[1] for x in group]):6d}")
   total_lines = sum([x[1] for x in table])
   print(f"\ntotal line count: {total_lines}")
-  max_line_count = getenv("MAX_LINE_COUNT", -1)
+  max_line_count = int(os.getenv("MAX_LINE_COUNT", "-1"))
   assert max_line_count == -1 or total_lines < max_line_count, f"OVER {max_line_count} LINES"