#!/usr/bin/env python3
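# Report line and token statistics for every hand-written Python file under
# tinygrad/, optionally diff two checkouts, and enforce an optional
# MAX_LINE_COUNT line budget.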
import os, sys
import token
import tokenize
import itertools
from tabulate import tabulate
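
# Only these token types count toward size; comments, blank lines, and pure
# indentation are excluded from both the line and tokens/line metrics.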
TOKEN_WHITELIST = [token.OP, token.NAME, token.NUMBER, token.STRING]

def is_docstring(t):
  # A STRING token counts as a docstring when it is triple-quoted and starts
  # its own line, i.e. it is not embedded in a larger expression.
  return t.type == token.STRING and t.string.startswith('"""') and t.line.strip().startswith('"""')

def gen_stats(base_path="."):
  # Walk every .py file under <base_path>/tinygrad and collect rows of
  # [relative path, line count, tokens per line].
  table = []
  for path, _, files in os.walk(os.path.join(base_path, "tinygrad")):
    for name in files:
      if not name.endswith(".py"): continue
      # generated bindings are not hand-written code, so skip them
      if 'tinygrad/runtime/autogen' in path: continue
      filepath = os.path.join(path, name)
      relfilepath = os.path.relpath(filepath, base_path)
      with tokenize.open(filepath) as file_:
        tokens = [t for t in tokenize.generate_tokens(file_.readline) if t.type in TOKEN_WHITELIST and not is_docstring(t)]
        # a line counts only if at least one whitelisted token touches it
        token_count, line_count = len(tokens), len(set([x for t in tokens for x in range(t.start[0], t.end[0]+1)]))
        if line_count > 0: table.append([relfilepath, line_count, token_count/line_count])
  return table
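# A returned row looks like ["tinygrad/tensor.py", 1234, 9.8] (numbers illustrative).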

def gen_diff(table_old, table_new):
  # Compare two gen_stats() tables; each output row is
  # [name, new line count, line diff, new tokens/line, tokens/line diff].
  table = []
  files_new = set([x[0] for x in table_new])
  files_old = set([x[0] for x in table_old])
  added, deleted, common = files_new - files_old, files_old - files_new, files_new & files_old
  for file in added:
    # an added file diffs against zero, so the diff is the full count
    file_stat = [stats for stats in table_new if stats[0] == file]
    table.append([file_stat[0][0], file_stat[0][1], file_stat[0][1]-0, file_stat[0][2], file_stat[0][2]-0])
  for file in deleted:
    file_stat = [stats for stats in table_old if stats[0] == file]
    table.append([file_stat[0][0], 0, 0-file_stat[0][1], 0, 0-file_stat[0][2]])
  # files present in both checkouts are reported only if their stats changed
  for file in common:
    file_stat_old = [stats for stats in table_old if stats[0] == file]
    file_stat_new = [stats for stats in table_new if stats[0] == file]
    if file_stat_new[0][1]-file_stat_old[0][1] != 0 or file_stat_new[0][2]-file_stat_old[0][2] != 0:
      table.append([file_stat_new[0][0], file_stat_new[0][1], file_stat_new[0][1]-file_stat_old[0][1], file_stat_new[0][2],
                    file_stat_new[0][2]-file_stat_old[0][2]])
  return table
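# e.g. a file that grew from 100 to 120 lines yields [name, 120, 20, <new t/l>, <t/l diff>].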

def display_diff(diff): return "+"+str(diff) if diff > 0 else str(diff)
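# e.g. display_diff(3) -> "+3", display_diff(-2) -> "-2", display_diff(0) -> "0"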

if __name__ == "__main__":
  if len(sys.argv) == 3:
    # two checkout paths: report a diff between them
    headers = ["Name", "Lines", "Diff", "Tokens/Line", "Diff"]
    table = gen_diff(gen_stats(sys.argv[1]), gen_stats(sys.argv[2]))
  elif len(sys.argv) == 2:
    headers = ["Name", "Lines", "Tokens/Line"]
    table = gen_stats(sys.argv[1])
  else:
    headers = ["Name", "Lines", "Tokens/Line"]
    table = gen_stats(".")

  if table:
    if len(sys.argv) == 3:
      print("### Changes")
      print("```")
      print(tabulate([headers] + sorted(table, key=lambda x: -x[1]), headers="firstrow", intfmt=(..., "d", "+d"),
                     floatfmt=(..., ..., ..., ".1f", "+.1f"))+"\n")
      print(f"\ntotal line changes: {display_diff(sum([x[2] for x in table]))}")
      print("```")
    else:
      print(tabulate([headers] + sorted(table, key=lambda x: -x[1]), headers="firstrow", floatfmt=".1f")+"\n")
      # per-directory subtotals of line counts
      for dir_name, group in itertools.groupby(sorted([(x[0].rsplit("/", 1)[0], x[1], x[2]) for x in table]), key=lambda x: x[0]):
        print(f"{dir_name:30s} : {sum([x[1] for x in group]):6d}")
      total_lines = sum([x[1] for x in table])
      print(f"\ntotal line count: {total_lines}")

      # optional budget check: fail when the total exceeds MAX_LINE_COUNT
      max_line_count = int(os.getenv("MAX_LINE_COUNT", "-1"))
      assert max_line_count == -1 or total_lines <= max_line_count, f"OVER {max_line_count} LINES"
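
# Example invocations (script name is illustrative):
#   python sz.py                 # stats for ./tinygrad
#   python sz.py path/to/repo    # stats for path/to/repo/tinygrad
#   python sz.py old/ new/       # diff two checkouts of the repo
#   MAX_LINE_COUNT=5000 python sz.py   # assert the total stays under budget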