update pylint path to check indent/space for all (#6022)

also fixed many errors; the old invocation was not checking nested dirs. exclude autogen for now.

can we use ruff for this?
chenyu 2024-08-10 14:41:09 -04:00 committed by GitHub
parent cfb04c67d1
commit e6c7c3e499
15 changed files with 34 additions and 34 deletions
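For context, here is a rough standalone sketch of what the two enabled messages flag. This is an approximation, not pylint's actual implementation; the 2-space unit matches the workflow's `--indent-string` flag below:

```python
import re, sys

# Rough sketch of the two pylint messages enabled in CI (not pylint's real
# checkers): C0303 trailing-whitespace and W0311 bad-indentation, assuming
# the repo's 2-space indent unit.
for path in sys.argv[1:]:
  with open(path) as f:
    for lineno, line in enumerate(f, 1):
      if re.search(r"[ \t]+$", line.rstrip("\n")):
        print(f"{path}:{lineno}: C0303 trailing whitespace")
      indent = len(line) - len(line.lstrip(" "))
      if indent % 2:
        print(f"{path}:{lineno}: W0311 bad indentation ({indent} spaces)")
```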

View File

@@ -87,8 +87,8 @@ jobs:
key: linting-packages-${{ hashFiles('**/setup.py') }}-3.8
- name: Install dependencies
run: pip install -e '.[linting,testing,docs]' --extra-index-url https://download.pytorch.org/whl/cpu
-    - name: Lint with pylint
-      run: python -m pylint --disable=all -e W0311 -e C0303 --jobs=0 --indent-string='  ' **/*.py
+    - name: Lint bad-indentation and trailing-whitespace with pylint
+      run: python -m pylint --disable=all -e W0311 -e C0303 --jobs=0 --indent-string='  ' --recursive=y .
- name: Lint with ruff
run: |
pip3 install --upgrade --force-reinstall ruff
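On the "can we use ruff for this?" question: ruff's pycodestyle rules W291/W293 cover trailing whitespace, though its indentation-width checks differ from pylint's W0311, so that is worth verifying against the current ruff docs. Separately, the reason the old `**/*.py` glob missed nested dirs is shell globbing: without bash's `globstar` option, `**` matches only a single level. Python's `glob` behaves the same way by default, as a quick illustration:

```python
from glob import glob

# Without recursive=True, "**" degrades to "*" and matches one directory
# level, mirroring the old shell glob that skipped nested dirs; with
# recursive=True it walks the whole tree, like pylint's --recursive=y.
print(len(glob("**/*.py")))                  # one level deep only
print(len(glob("**/*.py", recursive=True)))  # every .py at any depth
```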

View File

@@ -7,7 +7,7 @@ extension-pkg-whitelist=scipy,cereal.messaging.messaging_pyx,PyQt5,av
# Add files or directories to the blacklist. They should be base names, not
# paths.
-ignore=CVS
+ignore=CVS,autogen,msm_kgsl.py
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.

View File

@@ -7,7 +7,7 @@ import gdown
from tqdm import tqdm
from tinygrad.helpers import getenv
-def gdrive_download(url:str, path:str): 
+def gdrive_download(url:str, path:str):
if not os.path.exists(path): gdown.download(url, path)
def wikipedia_uncompress_and_extract(file:str, path:str, small:bool=False):
@@ -47,8 +47,8 @@ def download_wikipedia(path:str):
gdrive_download("https://drive.google.com/uc?id=1tmMgLwoBvbEJEHXh77sqrXYw5RpqT8R_", os.path.join(path, "bert_reference_results_text_md5.txt"))
gdrive_download("https://drive.google.com/uc?id=14xV2OUGSQDG_yDBrmbSdcDC-QGeqpfs_", os.path.join(path, "results_text.tar.gz"))
wikipedia_uncompress_and_extract(os.path.join(path, "results_text.tar.gz"), path)
-if getenv("VERIFY_CHECKSUM", 0): 
+if getenv("VERIFY_CHECKSUM", 0):
verify_checksum(os.path.join(path, "results4"), os.path.join(path, "bert_reference_results_text_md5.txt"))
-if __name__ == "__main__": 
+if __name__ == "__main__":
download_wikipedia(getenv("BASEDIR", os.path.join(Path(__file__).parent / "wiki")))

View File

@@ -62,7 +62,7 @@ class AMDQueue():
self.wptr = to_mv(wptr, 8).cast("Q")
class PM4Executor(AMDQueue):
-def __init__(self, gpu, base, size, rptr, wptr): 
+def __init__(self, gpu, base, size, rptr, wptr):
self.gpu = gpu
super().__init__(base, size, rptr, wptr)
@@ -79,7 +79,7 @@ class PM4Executor(AMDQueue):
op = (header >> 8) & 0xFF
n = (header >> 16) & 0x3FFF
assert packet_type == 3, "Can parse only packet3"
-if op == amd_gpu.PACKET3_SET_SH_REG: self._exec_set_sh_reg(n) 
+if op == amd_gpu.PACKET3_SET_SH_REG: self._exec_set_sh_reg(n)
elif op == amd_gpu.PACKET3_ACQUIRE_MEM: self._exec_acquire_mem(n)
elif op == amd_gpu.PACKET3_RELEASE_MEM: self._exec_release_mem(n)
elif op == amd_gpu.PACKET3_WAIT_REG_MEM: cont = self._exec_wait_reg_mem(n)
@@ -111,7 +111,7 @@ class PM4Executor(AMDQueue):
if mem_data_sel == 1 or mem_data_sel == 2: ptr.cast('Q')[0] = val
elif mem_data_sel == 3:
if mem_event_type == CACHE_FLUSH_AND_INV_TS_EVENT: ptr.cast('Q')[0] = int(time.perf_counter() * 1e8)
-else: raise RuntimeError(f"Unknown {mem_data_sel=} {mem_event_type=}") 
+else: raise RuntimeError(f"Unknown {mem_data_sel=} {mem_event_type=}")
else: raise RuntimeError(f"Unknown {mem_data_sel=}")
def _exec_wait_reg_mem(self, n):
@@ -180,7 +180,7 @@ class PM4Executor(AMDQueue):
_ = self._next_dword() # do not emulate events for now
class SDMAExecutor(AMDQueue):
-def __init__(self, gpu, base, size, rptr, wptr): 
+def __init__(self, gpu, base, size, rptr, wptr):
self.gpu, self.base = gpu, base
super().__init__(base, size, rptr, wptr)
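For readers unfamiliar with PM4 packets: the executor above pulls the opcode and DWORD count out of each packet header. A small illustration of that bit layout, assuming the PM4 convention that the packet type lives in the top two bits (the header value itself is fabricated for the example):

```python
# Decode a PM4 type-3 header the same way the executor does; 0x76 is the
# PACKET3_SET_SH_REG opcode.
header = (3 << 30) | (5 << 16) | (0x76 << 8)
packet_type = (header >> 30) & 0x3   # top two bits; must be 3 here
op = (header >> 8) & 0xFF            # opcode byte
n = (header >> 16) & 0x3FFF          # payload size in DWORDs
print(packet_type, hex(op), n)       # 3 0x76 5
```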

View File

@@ -75,7 +75,7 @@ class VirtFile():
@staticmethod
def build_fstat(st_dev=0x20, st_ino=0x100000, st_mode=0o100777, st_nlink=1, st_uid=0, st_gid=0, st_rdev=0, st_size=0,
-st_blksize=4096, st_blocks=0, st_atime=0, st_mtime=0, st_ctime=0): 
+st_blksize=4096, st_blocks=0, st_atime=0, st_mtime=0, st_ctime=0):
assert (ssz:=struct.calcsize(fmt_string:='QQQIIIQQiQqqq')) == 96, f"{ssz} != 96"
return struct.pack(fmt_string, st_dev, st_ino, st_nlink, st_mode, st_uid, st_gid,
st_rdev, st_size, st_blksize, st_blocks, st_atime, st_mtime, st_ctime)
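The `ssz == 96` assertion above depends on native struct alignment, not just the field sizes. A quick check, assuming a typical 64-bit platform:

```python
import struct

# The raw fields sum to 88 bytes, but native ("@") alignment pads the two
# 8-byte Q fields that follow the 4-byte I/i runs, adding 2*4 bytes: 96.
fmt = "QQQIIIQQiQqqq"
print(struct.calcsize(fmt))        # 96 with native alignment (64-bit)
print(struct.calcsize("=" + fmt))  # 88 with standard sizes, no padding
```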

View File

@@ -169,7 +169,7 @@ class TrackedMemoryView:
self.mv[index] = value
self.wcb(self.mv, index)
-def cast(self, new_type, **kwargs): 
+def cast(self, new_type, **kwargs):
self.mv = self.mv.cast(new_type, **kwargs)
return self

View File

@@ -43,7 +43,7 @@ class NVDevFileDesc(VirtFileDesc):
self._mapping_userland = False
def ioctl(self, fd, request, argp): return self.driver.dev_ioctl(self.gpu, request, argp)
-def mmap(self, start, sz, prot, flags, fd, offset): 
+def mmap(self, start, sz, prot, flags, fd, offset):
start = libc.mmap(start, sz, prot, flags|mmap.MAP_ANONYMOUS, -1, 0)
if self._mapping_userland: self.driver.track_address(start, start+sz, lambda mv,off: None, lambda mv, off: self.driver._gpu_mmio_write(mv, off, self.gpu))
return start
@@ -81,7 +81,7 @@ class NVDriver(VirtDriver):
self.gpus[gpu_id] = NVGPU(gpu_id)
self.tracked_files += [VirtFile(f'/dev/nvidia{gpu_id}', functools.partial(NVDevFileDesc, driver=self, gpu=self.gpus[gpu_id]))]
-def open(self, name, flags, mode, virtfile): 
+def open(self, name, flags, mode, virtfile):
cl = virtfile.fdcls(self._alloc_fd())
self.opened_fds[cl.fd] = cl
return cl
@@ -185,7 +185,7 @@ class NVDriver(VirtDriver):
elif nr == nv_gpu.NV_ESC_RM_MAP_MEMORY:
st:Any = nv_gpu.nv_ioctl_nvos33_parameters_with_fd.from_address(argp)
obj = self.object_by_handle[st.params.hMemory]
-if isinstance(obj, NVUserMode): 
+if isinstance(obj, NVUserMode):
file = self.opened_fds[st.fd]
assert isinstance(file, NVDevFileDesc)
file._mapping_userland = True
@@ -208,7 +208,7 @@ class NVDriver(VirtDriver):
assert any(all(st.gpu_uuid.uuid[i] == gpu.gpu_uuid()[i] for i in range(16)) for gpu in self.gpus.values())
elif nr == nv_gpu.UVM_REGISTER_GPU_VASPACE: pass
elif nr == nv_gpu.UVM_ENABLE_PEER_ACCESS: pass # uvm and shared spaced are setup already, no emulation for now
-elif nr == nv_gpu.UVM_CREATE_EXTERNAL_RANGE: 
+elif nr == nv_gpu.UVM_CREATE_EXTERNAL_RANGE:
st = nv_gpu.UVM_CREATE_EXTERNAL_RANGE_PARAMS.from_address(argp)
libc.mmap(st.base, st.length, mmap.PROT_READ|mmap.PROT_WRITE, MAP_FIXED|mmap.MAP_SHARED|mmap.MAP_ANONYMOUS, -1, 0)
elif nr == nv_gpu.UVM_MAP_EXTERNAL_ALLOCATION:
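The emulation above backs the requested UVM range with plain anonymous memory. Python's own `mmap` module can express the anonymous read/write mapping, though not the MAP_FIXED placement at `st.base`, which is presumably why the driver calls `libc.mmap` directly. A minimal Unix-only sketch:

```python
import mmap

# Anonymous read/write mapping: the portable part of what the driver does.
# MAP_FIXED at a caller-chosen address has no stdlib equivalent, hence libc.
m = mmap.mmap(-1, 4096, prot=mmap.PROT_READ | mmap.PROT_WRITE)
m[:4] = b"\x00\x01\x02\x03"
print(m[:4])  # b'\x00\x01\x02\x03'
```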

View File

@@ -52,7 +52,7 @@ class GPFIFO:
def _state(self, reg): return self.state[reg]
def _state64(self, reg): return (self.state[reg] << 32) + self.state[reg + 4]
def _state64_le(self, reg): return (self.state[reg + 4] << 32) + self.state[reg]
def _reset_buf_state(self): self.buf, self.buf_ptr = None, 0
def _set_buf_state(self, gpfifo_entry):
ptr = ((gpfifo_entry >> 2) & 0xfffffffff) << 2
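`_state64` and `_state64_le` above differ only in which 32-bit register supplies the low word. A one-line sanity check of the composition:

```python
# _state64 treats the first register as the high word, _state64_le the
# second; either way two 32-bit halves combine into one 64-bit value.
lo, hi = 0xDEADBEEF, 0x12345678
assert (hi << 32) + lo == 0x12345678DEADBEEF
```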

View File

@@ -44,11 +44,11 @@ class BertForPretraining:
"""Default is BERT-large"""
self.bert = Bert(hidden_size, intermediate_size, max_position_embeddings, num_attention_heads, num_hidden_layers, type_vocab_size, vocab_size, attention_probs_dropout_prob, hidden_dropout_prob)
self.cls = BertPreTrainingHeads(hidden_size, vocab_size, self.bert.embeddings.word_embeddings.weight)
def __call__(self, input_ids:Tensor, attention_mask:Tensor, masked_lm_positions:Tensor, token_type_ids:Tensor):
output = self.bert(input_ids, attention_mask, token_type_ids)
return self.cls(output, masked_lm_positions)
def loss(self, prediction_logits:Tensor, seq_relationship_logits:Tensor, masked_lm_ids:Tensor, masked_lm_weights:Tensor, next_sentence_labels:Tensor):
# Reference has residual on denominator: https://github.com/mlcommons/training/blob/master/language_model/tensorflow/bert/run_pretraining.py#L315
def sparse_categorical_crossentropy(predictions:Tensor, labels:Tensor, ignore_index=-1):
@@ -60,7 +60,7 @@ class BertForPretraining:
masked_lm_loss = sparse_categorical_crossentropy(prediction_logits, masked_lm_ids, ignore_index=masked_lm_weights)
next_sentence_loss = seq_relationship_logits.binary_crossentropy_logits(next_sentence_labels)
return masked_lm_loss + next_sentence_loss
def accuracy(self, prediction_logits:Tensor, seq_relationship_logits:Tensor, masked_lm_ids:Tensor, masked_lm_weights:Tensor, next_sentence_labels:Tensor):
valid = masked_lm_ids != 0
@@ -73,7 +73,7 @@ class BertForPretraining:
next_sentence_loss = seq_relationship_logits.binary_crossentropy_logits(next_sentence_labels)
return masked_lm_accuracy.sum() / valid.sum(), seq_relationship_accuracy.mean(), masked_lm_loss, next_sentence_loss
def load_from_pretrained(self, tf_weight_path:str=Path(__file__).parent.parent / "datasets" / "wiki"):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Mute tf flag info
# load from tensorflow
@@ -148,7 +148,7 @@ class BertPredictionHeadTransform:
self.LayerNorm = LayerNorm(hidden_size, eps=1e-12)
def __call__(self, hidden_states:Tensor):
-return self.LayerNorm(gelu(self.dense(hidden_states))) 
+return self.LayerNorm(gelu(self.dense(hidden_states)))
class BertPooler:
def __init__(self, hidden_size:int):

View File

@@ -315,7 +315,7 @@ class Open:
pooled = x[:, text.argmax(dim=-1)] @ self.text_projection
return pooled
class ClipVisionTransformer:
def __init__(self, width:int, layers:int, d_head:int, image_size:int, patch_size:int):
grid_size = image_size // patch_size

View File

@@ -202,7 +202,7 @@ def _dump_qmd(address, packets):
bits = vv
if isinstance(vv, types.FunctionType):
bits = vv(0)
if bits is not None:
res = 0
for bt in range(bits[1], bits[0]+1): res |= ((gpfifo[i + 3 + bt // 32] >> (bt % 32)) & 0x1) << (bt - bits[1])

View File

@@ -21,7 +21,7 @@ if __name__ == '__main__':
ast_strs = [args.ast]
elif args.file is not None:
with open(args.file, 'r') as file:
-ast_strs = file.readlines() 
+ast_strs = file.readlines()
for i, ast_str in enumerate(ast_strs):
print(f"optimizing {i}/{len(ast_strs)}\nast={ast_str}")

View File

@@ -908,10 +908,10 @@ class TestIndexing(unittest.TestCase):
numpy_testing_assert_equal_helper(out, ref)
def test_int_indices(self):
-v = Tensor.randn(5, 7, 3)
-numpy_testing_assert_equal_helper(v[[0, 4, 2]].shape, (3, 7, 3))
-numpy_testing_assert_equal_helper(v[:, [0, 4, 2]].shape, (5, 3, 3))
-numpy_testing_assert_equal_helper(v[:, [[0, 1], [4, 3]]].shape, (5, 2, 2, 3))
+v = Tensor.randn(5, 7, 3)
+numpy_testing_assert_equal_helper(v[[0, 4, 2]].shape, (3, 7, 3))
+numpy_testing_assert_equal_helper(v[:, [0, 4, 2]].shape, (5, 3, 3))
+numpy_testing_assert_equal_helper(v[:, [[0, 1], [4, 3]]].shape, (5, 2, 2, 3))
# TODO fancy setitem
'''
@@ -983,7 +983,7 @@ class TestIndexing(unittest.TestCase):
numpy_testing_assert_equal_helper(x[[1, 2]].shape, (2, 0))
numpy_testing_assert_equal_helper(x[[], []].shape, (0,))
with self.assertRaises(IndexError):
-x[:, [0, 1]] 
+x[:, [0, 1]]
def test_empty_slice(self):
x = Tensor.randn(2, 3, 4, 5)
@@ -1036,8 +1036,8 @@ class TestIndexing(unittest.TestCase):
a[...] = neg_ones_expanded * 4
numpy_testing_assert_equal_helper(a, neg_ones * 4)
if a.dim() == 0:
-with self.assertRaises(IndexError):
-a[:] = neg_ones_expanded * 5
+with self.assertRaises(IndexError):
+a[:] = neg_ones_expanded * 5
@unittest.skip("bool indexing not supported")
def test_index_scalar_with_bool_mask(self):
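The shapes asserted in `test_int_indices` above follow NumPy's fancy-indexing semantics, which tinygrad mirrors here. A NumPy reference check:

```python
import numpy as np

# Fancy indexing: the index array's shape replaces the indexed dimension.
v = np.random.randn(5, 7, 3)
assert v[[0, 4, 2]].shape == (3, 7, 3)                # 3 ints along dim 0
assert v[:, [0, 4, 2]].shape == (5, 3, 3)             # 3 ints along dim 1
assert v[:, [[0, 1], [4, 3]]].shape == (5, 2, 2, 3)   # 2x2 index array
```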

View File

@@ -46,7 +46,7 @@ class TestSymbolic(unittest.TestCase):
self.helper_test_variable(expr, 0, 1, "(((idx1*4)+FLOAT4_INDEX)<512)")
def test_div_reduction(self):
-self.helper_test_variable(Variable("a", 2, 3)//2, 1, 1, "1") 
+self.helper_test_variable(Variable("a", 2, 3)//2, 1, 1, "1")
def test_var_becomes_num(self):
assert isinstance(Variable("a", 2, 2), NumNode)

View File

@@ -100,7 +100,7 @@ class TestSymbolic(unittest.TestCase):
self.helper_test_variable(expr, 0, 1, "(((idx1*4)+FLOAT4_INDEX)<512)")
def test_div_reduction(self):
-self.helper_test_variable(Variable("a", 2, 3)//2, 1, 1, "1") 
+self.helper_test_variable(Variable("a", 2, 3)//2, 1, 1, "1")
#def test_var_becomes_num(self):
# assert isinstance(Variable("a", 2, 2), NumNode)
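Why `Variable("a", 2, 3)//2` reduces to the constant "1" in both div-reduction tests above: over the variable's whole domain the floor division never changes value, so min and max coincide and the expression folds to a number. A plain-Python check:

```python
# a ranges over {2, 3}; a//2 is 1 for every value, so the symbolic engine
# can fold the expression to the constant 1 (min == max == 1).
assert {a // 2 for a in (2, 3)} == {1}
```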