#! /usr/bin/env python3

"""The Tab Nanny despises ambiguous indentation.  She knows no mercy.

tabnanny -- Detection of ambiguous indentation

For the time being this module is intended to be called as a script.
However, it is possible to import it into an IDE and use the function
check() described below.

Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""

# Released to the public domain, by Tim Peters, 15 April 1998.

# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like.  This will be addressed later.

__version__ = "6"

import os
import sys
import tokenize

__all__ = ["check", "NannyNag", "process_tokens"]

verbose = 0
filename_only = 0

def errprint(*args):
    sep = ""
    for arg in args:
        sys.stderr.write(sep + str(arg))
        sep = " "
    sys.stderr.write("\n")
    sys.exit(1)

def main():
    import getopt

    global verbose, filename_only
    try:
        opts, args = getopt.getopt(sys.argv[1:], "qv")
    except getopt.error as msg:
        errprint(msg)
    for o, a in opts:
        if o == '-q':
            filename_only = filename_only + 1
        if o == '-v':
            verbose = verbose + 1
    if not args:
        errprint("Usage:", sys.argv[0], "[-v] [-q] file_or_directory ...")
    for arg in args:
        check(arg)

class NannyNag(Exception):
    """
    Raised by process_tokens() if detecting an ambiguous indent.
    Captured and handled in check().
    """
    def __init__(self, lineno, msg, line):
        self.lineno, self.msg, self.line = lineno, msg, line

    def get_lineno(self):
        return self.lineno

    def get_msg(self):
        return self.msg

    def get_line(self):
        return self.line

def check(file):
    """check(file_or_dir)

    If file_or_dir is a directory and not a symbolic link, then recursively
    descend the directory tree named by file_or_dir, checking all .py files
    along the way.  If file_or_dir is an ordinary Python source file, it is
    checked for whitespace related problems.  The diagnostic messages are
    written to standard output using the print() function.
    """

    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("%r: listing directory" % (file,))
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    try:
        f = tokenize.open(file)
    except OSError as msg:
        errprint("%r: I/O Error: %s" % (file, msg))
        return

    if verbose > 1:
        print("checking %r ..." % file)

    try:
        process_tokens(tokenize.generate_tokens(f.readline))

    except tokenize.TokenError as msg:
        errprint("%r: Token Error: %s" % (file, msg))
        return

    except IndentationError as msg:
        errprint("%r: Indentation Error: %s" % (file, msg))
        return

    except SyntaxError as msg:
        errprint("%r: Syntax Error: %s" % (file, msg))
        return

    except NannyNag as nag:
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
            print("offending line: %r" % (line,))
            print(nag.get_msg())
        else:
            if ' ' in file: file = '"' + file + '"'
            if filename_only: print(file)
            else: print(file, badline, repr(line))
        return

    finally:
        f.close()

    if verbose:
        print("%r: Clean bill of health." % (file,))
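
# Typical command-line use (the file name, line number, and offending line
# shown here are illustrative; the exact report format depends on the -q and
# -v flags handled in main()):
#
#     $ python -m tabnanny sloppy.py
#     sloppy.py 3 '\tprint("hello")\n'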

class Whitespace:
    # the characters used for space and tab
    S, T = ' \t'

    # members:
    #   raw
    #       the original string
    #   n
    #       the number of leading whitespace characters in raw
    #   nt
    #       the number of tabs in raw[:n]
    #   norm
    #       the normal form as a pair (count, trailing), where:
    #       count
    #           a tuple such that raw[:n] contains count[i]
    #           instances of S * i + T
    #       trailing
    #           the number of trailing spaces in raw[:n]
    #       It's A Theorem that m.indent_level(t) ==
    #       n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
    #   is_simple
    #       true iff raw[:n] is of the form (T*)(S*)
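    #
    # For example, for raw == "  \t \t" the first tab is preceded by a run
    # of two spaces and the second by a run of one space, with no spaces
    # after the last tab, so norm == ((0, 1, 1), 0), n == 5, nt == 2, and
    # is_simple is False.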

    def __init__(self, ws):
        self.raw = ws
        S, T = Whitespace.S, Whitespace.T
        count = []
        b = n = nt = 0
        for ch in self.raw:
            if ch == S:
                n = n + 1
                b = b + 1
            elif ch == T:
                n = n + 1
                nt = nt + 1
                if b >= len(count):
                    count = count + [0] * (b - len(count) + 1)
                count[b] = count[b] + 1
                b = 0
            else:
                break
        self.n = n
        self.nt = nt
        self.norm = tuple(count), b
        self.is_simple = len(count) <= 1

    # return length of longest contiguous run of spaces (whether or not
    # preceding a tab)
    def longest_run_of_spaces(self):
        count, trailing = self.norm
        return max(len(count)-1, trailing)

    def indent_level(self, tabsize):
        # count, il = self.norm
        # for i in range(len(count)):
        #     if count[i]:
        #         il = il + (i//tabsize + 1)*tabsize * count[i]
        # return il

        # quicker:
        # il = trailing + sum (i//ts + 1)*ts*count[i] =
        #      trailing + ts * sum (i//ts + 1)*count[i] =
        #      trailing + ts * sum i//ts*count[i] + count[i] =
        #      trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] =
        #      trailing + ts * [(sum i//ts*count[i]) + num_tabs]
        # and note that i//ts*count[i] is 0 when i < ts

        count, trailing = self.norm
        il = 0
        for i in range(tabsize, len(count)):
            il = il + i//tabsize * count[i]
        return trailing + tabsize * (il + self.nt)
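
    # For example, with raw == "  \t \t" as above, indent_level(4) == 8 and
    # indent_level(2) == 6, i.e. the columns reached after expanding the
    # tabs at tab sizes 4 and 2 respectively.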

    # return true iff self.indent_level(t) == other.indent_level(t)
    # for all t >= 1
    def equal(self, other):
        return self.norm == other.norm

    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
    # Intended to be used after not self.equal(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_equal_witness(self, other):
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) != other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a

    # Return True iff self.indent_level(t) < other.indent_level(t)
    # for all t >= 1.
    # The algorithm is due to Vincent Broman.
    # Easy to prove it's correct.
    # XXXpost that.
    # Trivial to prove n is sharp (consider T vs ST).
    # Unknown whether there's a faster general way.  I suspected so at
    # first, but no longer.
    # For the special (but common!) case where M and N are both of the
    # form (T*)(S*), M.less(N) iff M.len() < N.len() and
    # M.num_tabs() <= N.num_tabs().  Proof is easy but kinda long-winded.
    # XXXwrite that up.
    # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
    def less(self, other):
        if self.n >= other.n:
            return False
        if self.is_simple and other.is_simple:
            return self.nt <= other.nt
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        # the self.n >= other.n test already did it for ts=1
        for ts in range(2, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                return False
        return True
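
    # For example, Whitespace("\t").less(Whitespace(" \t")) is False even
    # though the second string is longer: at tab size 2 both expand to
    # column 2, so the two indents are ambiguous (this is the "T vs ST"
    # case mentioned above).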

    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
    # Intended to be used after not self.less(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_less_witness(self, other):
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a

def format_witnesses(w):
    firsts = (str(tup[0]) for tup in w)
    prefix = "at tab size"
    if len(w) > 1:
        prefix = prefix + "s"
    return prefix + " " + ', '.join(firsts)
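
# For example, format_witnesses([(1, 1, 2), (8, 8, 9)]) returns
# "at tab sizes 1, 8".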

def process_tokens(tokens):
    try:
        _process_tokens(tokens)
    except TabError as e:
        raise NannyNag(e.lineno, e.msg, e.text)
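
# The TabError caught above is raised while the token stream is being
# consumed inside _process_tokens(); re-raising it as a NannyNag lets
# check() report it the same way as the explicit whitespace comparisons
# made below.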

def _process_tokens(tokens):
    INDENT = tokenize.INDENT
    DEDENT = tokenize.DEDENT
    NEWLINE = tokenize.NEWLINE
    JUNK = tokenize.COMMENT, tokenize.NL
    indents = [Whitespace("")]
    check_equal = 0

    for (type, token, start, end, line) in tokens:
        if type == NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = 1

        elif type == INDENT:
            check_equal = 0
            thisguy = Whitespace(token)
            if not indents[-1].less(thisguy):
                witness = indents[-1].not_less_witness(thisguy)
                msg = "indent not greater e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
            indents.append(thisguy)

        elif type == DEDENT:
            # there's nothing we need to check here!  what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack

            # Ouch!  This assert triggers if the last line of the source
            # is indented *and* lacks a newline -- then DEDENTs pop out
            # of thin air.
            # assert check_equal  # else no earlier NEWLINE, or an earlier INDENT
            check_equal = 1

            del indents[-1]

        elif check_equal and type not in JUNK:
            # this is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading whitespace
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            check_equal = 0
            thisguy = Whitespace(line)
            if not indents[-1].equal(thisguy):
                witness = indents[-1].not_equal_witness(thisguy)
                msg = "indent not equal e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)

if __name__ == '__main__':
    main()