usd.py

  1. """
  2. pygments.lexers.usd
  3. ~~~~~~~~~~~~~~~~~~~
  4. The module that parses Pixar's Universal Scene Description file format.
  5. :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
  6. :license: BSD, see LICENSE for details.
  7. """
  8. from pygments.lexer import RegexLexer, bygroups
  9. from pygments.lexer import words as words_
  10. from pygments.lexers._usd_builtins import COMMON_ATTRIBUTES, KEYWORDS, \
  11. OPERATORS, SPECIAL_NAMES, TYPES
  12. from pygments.token import Comment, Keyword, Name, Number, Operator, \
  13. Punctuation, String, Text, Whitespace
  14. __all__ = ["UsdLexer"]


def _keywords(words, type_):
    # Wrap ``words`` in a single word-boundary rule that maps to ``type_``.
    return [(words_(words, prefix=r"\b", suffix=r"\b"), type_)]
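

# Regex fragments shared by the attribute-declaration rules in UsdLexer below:
# ``_TYPE`` matches a type name with an optional array suffix (e.g. ``float[]``),
# ``_BASE_ATTRIBUTE`` matches a (possibly namespaced) attribute name with an
# optional ``.timeSamples`` suffix, and ``_WHITESPACE`` matches inline spacing.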
_TYPE = r"(\w+(?:\[\])?)"
_BASE_ATTRIBUTE = r"(\w+(?:\:\w+)*)(?:(\.)(timeSamples))?"
_WHITESPACE = r"([ \t]+)"


class UsdLexer(RegexLexer):
    """
    A lexer that parses Pixar's Universal Scene Description file format.

    .. versionadded:: 2.6
    """

    name = "USD"
    url = 'https://graphics.pixar.com/usd/release/index.html'
    aliases = ["usd", "usda"]
    filenames = ["*.usd", "*.usda"]

    tokens = {
        "root": [
  31. (r"(custom){_WHITESPACE}(uniform)(\s+){}(\s+){}(\s*)(=)".format(
  32. _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
  33. bygroups(Keyword.Token, Whitespace, Keyword.Token, Whitespace,
  34. Keyword.Type, Whitespace, Name.Attribute, Text,
  35. Name.Keyword.Tokens, Whitespace, Operator)),
  36. (r"(custom){_WHITESPACE}{}(\s+){}(\s*)(=)".format(
  37. _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
  38. bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace,
  39. Name.Attribute, Text, Name.Keyword.Tokens, Whitespace,
  40. Operator)),
  41. (r"(uniform){_WHITESPACE}{}(\s+){}(\s*)(=)".format(
  42. _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
  43. bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace,
  44. Name.Attribute, Text, Name.Keyword.Tokens, Whitespace,
  45. Operator)),
  46. (r"{}{_WHITESPACE}{}(\s*)(=)".format(
  47. _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
  48. bygroups(Keyword.Type, Whitespace, Name.Attribute, Text,
  49. Name.Keyword.Tokens, Whitespace, Operator)),
  50. ] +
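        # Keyword, special-name, attribute, operator, and type tables from
        # pygments.lexers._usd_builtins, plus namespaced attribute names and
        # array-typed variants of the known types.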
        _keywords(KEYWORDS, Keyword.Tokens) +
        _keywords(SPECIAL_NAMES, Name.Builtins) +
        _keywords(COMMON_ATTRIBUTES, Name.Attribute) +
        [(r"\b\w+:[\w:]+\b", Name.Attribute)] +
        _keywords(OPERATORS, Operator) +  # more attributes
        [(type_ + r"\[\]", Keyword.Type) for type_ in TYPES] +
        _keywords(TYPES, Keyword.Type) +
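        # Literals, punctuation, comments, and other catch-all rules.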
        [
            (r"[(){}\[\]]", Punctuation),
            ("#.*?$", Comment.Single),
            (",", Punctuation),
            (";", Punctuation),  # ";"s are allowed to combine separate metadata lines
            ("=", Operator),
            (r"[-]*([0-9]*[.])?[0-9]+(?:e[+-]*\d+)?", Number),
            (r"'''(?:.|\n)*?'''", String),
            (r'"""(?:.|\n)*?"""', String),
            (r"'.*?'", String),
            (r'".*?"', String),
            (r"<(\.\./)*([\w/]+|[\w/]+\.\w+[\w:]*)>", Name.Namespace),
            (r"@.*?@", String.Interpol),
            (r'\(.*"[.\\n]*".*\)', String.Doc),
            (r"\A#usda .+$", Comment.Hashbang),
            (r"\s+", Whitespace),
            (r"\w+", Text),
            (r"[_:.]+", Punctuation),
        ],
    }
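

# A minimal usage sketch (not part of the original module): assuming Pygments
# is installed, the lexer can be driven through the standard ``highlight``
# helper. The ``_SAMPLE`` snippet and the choice of TerminalFormatter are
# illustrative only.
if __name__ == "__main__":
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    _SAMPLE = '''#usda 1.0
def Xform "hello"
{
    custom uniform double radius = 2.0
}
'''
    # Prints the sample with ANSI colors chosen by the formatter.
    print(highlight(_SAMPLE, UsdLexer(), TerminalFormatter()))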