parser.py 96 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359
  1. from fontTools.feaLib.error import FeatureLibError
  2. from fontTools.feaLib.lexer import Lexer, IncludingLexer, NonIncludingLexer
  3. from fontTools.feaLib.variableScalar import VariableScalar
  4. from fontTools.misc.encodingTools import getEncoding
  5. from fontTools.misc.textTools import bytechr, tobytes, tostr
  6. import fontTools.feaLib.ast as ast
  7. import logging
  8. import os
  9. import re
  10. log = logging.getLogger(__name__)
class Parser(object):
    """Initializes a Parser object.

    Example:

        .. code:: python

            from fontTools.feaLib.parser import Parser
            parser = Parser(file, font.getReverseGlyphMap())
            parsetree = parser.parse()

    Note: the ``glyphNames`` iterable serves a double role to help distinguish
    glyph names from ranges in the presence of hyphens and to ensure that glyph
    names referenced in a feature file are actually part of a font's glyph set.
    If the iterable is left empty, no glyph name in glyph set checking takes
    place, and all glyph tokens containing hyphens are treated as literal glyph
    names, not as ranges. (Adding a space around the hyphen can, in any case,
    help to disambiguate ranges from glyph names containing hyphens.)

    By default, the parser will follow ``include()`` statements in the feature
    file. To turn this off, pass ``followIncludes=False``. Pass a directory string as
    ``includeDir`` to explicitly declare a directory to search included feature files
    in.
    """

    # Registry of plug-in statement parsers, keyed by keyword; callers or
    # subclasses may populate this to support custom top-level syntax.
    extensions = {}
    # AST module used for node construction; a class attribute so subclasses
    # can substitute their own node implementations.
    ast = ast
    # Valid Stylistic Set feature tags: ss01..ss20.
    SS_FEATURE_TAGS = {"ss%02d" % i for i in range(1, 20 + 1)}
    # Valid Character Variant feature tags: cv01..cv99.
    CV_FEATURE_TAGS = {"cv%02d" % i for i in range(1, 99 + 1)}
    def __init__(
        self, featurefile, glyphNames=(), followIncludes=True, includeDir=None, **kwargs
    ):
        """Set up parser state and the lexer over *featurefile*.

        Args:
            featurefile: File object or path of the feature file to parse.
            glyphNames: Iterable of glyph names in the font's glyph set
                (see the class docstring for its double role).
            followIncludes: Whether ``include()`` statements are followed.
            includeDir: Optional directory in which to search included files.
            **kwargs: Only the deprecated ``glyphMap`` keyword is accepted.

        Raises:
            TypeError: If both ``glyphNames`` and ``glyphMap`` are given, or
                on any unsupported keyword argument.
        """
        if "glyphMap" in kwargs:
            from fontTools.misc.loggingTools import deprecateArgument

            deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead")
            if glyphNames:
                raise TypeError(
                    "'glyphNames' and (deprecated) 'glyphMap' are " "mutually exclusive"
                )
            glyphNames = kwargs.pop("glyphMap")
        if kwargs:
            raise TypeError(
                "unsupported keyword argument%s: %s"
                % ("" if len(kwargs) == 1 else "s", ", ".join(repr(k) for k in kwargs))
            )
        self.glyphNames_ = set(glyphNames)
        self.doc_ = self.ast.FeatureFile()
        # Symbol tables for named entities declared in the feature file.
        self.anchors_ = SymbolTable()
        self.glyphclasses_ = SymbolTable()
        self.lookups_ = SymbolTable()
        self.valuerecords_ = SymbolTable()
        # Tables whose scopes open and close along with blocks.
        self.symbol_tables_ = {self.anchors_, self.valuerecords_}
        # One-token lookahead; maintained by advance_lexer_().
        self.next_token_type_, self.next_token_ = (None, None)
        self.cur_comments_ = []
        self.next_token_location_ = None
        lexerClass = IncludingLexer if followIncludes else NonIncludingLexer
        self.lexer_ = lexerClass(featurefile, includeDir=includeDir)
        # Glyph names referenced but absent from the glyph set; collected here
        # and reported together at the end of parse().
        self.missing = {}
        self.advance_lexer_(comments=True)
    def parse(self):
        """Parse the file, and return a :class:`fontTools.feaLib.ast.FeatureFile`
        object representing the root of the abstract syntax tree containing the
        parsed contents of the file.

        Raises:
            FeatureLibError: On any syntax error, or — after parsing — if any
                referenced glyph names are missing from the glyph set.
        """
        statements = self.doc_.statements
        # Dispatch on the current token until the lexer is exhausted.
        while self.next_token_type_ is not None or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(
                    self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
                )
            elif self.is_cur_keyword_("include"):
                statements.append(self.parse_include_())
            elif self.cur_token_type_ is Lexer.GLYPHCLASS:
                statements.append(self.parse_glyphclass_definition_())
            elif self.is_cur_keyword_(("anon", "anonymous")):
                statements.append(self.parse_anonymous_())
            elif self.is_cur_keyword_("anchorDef"):
                statements.append(self.parse_anchordef_())
            elif self.is_cur_keyword_("languagesystem"):
                statements.append(self.parse_languagesystem_())
            elif self.is_cur_keyword_("lookup"):
                statements.append(self.parse_lookup_(vertical=False))
            elif self.is_cur_keyword_("markClass"):
                statements.append(self.parse_markClass_())
            elif self.is_cur_keyword_("feature"):
                statements.append(self.parse_feature_block_())
            elif self.is_cur_keyword_("conditionset"):
                statements.append(self.parse_conditionset_())
            elif self.is_cur_keyword_("variation"):
                # A ``variation`` block is parsed like a feature block.
                statements.append(self.parse_feature_block_(variation=True))
            elif self.is_cur_keyword_("table"):
                statements.append(self.parse_table_())
            elif self.is_cur_keyword_("valueRecordDef"):
                statements.append(self.parse_valuerecord_definition_(vertical=False))
            elif (
                self.cur_token_type_ is Lexer.NAME
                and self.cur_token_ in self.extensions
            ):
                # User-registered extension keyword.
                statements.append(self.extensions[self.cur_token_](self))
            elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";":
                # Stray semicolons are tolerated at top level.
                continue
            else:
                raise FeatureLibError(
                    "Expected feature, languagesystem, lookup, markClass, "
                    'table, or glyph class definition, got {} "{}"'.format(
                        self.cur_token_type_, self.cur_token_
                    ),
                    self.cur_token_location_,
                )
        # Report any missing glyphs at the end of parsing
        if self.missing:
            error = [
                " %s (first found at %s)" % (name, loc)
                for name, loc in self.missing.items()
            ]
            raise FeatureLibError(
                "The following glyph names are referenced but are missing from the "
                "glyph set:\n" + ("\n".join(error)),
                None,
            )
        return self.doc_
    def parse_anchor_(self):
        # Parses an anchor in any of the four formats given in the feature
        # file specification (2.e.vii). Returns an ast.Anchor, or None for
        # ``<anchor NULL>``.
        self.expect_symbol_("<")
        self.expect_keyword_("anchor")
        location = self.cur_token_location_

        if self.next_token_ == "NULL":  # Format D
            self.expect_keyword_("NULL")
            self.expect_symbol_(">")
            return None

        if self.next_token_type_ == Lexer.NAME:  # Format E
            # Reference to a named anchor previously declared with anchorDef.
            name = self.expect_name_()
            anchordef = self.anchors_.resolve(name)
            if anchordef is None:
                raise FeatureLibError(
                    'Unknown anchor "%s"' % name, self.cur_token_location_
                )
            self.expect_symbol_(">")
            return self.ast.Anchor(
                anchordef.x,
                anchordef.y,
                name=name,
                contourpoint=anchordef.contourpoint,
                xDeviceTable=None,
                yDeviceTable=None,
                location=location,
            )

        # Formats A-C start with explicit coordinates (possibly variable).
        x, y = self.expect_number_(variable=True), self.expect_number_(variable=True)

        contourpoint = None
        if self.next_token_ == "contourpoint":  # Format B
            self.expect_keyword_("contourpoint")
            contourpoint = self.expect_number_()

        if self.next_token_ == "<":  # Format C
            xDeviceTable = self.parse_device_()
            yDeviceTable = self.parse_device_()
        else:
            xDeviceTable, yDeviceTable = None, None

        self.expect_symbol_(">")
        return self.ast.Anchor(
            x,
            y,
            name=None,
            contourpoint=contourpoint,
            xDeviceTable=xDeviceTable,
            yDeviceTable=yDeviceTable,
            location=location,
        )
  173. def parse_anchor_marks_(self):
  174. # Parses a sequence of ``[<anchor> mark @MARKCLASS]*.``
  175. anchorMarks = [] # [(self.ast.Anchor, markClassName)*]
  176. while self.next_token_ == "<":
  177. anchor = self.parse_anchor_()
  178. if anchor is None and self.next_token_ != "mark":
  179. continue # <anchor NULL> without mark, eg. in GPOS type 5
  180. self.expect_keyword_("mark")
  181. markClass = self.expect_markClass_reference_()
  182. anchorMarks.append((anchor, markClass))
  183. return anchorMarks
  184. def parse_anchordef_(self):
  185. # Parses a named anchor definition (`section 2.e.viii <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#2.e.vii>`_).
  186. assert self.is_cur_keyword_("anchorDef")
  187. location = self.cur_token_location_
  188. x, y = self.expect_number_(), self.expect_number_()
  189. contourpoint = None
  190. if self.next_token_ == "contourpoint":
  191. self.expect_keyword_("contourpoint")
  192. contourpoint = self.expect_number_()
  193. name = self.expect_name_()
  194. self.expect_symbol_(";")
  195. anchordef = self.ast.AnchorDefinition(
  196. name, x, y, contourpoint=contourpoint, location=location
  197. )
  198. self.anchors_.define(name, anchordef)
  199. return anchordef
    def parse_anonymous_(self):
        # Parses an anonymous data block (`section 10 <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#10>`_).
        assert self.is_cur_keyword_(("anon", "anonymous"))
        tag = self.expect_tag_()
        # The lexer scans the raw block content up to the closing "} tag;".
        _, content, location = self.lexer_.scan_anonymous_block(tag)
        self.advance_lexer_()
        self.expect_symbol_("}")
        end_tag = self.expect_tag_()
        # The lexer guarantees the closing tag matches the opening one.
        assert tag == end_tag, "bad splitting in Lexer.scan_anonymous_block()"
        self.expect_symbol_(";")
        return self.ast.AnonymousBlock(tag, content, location=location)
  211. def parse_attach_(self):
  212. # Parses a GDEF Attach statement (`section 9.b <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.b>`_)
  213. assert self.is_cur_keyword_("Attach")
  214. location = self.cur_token_location_
  215. glyphs = self.parse_glyphclass_(accept_glyphname=True)
  216. contourPoints = {self.expect_number_()}
  217. while self.next_token_ != ";":
  218. contourPoints.add(self.expect_number_())
  219. self.expect_symbol_(";")
  220. return self.ast.AttachStatement(glyphs, contourPoints, location=location)
  221. def parse_enumerate_(self, vertical):
  222. # Parse an enumerated pair positioning rule (`section 6.b.ii <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#6.b.ii>`_).
  223. assert self.cur_token_ in {"enumerate", "enum"}
  224. self.advance_lexer_()
  225. return self.parse_position_(enumerated=True, vertical=vertical)
  226. def parse_GlyphClassDef_(self):
  227. # Parses 'GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENTS;'
  228. assert self.is_cur_keyword_("GlyphClassDef")
  229. location = self.cur_token_location_
  230. if self.next_token_ != ",":
  231. baseGlyphs = self.parse_glyphclass_(accept_glyphname=False)
  232. else:
  233. baseGlyphs = None
  234. self.expect_symbol_(",")
  235. if self.next_token_ != ",":
  236. ligatureGlyphs = self.parse_glyphclass_(accept_glyphname=False)
  237. else:
  238. ligatureGlyphs = None
  239. self.expect_symbol_(",")
  240. if self.next_token_ != ",":
  241. markGlyphs = self.parse_glyphclass_(accept_glyphname=False)
  242. else:
  243. markGlyphs = None
  244. self.expect_symbol_(",")
  245. if self.next_token_ != ";":
  246. componentGlyphs = self.parse_glyphclass_(accept_glyphname=False)
  247. else:
  248. componentGlyphs = None
  249. self.expect_symbol_(";")
  250. return self.ast.GlyphClassDefStatement(
  251. baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=location
  252. )
  253. def parse_glyphclass_definition_(self):
  254. # Parses glyph class definitions such as '@UPPERCASE = [A-Z];'
  255. location, name = self.cur_token_location_, self.cur_token_
  256. self.expect_symbol_("=")
  257. glyphs = self.parse_glyphclass_(accept_glyphname=False)
  258. self.expect_symbol_(";")
  259. glyphclass = self.ast.GlyphClassDefinition(name, glyphs, location=location)
  260. self.glyphclasses_.define(name, glyphclass)
  261. return glyphclass
  262. def split_glyph_range_(self, name, location):
  263. # Since v1.20, the OpenType Feature File specification allows
  264. # for dashes in glyph names. A sequence like "a-b-c-d" could
  265. # therefore mean a single glyph whose name happens to be
  266. # "a-b-c-d", or it could mean a range from glyph "a" to glyph
  267. # "b-c-d", or a range from glyph "a-b" to glyph "c-d", or a
  268. # range from glyph "a-b-c" to glyph "d".Technically, this
  269. # example could be resolved because the (pretty complex)
  270. # definition of glyph ranges renders most of these splits
  271. # invalid. But the specification does not say that a compiler
  272. # should try to apply such fancy heuristics. To encourage
  273. # unambiguous feature files, we therefore try all possible
  274. # splits and reject the feature file if there are multiple
  275. # splits possible. It is intentional that we don't just emit a
  276. # warning; warnings tend to get ignored. To fix the problem,
  277. # font designers can trivially add spaces around the intended
  278. # split point, and we emit a compiler error that suggests
  279. # how exactly the source should be rewritten to make things
  280. # unambiguous.
  281. parts = name.split("-")
  282. solutions = []
  283. for i in range(len(parts)):
  284. start, limit = "-".join(parts[0:i]), "-".join(parts[i:])
  285. if start in self.glyphNames_ and limit in self.glyphNames_:
  286. solutions.append((start, limit))
  287. if len(solutions) == 1:
  288. start, limit = solutions[0]
  289. return start, limit
  290. elif len(solutions) == 0:
  291. raise FeatureLibError(
  292. '"%s" is not a glyph in the font, and it can not be split '
  293. "into a range of known glyphs" % name,
  294. location,
  295. )
  296. else:
  297. ranges = " or ".join(['"%s - %s"' % (s, l) for s, l in solutions])
  298. raise FeatureLibError(
  299. 'Ambiguous glyph range "%s"; '
  300. "please use %s to clarify what you mean" % (name, ranges),
  301. location,
  302. )
    def parse_glyphclass_(self, accept_glyphname, accept_null=False):
        # Parses a glyph class, either named or anonymous, or (if
        # ``bool(accept_glyphname)``) a glyph name. If ``bool(accept_null)`` then
        # also accept the special NULL glyph.
        if accept_glyphname and self.next_token_type_ in (Lexer.NAME, Lexer.CID):
            if accept_null and self.next_token_ == "NULL":
                # If you want a glyph called NULL, you should escape it.
                self.advance_lexer_()
                return self.ast.NullGlyph(location=self.cur_token_location_)
            glyph = self.expect_glyph_()
            self.check_glyph_name_in_glyph_set(glyph)
            return self.ast.GlyphName(glyph, location=self.cur_token_location_)
        if self.next_token_type_ is Lexer.GLYPHCLASS:
            # Named class reference, e.g. "@UPPERCASE".
            self.advance_lexer_()
            gc = self.glyphclasses_.resolve(self.cur_token_)
            if gc is None:
                raise FeatureLibError(
                    "Unknown glyph class @%s" % self.cur_token_,
                    self.cur_token_location_,
                )
            if isinstance(gc, self.ast.MarkClass):
                return self.ast.MarkClassName(gc, location=self.cur_token_location_)
            else:
                return self.ast.GlyphClassName(gc, location=self.cur_token_location_)

        # Anonymous class: "[ ... ]" containing names, ranges, CIDs, and
        # nested class references.
        self.expect_symbol_("[")
        location = self.cur_token_location_
        glyphs = self.ast.GlyphClass(location=location)
        while self.next_token_ != "]":
            if self.next_token_type_ is Lexer.NAME:
                glyph = self.expect_glyph_()
                location = self.cur_token_location_
                if "-" in glyph and self.glyphNames_ and glyph not in self.glyphNames_:
                    # A dashed token that is not itself a known glyph: try to
                    # interpret it as a range (see split_glyph_range_).
                    start, limit = self.split_glyph_range_(glyph, location)
                    self.check_glyph_name_in_glyph_set(start, limit)
                    glyphs.add_range(
                        start, limit, self.make_glyph_range_(location, start, limit)
                    )
                elif self.next_token_ == "-":
                    # Explicit range with a free-standing dash: "a - z".
                    start = glyph
                    self.expect_symbol_("-")
                    limit = self.expect_glyph_()
                    self.check_glyph_name_in_glyph_set(start, limit)
                    glyphs.add_range(
                        start, limit, self.make_glyph_range_(location, start, limit)
                    )
                else:
                    if "-" in glyph and not self.glyphNames_:
                        # Without a glyph set we cannot disambiguate; warn
                        # but treat the token as a literal glyph name.
                        log.warning(
                            str(
                                FeatureLibError(
                                    f"Ambiguous glyph name that looks like a range: {glyph!r}",
                                    location,
                                )
                            )
                        )
                    self.check_glyph_name_in_glyph_set(glyph)
                    glyphs.append(glyph)
            elif self.next_token_type_ is Lexer.CID:
                glyph = self.expect_glyph_()
                if self.next_token_ == "-":
                    # CID range, e.g. "\1 - \10".
                    range_location = self.cur_token_location_
                    range_start = self.cur_token_
                    self.expect_symbol_("-")
                    range_end = self.expect_cid_()
                    self.check_glyph_name_in_glyph_set(
                        f"cid{range_start:05d}",
                        f"cid{range_end:05d}",
                    )
                    glyphs.add_cid_range(
                        range_start,
                        range_end,
                        self.make_cid_range_(range_location, range_start, range_end),
                    )
                else:
                    # Single CID; canonical glyph name is zero-padded.
                    glyph_name = f"cid{self.cur_token_:05d}"
                    self.check_glyph_name_in_glyph_set(glyph_name)
                    glyphs.append(glyph_name)
            elif self.next_token_type_ is Lexer.GLYPHCLASS:
                # Nested named class reference inside the brackets.
                self.advance_lexer_()
                gc = self.glyphclasses_.resolve(self.cur_token_)
                if gc is None:
                    raise FeatureLibError(
                        "Unknown glyph class @%s" % self.cur_token_,
                        self.cur_token_location_,
                    )
                if isinstance(gc, self.ast.MarkClass):
                    gc = self.ast.MarkClassName(gc, location=self.cur_token_location_)
                else:
                    gc = self.ast.GlyphClassName(gc, location=self.cur_token_location_)
                glyphs.add_class(gc)
            else:
                raise FeatureLibError(
                    "Expected glyph name, glyph range, "
                    f"or glyph class reference, found {self.next_token_!r}",
                    self.next_token_location_,
                )
        self.expect_symbol_("]")
        return glyphs
  401. def parse_glyph_pattern_(self, vertical):
  402. # Parses a glyph pattern, including lookups and context, e.g.::
  403. #
  404. # a b
  405. # a b c' d e
  406. # a b c' lookup ChangeC d e
  407. prefix, glyphs, lookups, values, suffix = ([], [], [], [], [])
  408. hasMarks = False
  409. while self.next_token_ not in {"by", "from", ";", ","}:
  410. gc = self.parse_glyphclass_(accept_glyphname=True)
  411. marked = False
  412. if self.next_token_ == "'":
  413. self.expect_symbol_("'")
  414. hasMarks = marked = True
  415. if marked:
  416. if suffix:
  417. # makeotf also reports this as an error, while FontForge
  418. # silently inserts ' in all the intervening glyphs.
  419. # https://github.com/fonttools/fonttools/pull/1096
  420. raise FeatureLibError(
  421. "Unsupported contextual target sequence: at most "
  422. "one run of marked (') glyph/class names allowed",
  423. self.cur_token_location_,
  424. )
  425. glyphs.append(gc)
  426. elif glyphs:
  427. suffix.append(gc)
  428. else:
  429. prefix.append(gc)
  430. if self.is_next_value_():
  431. values.append(self.parse_valuerecord_(vertical))
  432. else:
  433. values.append(None)
  434. lookuplist = None
  435. while self.next_token_ == "lookup":
  436. if lookuplist is None:
  437. lookuplist = []
  438. self.expect_keyword_("lookup")
  439. if not marked:
  440. raise FeatureLibError(
  441. "Lookups can only follow marked glyphs",
  442. self.cur_token_location_,
  443. )
  444. lookup_name = self.expect_name_()
  445. lookup = self.lookups_.resolve(lookup_name)
  446. if lookup is None:
  447. raise FeatureLibError(
  448. 'Unknown lookup "%s"' % lookup_name, self.cur_token_location_
  449. )
  450. lookuplist.append(lookup)
  451. if marked:
  452. lookups.append(lookuplist)
  453. if not glyphs and not suffix: # eg., "sub f f i by"
  454. assert lookups == []
  455. return ([], prefix, [None] * len(prefix), values, [], hasMarks)
  456. else:
  457. if any(values[: len(prefix)]):
  458. raise FeatureLibError(
  459. "Positioning cannot be applied in the bactrack glyph sequence, "
  460. "before the marked glyph sequence.",
  461. self.cur_token_location_,
  462. )
  463. marked_values = values[len(prefix) : len(prefix) + len(glyphs)]
  464. if any(marked_values):
  465. if any(values[len(prefix) + len(glyphs) :]):
  466. raise FeatureLibError(
  467. "Positioning values are allowed only in the marked glyph "
  468. "sequence, or after the final glyph node when only one glyph "
  469. "node is marked.",
  470. self.cur_token_location_,
  471. )
  472. values = marked_values
  473. elif values and values[-1]:
  474. if len(glyphs) > 1 or any(values[:-1]):
  475. raise FeatureLibError(
  476. "Positioning values are allowed only in the marked glyph "
  477. "sequence, or after the final glyph node when only one glyph "
  478. "node is marked.",
  479. self.cur_token_location_,
  480. )
  481. values = values[-1:]
  482. elif any(values):
  483. raise FeatureLibError(
  484. "Positioning values are allowed only in the marked glyph "
  485. "sequence, or after the final glyph node when only one glyph "
  486. "node is marked.",
  487. self.cur_token_location_,
  488. )
  489. return (prefix, glyphs, lookups, values, suffix, hasMarks)
  490. def parse_ignore_glyph_pattern_(self, sub):
  491. location = self.cur_token_location_
  492. prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_(
  493. vertical=False
  494. )
  495. if any(lookups):
  496. raise FeatureLibError(
  497. f'No lookups can be specified for "ignore {sub}"', location
  498. )
  499. if not hasMarks:
  500. error = FeatureLibError(
  501. f'Ambiguous "ignore {sub}", there should be least one marked glyph',
  502. location,
  503. )
  504. log.warning(str(error))
  505. suffix, glyphs = glyphs[1:], glyphs[0:1]
  506. chainContext = (prefix, glyphs, suffix)
  507. return chainContext
  508. def parse_ignore_context_(self, sub):
  509. location = self.cur_token_location_
  510. chainContext = [self.parse_ignore_glyph_pattern_(sub)]
  511. while self.next_token_ == ",":
  512. self.expect_symbol_(",")
  513. chainContext.append(self.parse_ignore_glyph_pattern_(sub))
  514. self.expect_symbol_(";")
  515. return chainContext
  516. def parse_ignore_(self):
  517. # Parses an ignore sub/pos rule.
  518. assert self.is_cur_keyword_("ignore")
  519. location = self.cur_token_location_
  520. self.advance_lexer_()
  521. if self.cur_token_ in ["substitute", "sub"]:
  522. chainContext = self.parse_ignore_context_("sub")
  523. return self.ast.IgnoreSubstStatement(chainContext, location=location)
  524. if self.cur_token_ in ["position", "pos"]:
  525. chainContext = self.parse_ignore_context_("pos")
  526. return self.ast.IgnorePosStatement(chainContext, location=location)
  527. raise FeatureLibError(
  528. 'Expected "substitute" or "position"', self.cur_token_location_
  529. )
  530. def parse_include_(self):
  531. assert self.cur_token_ == "include"
  532. location = self.cur_token_location_
  533. filename = self.expect_filename_()
  534. # self.expect_symbol_(";")
  535. return ast.IncludeStatement(filename, location=location)
  536. def parse_language_(self):
  537. assert self.is_cur_keyword_("language")
  538. location = self.cur_token_location_
  539. language = self.expect_language_tag_()
  540. include_default, required = (True, False)
  541. if self.next_token_ in {"exclude_dflt", "include_dflt"}:
  542. include_default = self.expect_name_() == "include_dflt"
  543. if self.next_token_ == "required":
  544. self.expect_keyword_("required")
  545. required = True
  546. self.expect_symbol_(";")
  547. return self.ast.LanguageStatement(
  548. language, include_default, required, location=location
  549. )
  550. def parse_ligatureCaretByIndex_(self):
  551. assert self.is_cur_keyword_("LigatureCaretByIndex")
  552. location = self.cur_token_location_
  553. glyphs = self.parse_glyphclass_(accept_glyphname=True)
  554. carets = [self.expect_number_()]
  555. while self.next_token_ != ";":
  556. carets.append(self.expect_number_())
  557. self.expect_symbol_(";")
  558. return self.ast.LigatureCaretByIndexStatement(glyphs, carets, location=location)
  559. def parse_ligatureCaretByPos_(self):
  560. assert self.is_cur_keyword_("LigatureCaretByPos")
  561. location = self.cur_token_location_
  562. glyphs = self.parse_glyphclass_(accept_glyphname=True)
  563. carets = [self.expect_number_(variable=True)]
  564. while self.next_token_ != ";":
  565. carets.append(self.expect_number_(variable=True))
  566. self.expect_symbol_(";")
  567. return self.ast.LigatureCaretByPosStatement(glyphs, carets, location=location)
  568. def parse_lookup_(self, vertical):
  569. # Parses a ``lookup`` - either a lookup block, or a lookup reference
  570. # inside a feature.
  571. assert self.is_cur_keyword_("lookup")
  572. location, name = self.cur_token_location_, self.expect_name_()
  573. if self.next_token_ == ";":
  574. lookup = self.lookups_.resolve(name)
  575. if lookup is None:
  576. raise FeatureLibError(
  577. 'Unknown lookup "%s"' % name, self.cur_token_location_
  578. )
  579. self.expect_symbol_(";")
  580. return self.ast.LookupReferenceStatement(lookup, location=location)
  581. use_extension = False
  582. if self.next_token_ == "useExtension":
  583. self.expect_keyword_("useExtension")
  584. use_extension = True
  585. block = self.ast.LookupBlock(name, use_extension, location=location)
  586. self.parse_block_(block, vertical)
  587. self.lookups_.define(name, block)
  588. return block
    def parse_lookupflag_(self):
        # Parses a ``lookupflag`` statement, either specified by number or
        # in words.  Returns a LookupFlagStatement.
        assert self.is_cur_keyword_("lookupflag")
        location = self.cur_token_location_
        # format B: "lookupflag 6;"
        if self.next_token_type_ == Lexer.NUMBER:
            value = self.expect_number_()
            self.expect_symbol_(";")
            return self.ast.LookupFlagStatement(value, location=location)
        # format A: "lookupflag RightToLeft MarkAttachmentType @M;"
        value_seen = False
        value, markAttachment, markFilteringSet = 0, None, None
        # Simple flag names are OR-ed into ``value`` with these bit values.
        flags = {
            "RightToLeft": 1,
            "IgnoreBaseGlyphs": 2,
            "IgnoreLigatures": 4,
            "IgnoreMarks": 8,
        }
        seen = set()
        while self.next_token_ != ";":
            # Each flag keyword may be given at most once per statement.
            if self.next_token_ in seen:
                raise FeatureLibError(
                    "%s can be specified only once" % self.next_token_,
                    self.next_token_location_,
                )
            seen.add(self.next_token_)
            if self.next_token_ == "MarkAttachmentType":
                self.expect_keyword_("MarkAttachmentType")
                markAttachment = self.parse_glyphclass_(accept_glyphname=False)
            elif self.next_token_ == "UseMarkFilteringSet":
                self.expect_keyword_("UseMarkFilteringSet")
                markFilteringSet = self.parse_glyphclass_(accept_glyphname=False)
            elif self.next_token_ in flags:
                value_seen = True
                value = value | flags[self.expect_name_()]
            else:
                raise FeatureLibError(
                    '"%s" is not a recognized lookupflag' % self.next_token_,
                    self.next_token_location_,
                )
        self.expect_symbol_(";")
        # At least one flag, mark-attachment class, or filtering set required.
        if not any([value_seen, markAttachment, markFilteringSet]):
            raise FeatureLibError(
                "lookupflag must have a value", self.next_token_location_
            )
        return self.ast.LookupFlagStatement(
            value,
            markAttachment=markAttachment,
            markFilteringSet=markFilteringSet,
            location=location,
        )
  641. def parse_markClass_(self):
  642. assert self.is_cur_keyword_("markClass")
  643. location = self.cur_token_location_
  644. glyphs = self.parse_glyphclass_(accept_glyphname=True)
  645. if not glyphs.glyphSet():
  646. raise FeatureLibError(
  647. "Empty glyph class in mark class definition", location
  648. )
  649. anchor = self.parse_anchor_()
  650. name = self.expect_class_name_()
  651. self.expect_symbol_(";")
  652. markClass = self.doc_.markClasses.get(name)
  653. if markClass is None:
  654. markClass = self.ast.MarkClass(name)
  655. self.doc_.markClasses[name] = markClass
  656. self.glyphclasses_.define(name, markClass)
  657. mcdef = self.ast.MarkClassDefinition(
  658. markClass, anchor, glyphs, location=location
  659. )
  660. markClass.addDefinition(mcdef)
  661. return mcdef
    def parse_position_(self, enumerated, vertical):
        # Parses a GPOS rule, dispatching attachment rules to their dedicated
        # handlers.  ``enumerated`` is True when the rule was preceded by
        # "enum"/"enumerate".
        assert self.cur_token_ in {"position", "pos"}
        if self.next_token_ == "cursive":  # GPOS type 3
            return self.parse_position_cursive_(enumerated, vertical)
        elif self.next_token_ == "base":  # GPOS type 4
            return self.parse_position_base_(enumerated, vertical)
        elif self.next_token_ == "ligature":  # GPOS type 5
            return self.parse_position_ligature_(enumerated, vertical)
        elif self.next_token_ == "mark":  # GPOS type 6
            return self.parse_position_mark_(enumerated, vertical)
        location = self.cur_token_location_
        prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_(
            vertical
        )
        self.expect_symbol_(";")
        if any(lookups):
            # GPOS type 8: Chaining contextual positioning; explicit lookups
            if any(values):
                raise FeatureLibError(
                    'If "lookup" is present, no values must be specified', location
                )
            return self.ast.ChainContextPosStatement(
                prefix, glyphs, suffix, lookups, location=location
            )
        # Pair positioning, format A: "pos V 10 A -10;"
        # Pair positioning, format B: "pos V A -20;"
        if not prefix and not suffix and len(glyphs) == 2 and not hasMarks:
            if values[0] is None:  # Format B: "pos V A -20;"
                # In format B the single value belongs to the second glyph;
                # reversing the two-element list moves it to index 1.
                values.reverse()
            return self.ast.PairPosStatement(
                glyphs[0],
                values[0],
                glyphs[1],
                values[1],
                enumerated=enumerated,
                location=location,
            )
        if enumerated:
            raise FeatureLibError(
                '"enumerate" is only allowed with pair positionings', location
            )
        # Single positioning; forceChain when the pattern has marked glyphs.
        return self.ast.SinglePosStatement(
            list(zip(glyphs, values)),
            prefix,
            suffix,
            forceChain=hasMarks,
            location=location,
        )
  710. def parse_position_cursive_(self, enumerated, vertical):
  711. location = self.cur_token_location_
  712. self.expect_keyword_("cursive")
  713. if enumerated:
  714. raise FeatureLibError(
  715. '"enumerate" is not allowed with ' "cursive attachment positioning",
  716. location,
  717. )
  718. glyphclass = self.parse_glyphclass_(accept_glyphname=True)
  719. entryAnchor = self.parse_anchor_()
  720. exitAnchor = self.parse_anchor_()
  721. self.expect_symbol_(";")
  722. return self.ast.CursivePosStatement(
  723. glyphclass, entryAnchor, exitAnchor, location=location
  724. )
  725. def parse_position_base_(self, enumerated, vertical):
  726. location = self.cur_token_location_
  727. self.expect_keyword_("base")
  728. if enumerated:
  729. raise FeatureLibError(
  730. '"enumerate" is not allowed with '
  731. "mark-to-base attachment positioning",
  732. location,
  733. )
  734. base = self.parse_glyphclass_(accept_glyphname=True)
  735. marks = self.parse_anchor_marks_()
  736. self.expect_symbol_(";")
  737. return self.ast.MarkBasePosStatement(base, marks, location=location)
  738. def parse_position_ligature_(self, enumerated, vertical):
  739. location = self.cur_token_location_
  740. self.expect_keyword_("ligature")
  741. if enumerated:
  742. raise FeatureLibError(
  743. '"enumerate" is not allowed with '
  744. "mark-to-ligature attachment positioning",
  745. location,
  746. )
  747. ligatures = self.parse_glyphclass_(accept_glyphname=True)
  748. marks = [self.parse_anchor_marks_()]
  749. while self.next_token_ == "ligComponent":
  750. self.expect_keyword_("ligComponent")
  751. marks.append(self.parse_anchor_marks_())
  752. self.expect_symbol_(";")
  753. return self.ast.MarkLigPosStatement(ligatures, marks, location=location)
  754. def parse_position_mark_(self, enumerated, vertical):
  755. location = self.cur_token_location_
  756. self.expect_keyword_("mark")
  757. if enumerated:
  758. raise FeatureLibError(
  759. '"enumerate" is not allowed with '
  760. "mark-to-mark attachment positioning",
  761. location,
  762. )
  763. baseMarks = self.parse_glyphclass_(accept_glyphname=True)
  764. marks = self.parse_anchor_marks_()
  765. self.expect_symbol_(";")
  766. return self.ast.MarkMarkPosStatement(baseMarks, marks, location=location)
  767. def parse_script_(self):
  768. assert self.is_cur_keyword_("script")
  769. location, script = self.cur_token_location_, self.expect_script_tag_()
  770. self.expect_symbol_(";")
  771. return self.ast.ScriptStatement(script, location=location)
    def parse_substitute_(self):
        # Parses a GSUB rule (sub/substitute or rsub/reversesub) and returns
        # the most specific statement type the parsed pattern allows.
        assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"}
        location = self.cur_token_location_
        reverse = self.cur_token_ in {"reversesub", "rsub"}
        (
            old_prefix,
            old,
            lookups,
            values,
            old_suffix,
            hasMarks,
        ) = self.parse_glyph_pattern_(vertical=False)
        if any(values):
            raise FeatureLibError(
                "Substitution statements cannot contain values", location
            )
        new = []
        if self.next_token_ == "by":
            keyword = self.expect_keyword_("by")
            while self.next_token_ != ";":
                gc = self.parse_glyphclass_(accept_glyphname=True, accept_null=True)
                new.append(gc)
        elif self.next_token_ == "from":
            keyword = self.expect_keyword_("from")
            new = [self.parse_glyphclass_(accept_glyphname=False)]
        else:
            keyword = None
        self.expect_symbol_(";")
        if len(new) == 0 and not any(lookups):
            raise FeatureLibError(
                'Expected "by", "from" or explicit lookup references',
                self.cur_token_location_,
            )
        # GSUB lookup type 3: Alternate substitution.
        # Format: "substitute a from [a.1 a.2 a.3];"
        if keyword == "from":
            if reverse:
                raise FeatureLibError(
                    'Reverse chaining substitutions do not support "from"', location
                )
            if len(old) != 1 or len(old[0].glyphSet()) != 1:
                raise FeatureLibError('Expected a single glyph before "from"', location)
            if len(new) != 1:
                raise FeatureLibError(
                    'Expected a single glyphclass after "from"', location
                )
            return self.ast.AlternateSubstStatement(
                old_prefix, old[0], old_suffix, new[0], location=location
            )
        num_lookups = len([l for l in lookups if l is not None])
        is_deletion = False
        # "sub a by NULL;" deletes the glyph; NullGlyph is checked against
        # the module-level ``ast``, not ``self.ast``.
        if len(new) == 1 and isinstance(new[0], ast.NullGlyph):
            new = []  # Deletion
            is_deletion = True
        # GSUB lookup type 1: Single substitution.
        # Format A: "substitute a by a.sc;"
        # Format B: "substitute [one.fitted one.oldstyle] by one;"
        # Format C: "substitute [a-d] by [A.sc-D.sc];"
        if not reverse and len(old) == 1 and len(new) == 1 and num_lookups == 0:
            glyphs = list(old[0].glyphSet())
            replacements = list(new[0].glyphSet())
            if len(replacements) == 1:
                # A single replacement glyph applies to every input glyph.
                replacements = replacements * len(glyphs)
            if len(glyphs) != len(replacements):
                raise FeatureLibError(
                    'Expected a glyph class with %d elements after "by", '
                    "but found a glyph class with %d elements"
                    % (len(glyphs), len(replacements)),
                    location,
                )
            return self.ast.SingleSubstStatement(
                old, new, old_prefix, old_suffix, forceChain=hasMarks, location=location
            )
        # Glyph deletion, built as GSUB lookup type 2: Multiple substitution
        # with empty replacement.
        if is_deletion and len(old) == 1 and num_lookups == 0:
            return self.ast.MultipleSubstStatement(
                old_prefix,
                old[0],
                old_suffix,
                (),
                forceChain=hasMarks,
                location=location,
            )
        # GSUB lookup type 2: Multiple substitution.
        # Format: "substitute f_f_i by f f i;"
        #
        # GlyphsApp introduces two additional formats:
        # Format 1: "substitute [f_i f_l] by [f f] [i l];"
        # Format 2: "substitute [f_i f_l] by f [i l];"
        # http://handbook.glyphsapp.com/en/layout/multiple-substitution-with-classes/
        if not reverse and len(old) == 1 and len(new) > 1 and num_lookups == 0:
            count = len(old[0].glyphSet())
            for n in new:
                if not list(n.glyphSet()):
                    raise FeatureLibError("Empty class in replacement", location)
                if len(n.glyphSet()) != 1 and len(n.glyphSet()) != count:
                    raise FeatureLibError(
                        f'Expected a glyph class with 1 or {count} elements after "by", '
                        f"but found a glyph class with {len(n.glyphSet())} elements",
                        location,
                    )
            return self.ast.MultipleSubstStatement(
                old_prefix,
                old[0],
                old_suffix,
                new,
                forceChain=hasMarks,
                location=location,
            )
        # GSUB lookup type 4: Ligature substitution.
        # Format: "substitute f f i by f_f_i;"
        if (
            not reverse
            and len(old) > 1
            and len(new) == 1
            and len(new[0].glyphSet()) == 1
            and num_lookups == 0
        ):
            return self.ast.LigatureSubstStatement(
                old_prefix,
                old,
                old_suffix,
                list(new[0].glyphSet())[0],
                forceChain=hasMarks,
                location=location,
            )
        # GSUB lookup type 8: Reverse chaining substitution.
        if reverse:
            if len(old) != 1:
                raise FeatureLibError(
                    "In reverse chaining single substitutions, "
                    "only a single glyph or glyph class can be replaced",
                    location,
                )
            if len(new) != 1:
                raise FeatureLibError(
                    "In reverse chaining single substitutions, "
                    'the replacement (after "by") must be a single glyph '
                    "or glyph class",
                    location,
                )
            if num_lookups != 0:
                raise FeatureLibError(
                    "Reverse chaining substitutions cannot call named lookups", location
                )
            # Glyphs and replacements are paired up in sorted order here.
            glyphs = sorted(list(old[0].glyphSet()))
            replacements = sorted(list(new[0].glyphSet()))
            if len(replacements) == 1:
                replacements = replacements * len(glyphs)
            if len(glyphs) != len(replacements):
                raise FeatureLibError(
                    'Expected a glyph class with %d elements after "by", '
                    "but found a glyph class with %d elements"
                    % (len(glyphs), len(replacements)),
                    location,
                )
            return self.ast.ReverseChainSingleSubstStatement(
                old_prefix, old_suffix, old, new, location=location
            )
        if len(old) > 1 and len(new) > 1:
            raise FeatureLibError(
                "Direct substitution of multiple glyphs by multiple glyphs "
                "is not supported",
                location,
            )
        # If there are remaining glyphs to parse, this is an invalid GSUB statement
        if len(new) != 0 or is_deletion:
            raise FeatureLibError("Invalid substitution statement", location)
        # GSUB lookup type 6: Chaining contextual substitution.
        rule = self.ast.ChainContextSubstStatement(
            old_prefix, old, old_suffix, lookups, location=location
        )
        return rule
  946. def parse_subtable_(self):
  947. assert self.is_cur_keyword_("subtable")
  948. location = self.cur_token_location_
  949. self.expect_symbol_(";")
  950. return self.ast.SubtableStatement(location=location)
  951. def parse_size_parameters_(self):
  952. # Parses a ``parameters`` statement used in ``size`` features. See
  953. # `section 8.b <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.b>`_.
  954. assert self.is_cur_keyword_("parameters")
  955. location = self.cur_token_location_
  956. DesignSize = self.expect_decipoint_()
  957. SubfamilyID = self.expect_number_()
  958. RangeStart = 0.0
  959. RangeEnd = 0.0
  960. if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or SubfamilyID != 0:
  961. RangeStart = self.expect_decipoint_()
  962. RangeEnd = self.expect_decipoint_()
  963. self.expect_symbol_(";")
  964. return self.ast.SizeParameters(
  965. DesignSize, SubfamilyID, RangeStart, RangeEnd, location=location
  966. )
  967. def parse_size_menuname_(self):
  968. assert self.is_cur_keyword_("sizemenuname")
  969. location = self.cur_token_location_
  970. platformID, platEncID, langID, string = self.parse_name_()
  971. return self.ast.FeatureNameStatement(
  972. "size", platformID, platEncID, langID, string, location=location
  973. )
  974. def parse_table_(self):
  975. assert self.is_cur_keyword_("table")
  976. location, name = self.cur_token_location_, self.expect_tag_()
  977. table = self.ast.TableBlock(name, location=location)
  978. self.expect_symbol_("{")
  979. handler = {
  980. "GDEF": self.parse_table_GDEF_,
  981. "head": self.parse_table_head_,
  982. "hhea": self.parse_table_hhea_,
  983. "vhea": self.parse_table_vhea_,
  984. "name": self.parse_table_name_,
  985. "BASE": self.parse_table_BASE_,
  986. "OS/2": self.parse_table_OS_2_,
  987. "STAT": self.parse_table_STAT_,
  988. }.get(name)
  989. if handler:
  990. handler(table)
  991. else:
  992. raise FeatureLibError(
  993. '"table %s" is not supported' % name.strip(), location
  994. )
  995. self.expect_symbol_("}")
  996. end_tag = self.expect_tag_()
  997. if end_tag != name:
  998. raise FeatureLibError(
  999. 'Expected "%s"' % name.strip(), self.cur_token_location_
  1000. )
  1001. self.expect_symbol_(";")
  1002. return table
  1003. def parse_table_GDEF_(self, table):
  1004. statements = table.statements
  1005. while self.next_token_ != "}" or self.cur_comments_:
  1006. self.advance_lexer_(comments=True)
  1007. if self.cur_token_type_ is Lexer.COMMENT:
  1008. statements.append(
  1009. self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
  1010. )
  1011. elif self.is_cur_keyword_("Attach"):
  1012. statements.append(self.parse_attach_())
  1013. elif self.is_cur_keyword_("GlyphClassDef"):
  1014. statements.append(self.parse_GlyphClassDef_())
  1015. elif self.is_cur_keyword_("LigatureCaretByIndex"):
  1016. statements.append(self.parse_ligatureCaretByIndex_())
  1017. elif self.is_cur_keyword_("LigatureCaretByPos"):
  1018. statements.append(self.parse_ligatureCaretByPos_())
  1019. elif self.cur_token_ == ";":
  1020. continue
  1021. else:
  1022. raise FeatureLibError(
  1023. "Expected Attach, LigatureCaretByIndex, " "or LigatureCaretByPos",
  1024. self.cur_token_location_,
  1025. )
  1026. def parse_table_head_(self, table):
  1027. statements = table.statements
  1028. while self.next_token_ != "}" or self.cur_comments_:
  1029. self.advance_lexer_(comments=True)
  1030. if self.cur_token_type_ is Lexer.COMMENT:
  1031. statements.append(
  1032. self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
  1033. )
  1034. elif self.is_cur_keyword_("FontRevision"):
  1035. statements.append(self.parse_FontRevision_())
  1036. elif self.cur_token_ == ";":
  1037. continue
  1038. else:
  1039. raise FeatureLibError("Expected FontRevision", self.cur_token_location_)
    def parse_table_hhea_(self, table):
        # Parses the body of a ``table hhea { ... }`` block, appending
        # HheaField statements to ``table.statements``.
        statements = table.statements
        fields = ("CaretOffset", "Ascender", "Descender", "LineGap")
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(
                    self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
                )
            elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields:
                # Field keys are stored lower-cased, e.g. "caretoffset".
                key = self.cur_token_.lower()
                value = self.expect_number_()
                statements.append(
                    self.ast.HheaField(key, value, location=self.cur_token_location_)
                )
                # Each field must be followed immediately by ";".
                if self.next_token_ != ";":
                    raise FeatureLibError(
                        "Incomplete statement", self.next_token_location_
                    )
            elif self.cur_token_ == ";":
                continue
            else:
                raise FeatureLibError(
                    "Expected CaretOffset, Ascender, " "Descender or LineGap",
                    self.cur_token_location_,
                )
    def parse_table_vhea_(self, table):
        # Parses the body of a ``table vhea { ... }`` block, appending
        # VheaField statements to ``table.statements``.
        statements = table.statements
        fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap")
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(
                    self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
                )
            elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields:
                # Field keys are stored lower-cased, e.g. "verttypoascender".
                key = self.cur_token_.lower()
                value = self.expect_number_()
                statements.append(
                    self.ast.VheaField(key, value, location=self.cur_token_location_)
                )
                # Each field must be followed immediately by ";".
                if self.next_token_ != ";":
                    raise FeatureLibError(
                        "Incomplete statement", self.next_token_location_
                    )
            elif self.cur_token_ == ";":
                continue
            else:
                raise FeatureLibError(
                    "Expected VertTypoAscender, "
                    "VertTypoDescender or VertTypoLineGap",
                    self.cur_token_location_,
                )
  1093. def parse_table_name_(self, table):
  1094. statements = table.statements
  1095. while self.next_token_ != "}" or self.cur_comments_:
  1096. self.advance_lexer_(comments=True)
  1097. if self.cur_token_type_ is Lexer.COMMENT:
  1098. statements.append(
  1099. self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
  1100. )
  1101. elif self.is_cur_keyword_("nameid"):
  1102. statement = self.parse_nameid_()
  1103. if statement:
  1104. statements.append(statement)
  1105. elif self.cur_token_ == ";":
  1106. continue
  1107. else:
  1108. raise FeatureLibError("Expected nameid", self.cur_token_location_)
    def parse_name_(self):
        """Parses a name record. See `section 9.e <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.e>`_."""
        platEncID = None
        langID = None
        # Optional leading numbers: a platform id (1 or 3), then optionally a
        # platform-specific encoding id and a language id.
        if self.next_token_type_ in Lexer.NUMBERS:
            platformID = self.expect_any_number_()
            location = self.cur_token_location_
            if platformID not in (1, 3):
                raise FeatureLibError("Expected platform id 1 or 3", location)
            if self.next_token_type_ in Lexer.NUMBERS:
                platEncID = self.expect_any_number_()
                langID = self.expect_any_number_()
        else:
            platformID = 3
            location = self.cur_token_location_
        # Fill in per-platform defaults for ids that were not given.
        if platformID == 1:  # Macintosh
            platEncID = platEncID or 0  # Roman
            langID = langID or 0  # English
        else:  # 3, Windows
            platEncID = platEncID or 1  # Unicode
            langID = langID or 0x0409  # English
        string = self.expect_string_()
        self.expect_symbol_(";")
        encoding = getEncoding(platformID, platEncID, langID)
        if encoding is None:
            raise FeatureLibError("Unsupported encoding", location)
        # Expand \XXXX / \XX hex escapes according to the record's encoding.
        unescaped = self.unescape_string_(string, encoding)
        return platformID, platEncID, langID, unescaped
    def parse_stat_name_(self):
        # Parses a name record inside ``table STAT``.  Identical to
        # parse_name_() except that the trailing ";" is NOT consumed here;
        # the caller handles the terminator.
        platEncID = None
        langID = None
        # Optional leading numbers: a platform id (1 or 3), then optionally a
        # platform-specific encoding id and a language id.
        if self.next_token_type_ in Lexer.NUMBERS:
            platformID = self.expect_any_number_()
            location = self.cur_token_location_
            if platformID not in (1, 3):
                raise FeatureLibError("Expected platform id 1 or 3", location)
            if self.next_token_type_ in Lexer.NUMBERS:
                platEncID = self.expect_any_number_()
                langID = self.expect_any_number_()
        else:
            platformID = 3
            location = self.cur_token_location_
        # Fill in per-platform defaults for ids that were not given.
        if platformID == 1:  # Macintosh
            platEncID = platEncID or 0  # Roman
            langID = langID or 0  # English
        else:  # 3, Windows
            platEncID = platEncID or 1  # Unicode
            langID = langID or 0x0409  # English
        string = self.expect_string_()
        encoding = getEncoding(platformID, platEncID, langID)
        if encoding is None:
            raise FeatureLibError("Unsupported encoding", location)
        # Expand \XXXX / \XX hex escapes according to the record's encoding.
        unescaped = self.unescape_string_(string, encoding)
        return platformID, platEncID, langID, unescaped
  1163. def parse_nameid_(self):
  1164. assert self.cur_token_ == "nameid", self.cur_token_
  1165. location, nameID = self.cur_token_location_, self.expect_any_number_()
  1166. if nameID > 32767:
  1167. raise FeatureLibError(
  1168. "Name id value cannot be greater than 32767", self.cur_token_location_
  1169. )
  1170. platformID, platEncID, langID, string = self.parse_name_()
  1171. return self.ast.NameRecord(
  1172. nameID, platformID, platEncID, langID, string, location=location
  1173. )
  1174. def unescape_string_(self, string, encoding):
  1175. if encoding == "utf_16_be":
  1176. s = re.sub(r"\\[0-9a-fA-F]{4}", self.unescape_unichr_, string)
  1177. else:
  1178. unescape = lambda m: self.unescape_byte_(m, encoding)
  1179. s = re.sub(r"\\[0-9a-fA-F]{2}", unescape, string)
  1180. # We now have a Unicode string, but it might contain surrogate pairs.
  1181. # We convert surrogates to actual Unicode by round-tripping through
  1182. # Python's UTF-16 codec in a special mode.
  1183. utf16 = tobytes(s, "utf_16_be", "surrogatepass")
  1184. return tostr(utf16, "utf_16_be")
  1185. @staticmethod
  1186. def unescape_unichr_(match):
  1187. n = match.group(0)[1:]
  1188. return chr(int(n, 16))
  1189. @staticmethod
  1190. def unescape_byte_(match, encoding):
  1191. n = match.group(0)[1:]
  1192. return bytechr(int(n, 16)).decode(encoding)
  1193. def parse_table_BASE_(self, table):
  1194. statements = table.statements
  1195. while self.next_token_ != "}" or self.cur_comments_:
  1196. self.advance_lexer_(comments=True)
  1197. if self.cur_token_type_ is Lexer.COMMENT:
  1198. statements.append(
  1199. self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
  1200. )
  1201. elif self.is_cur_keyword_("HorizAxis.BaseTagList"):
  1202. horiz_bases = self.parse_base_tag_list_()
  1203. elif self.is_cur_keyword_("HorizAxis.BaseScriptList"):
  1204. horiz_scripts = self.parse_base_script_list_(len(horiz_bases))
  1205. statements.append(
  1206. self.ast.BaseAxis(
  1207. horiz_bases,
  1208. horiz_scripts,
  1209. False,
  1210. location=self.cur_token_location_,
  1211. )
  1212. )
  1213. elif self.is_cur_keyword_("VertAxis.BaseTagList"):
  1214. vert_bases = self.parse_base_tag_list_()
  1215. elif self.is_cur_keyword_("VertAxis.BaseScriptList"):
  1216. vert_scripts = self.parse_base_script_list_(len(vert_bases))
  1217. statements.append(
  1218. self.ast.BaseAxis(
  1219. vert_bases,
  1220. vert_scripts,
  1221. True,
  1222. location=self.cur_token_location_,
  1223. )
  1224. )
  1225. elif self.cur_token_ == ";":
  1226. continue
    def parse_table_OS_2_(self, table):
        # Parses the body of a ``table OS/2 { ... }`` block, appending
        # OS2Field statements to ``table.statements``.
        statements = table.statements
        # Fields that take a single number.
        numbers = (
            "FSType",
            "TypoAscender",
            "TypoDescender",
            "TypoLineGap",
            "winAscent",
            "winDescent",
            "XHeight",
            "CapHeight",
            "WeightClass",
            "WidthClass",
            "LowerOpSize",
            "UpperOpSize",
        )
        # Fields that take a list of numbers running up to ";".
        ranges = ("UnicodeRange", "CodePageRange")
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(
                    self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
                )
            elif self.cur_token_type_ is Lexer.NAME:
                # Field keys are stored lower-cased, e.g. "fstype".
                key = self.cur_token_.lower()
                value = None
                if self.cur_token_ in numbers:
                    value = self.expect_number_()
                elif self.is_cur_keyword_("Panose"):
                    # Panose is always exactly ten numbers.
                    value = []
                    for i in range(10):
                        value.append(self.expect_number_())
                elif self.cur_token_ in ranges:
                    value = []
                    while self.next_token_ != ";":
                        value.append(self.expect_number_())
                elif self.is_cur_keyword_("Vendor"):
                    value = self.expect_string_()
                # NOTE(review): an unrecognized NAME falls through with
                # value=None and is still appended as an OS2Field — confirm
                # whether later stages reject it.
                statements.append(
                    self.ast.OS2Field(key, value, location=self.cur_token_location_)
                )
            elif self.cur_token_ == ";":
                continue
    def parse_STAT_ElidedFallbackName(self):
        # Parses ``ElidedFallbackName { name ...; ... }`` inside ``table STAT``
        # and returns the list of STATNameStatement records (at least one).
        assert self.is_cur_keyword_("ElidedFallbackName")
        self.expect_symbol_("{")
        names = []
        # NOTE(review): advance_lexer_() is called without comments=True even
        # though the loop condition checks cur_comments_ — confirm comments
        # inside this block are intentionally dropped.
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_()
            if self.is_cur_keyword_("name"):
                platformID, platEncID, langID, string = self.parse_stat_name_()
                nameRecord = self.ast.STATNameStatement(
                    "stat",
                    platformID,
                    platEncID,
                    langID,
                    string,
                    location=self.cur_token_location_,
                )
                names.append(nameRecord)
            else:
                # Bare ";" separators are allowed; anything else is an error.
                if self.cur_token_ != ";":
                    raise FeatureLibError(
                        f"Unexpected token {self.cur_token_} " f"in ElidedFallbackName",
                        self.cur_token_location_,
                    )
        self.expect_symbol_("}")
        if not names:
            raise FeatureLibError('Expected "name"', self.cur_token_location_)
        return names
    def parse_STAT_design_axis(self):
        # Parses ``DesignAxis <tag> <order> { name ...; ... }`` inside
        # ``table STAT`` and returns a STATDesignAxisStatement.
        assert self.is_cur_keyword_("DesignAxis")
        names = []
        axisTag = self.expect_tag_()
        # Registered axes are lowercase; unregistered tags are conventionally
        # uppercase, so warn (but do not fail) otherwise.
        if (
            axisTag not in ("ital", "opsz", "slnt", "wdth", "wght")
            and not axisTag.isupper()
        ):
            log.warning(f"Unregistered axis tag {axisTag} should be uppercase.")
        axisOrder = self.expect_number_()
        self.expect_symbol_("{")
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_()
            if self.cur_token_type_ is Lexer.COMMENT:
                continue
            elif self.is_cur_keyword_("name"):
                location = self.cur_token_location_
                platformID, platEncID, langID, string = self.parse_stat_name_()
                name = self.ast.STATNameStatement(
                    "stat", platformID, platEncID, langID, string, location=location
                )
                names.append(name)
            elif self.cur_token_ == ";":
                continue
            else:
                raise FeatureLibError(
                    f'Expected "name", got {self.cur_token_}', self.cur_token_location_
                )
        self.expect_symbol_("}")
        return self.ast.STATDesignAxisStatement(
            axisTag, axisOrder, names, self.cur_token_location_
        )
    def parse_STAT_axis_value_(self):
        # Parses ``AxisValue { ... }`` inside ``table STAT`` and returns a
        # STATAxisValueStatement with its names, locations and flags.
        assert self.is_cur_keyword_("AxisValue")
        self.expect_symbol_("{")
        locations = []
        names = []
        flags = 0
        while self.next_token_ != "}" or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                continue
            elif self.is_cur_keyword_("name"):
                location = self.cur_token_location_
                platformID, platEncID, langID, string = self.parse_stat_name_()
                name = self.ast.STATNameStatement(
                    "stat", platformID, platEncID, langID, string, location=location
                )
                names.append(name)
            elif self.is_cur_keyword_("location"):
                location = self.parse_STAT_location()
                locations.append(location)
            elif self.is_cur_keyword_("flag"):
                flags = self.expect_stat_flags()
            elif self.cur_token_ == ";":
                continue
            else:
                raise FeatureLibError(
                    f"Unexpected token {self.cur_token_} " f"in AxisValue",
                    self.cur_token_location_,
                )
        self.expect_symbol_("}")
        if not names:
            raise FeatureLibError('Expected "Axis Name"', self.cur_token_location_)
        if not locations:
            raise FeatureLibError('Expected "Axis location"', self.cur_token_location_)
        # Multiple locations make this a Format 4 Axis Value record: each
        # location may then carry only a single value, and every axis tag may
        # appear only once.
        if len(locations) > 1:
            for location in locations:
                if len(location.values) > 1:
                    raise FeatureLibError(
                        "Only one value is allowed in a "
                        "Format 4 Axis Value Record, but "
                        f"{len(location.values)} were found.",
                        self.cur_token_location_,
                    )
            format4_tags = []
            for location in locations:
                tag = location.tag
                if tag in format4_tags:
                    raise FeatureLibError(
                        f"Axis tag {tag} already " "defined.", self.cur_token_location_
                    )
                format4_tags.append(tag)
        return self.ast.STATAxisValueStatement(
            names, locations, flags, self.cur_token_location_
        )
def parse_STAT_location(self):
    # Parses the payload of a ``location`` entry inside an AxisValue
    # record: a 4-character axis tag followed by one, two or three
    # numbers.  Three numbers are interpreted as <nominal, min, max>.
    values = []
    tag = self.expect_tag_()
    if len(tag.strip()) != 4:
        raise FeatureLibError(
            f"Axis tag {self.cur_token_} must be 4 " "characters",
            self.cur_token_location_,
        )
    while self.next_token_ != ";":
        if self.next_token_type_ is Lexer.FLOAT:
            value = self.expect_float_()
            values.append(value)
        elif self.next_token_type_ is Lexer.NUMBER:
            value = self.expect_number_()
            values.append(value)
        else:
            raise FeatureLibError(
                f'Unexpected value "{self.next_token_}". '
                "Expected integer or float.",
                self.next_token_location_,
            )
    if len(values) == 3:
        # The nominal value must lie within the declared range.
        nominal, min_val, max_val = values
        if nominal < min_val or nominal > max_val:
            raise FeatureLibError(
                f"Default value {nominal} is outside "
                f"of specified range "
                f"{min_val}-{max_val}.",
                self.next_token_location_,
            )
    return self.ast.AxisValueLocationStatement(tag, values)
def parse_table_STAT_(self, table):
    # Parses the body of a ``table STAT { ... }`` block, appending the
    # parsed statements to ``table.statements``.  Tags of DesignAxis
    # statements are recorded so that AxisValue locations can be
    # verified to reference a previously declared axis.
    statements = table.statements
    design_axes = []
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.NAME:
            if self.is_cur_keyword_("ElidedFallbackName"):
                names = self.parse_STAT_ElidedFallbackName()
                statements.append(self.ast.ElidedFallbackName(names))
            elif self.is_cur_keyword_("ElidedFallbackNameID"):
                value = self.expect_number_()
                statements.append(self.ast.ElidedFallbackNameID(value))
                self.expect_symbol_(";")
            elif self.is_cur_keyword_("DesignAxis"):
                designAxis = self.parse_STAT_design_axis()
                design_axes.append(designAxis.tag)
                statements.append(designAxis)
                self.expect_symbol_(";")
            elif self.is_cur_keyword_("AxisValue"):
                axisValueRecord = self.parse_STAT_axis_value_()
                for location in axisValueRecord.locations:
                    if location.tag not in design_axes:
                        # Tag must be defined in a DesignAxis before it
                        # can be referenced
                        raise FeatureLibError(
                            "DesignAxis not defined for " f"{location.tag}.",
                            self.cur_token_location_,
                        )
                statements.append(axisValueRecord)
                self.expect_symbol_(";")
            else:
                raise FeatureLibError(
                    f"Unexpected token {self.cur_token_}", self.cur_token_location_
                )
        elif self.cur_token_ == ";":
            continue
        # NOTE(review): tokens that are neither comments, NAMEs, nor ";"
        # are silently skipped here (no trailing else) — confirm intended.
  1454. def parse_base_tag_list_(self):
  1455. # Parses BASE table entries. (See `section 9.a <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.a>`_)
  1456. assert self.cur_token_ in (
  1457. "HorizAxis.BaseTagList",
  1458. "VertAxis.BaseTagList",
  1459. ), self.cur_token_
  1460. bases = []
  1461. while self.next_token_ != ";":
  1462. bases.append(self.expect_script_tag_())
  1463. self.expect_symbol_(";")
  1464. return bases
  1465. def parse_base_script_list_(self, count):
  1466. assert self.cur_token_ in (
  1467. "HorizAxis.BaseScriptList",
  1468. "VertAxis.BaseScriptList",
  1469. ), self.cur_token_
  1470. scripts = [(self.parse_base_script_record_(count))]
  1471. while self.next_token_ == ",":
  1472. self.expect_symbol_(",")
  1473. scripts.append(self.parse_base_script_record_(count))
  1474. self.expect_symbol_(";")
  1475. return scripts
  1476. def parse_base_script_record_(self, count):
  1477. script_tag = self.expect_script_tag_()
  1478. base_tag = self.expect_script_tag_()
  1479. coords = [self.expect_number_() for i in range(count)]
  1480. return script_tag, base_tag, coords
  1481. def parse_device_(self):
  1482. result = None
  1483. self.expect_symbol_("<")
  1484. self.expect_keyword_("device")
  1485. if self.next_token_ == "NULL":
  1486. self.expect_keyword_("NULL")
  1487. else:
  1488. result = [(self.expect_number_(), self.expect_number_())]
  1489. while self.next_token_ == ",":
  1490. self.expect_symbol_(",")
  1491. result.append((self.expect_number_(), self.expect_number_()))
  1492. result = tuple(result) # make it hashable
  1493. self.expect_symbol_(">")
  1494. return result
  1495. def is_next_value_(self):
  1496. return (
  1497. self.next_token_type_ is Lexer.NUMBER
  1498. or self.next_token_ == "<"
  1499. or self.next_token_ == "("
  1500. )
def parse_valuerecord_(self, vertical):
    # Parses a GPOS value record in any of its forms:
    #   * a bare number (advance only; y-advance when ``vertical``),
    #   * ``<NULL>``,
    #   * ``<name>`` referencing a prior valueRecordDef,
    #   * ``<xPla yPla xAdv yAdv>`` optionally followed by four
    #     ``<device ...>`` records.
    if (
        self.next_token_type_ is Lexer.SYMBOL and self.next_token_ == "("
    ) or self.next_token_type_ is Lexer.NUMBER:
        # Bare number (or variable scalar): a single advance value.
        number, location = (
            self.expect_number_(variable=True),
            self.cur_token_location_,
        )
        if vertical:
            val = self.ast.ValueRecord(
                yAdvance=number, vertical=vertical, location=location
            )
        else:
            val = self.ast.ValueRecord(
                xAdvance=number, vertical=vertical, location=location
            )
        return val
    self.expect_symbol_("<")
    location = self.cur_token_location_
    if self.next_token_type_ is Lexer.NAME:
        name = self.expect_name_()
        if name == "NULL":
            self.expect_symbol_(">")
            return self.ast.ValueRecord()
        # Named record: copy the four base values from the definition.
        vrd = self.valuerecords_.resolve(name)
        if vrd is None:
            raise FeatureLibError(
                'Unknown valueRecordDef "%s"' % name, self.cur_token_location_
            )
        value = vrd.value
        xPlacement, yPlacement = (value.xPlacement, value.yPlacement)
        xAdvance, yAdvance = (value.xAdvance, value.yAdvance)
    else:
        # Explicit four-value form; each value may be a variable scalar.
        xPlacement, yPlacement, xAdvance, yAdvance = (
            self.expect_number_(variable=True),
            self.expect_number_(variable=True),
            self.expect_number_(variable=True),
            self.expect_number_(variable=True),
        )
    if self.next_token_ == "<":
        # Optional device tables, always all four, in this order.
        xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (
            self.parse_device_(),
            self.parse_device_(),
            self.parse_device_(),
            self.parse_device_(),
        )
        # Device deltas are stored as int8 in the font, hence the range check.
        allDeltas = sorted(
            [
                delta
                for size, delta in (xPlaDevice if xPlaDevice else ())
                + (yPlaDevice if yPlaDevice else ())
                + (xAdvDevice if xAdvDevice else ())
                + (yAdvDevice if yAdvDevice else ())
            ]
        )
        if allDeltas[0] < -128 or allDeltas[-1] > 127:
            raise FeatureLibError(
                "Device value out of valid range (-128..127)",
                self.cur_token_location_,
            )
    else:
        xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (None, None, None, None)
    self.expect_symbol_(">")
    return self.ast.ValueRecord(
        xPlacement,
        yPlacement,
        xAdvance,
        yAdvance,
        xPlaDevice,
        yPlaDevice,
        xAdvDevice,
        yAdvDevice,
        vertical=vertical,
        location=location,
    )
  1576. def parse_valuerecord_definition_(self, vertical):
  1577. # Parses a named value record definition. (See section `2.e.v <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#2.e.v>`_)
  1578. assert self.is_cur_keyword_("valueRecordDef")
  1579. location = self.cur_token_location_
  1580. value = self.parse_valuerecord_(vertical)
  1581. name = self.expect_name_()
  1582. self.expect_symbol_(";")
  1583. vrd = self.ast.ValueRecordDefinition(name, value, location=location)
  1584. self.valuerecords_.define(name, vrd)
  1585. return vrd
  1586. def parse_languagesystem_(self):
  1587. assert self.cur_token_ == "languagesystem"
  1588. location = self.cur_token_location_
  1589. script = self.expect_script_tag_()
  1590. language = self.expect_language_tag_()
  1591. self.expect_symbol_(";")
  1592. return self.ast.LanguageSystemStatement(script, language, location=location)
def parse_feature_block_(self, variation=False):
    # Parses a ``feature <tag> { ... } <tag>;`` block, or a
    # ``variation <tag> <conditionset> { ... } <tag>;`` block when
    # ``variation`` is true.
    if variation:
        assert self.cur_token_ == "variation"
    else:
        assert self.cur_token_ == "feature"
    location = self.cur_token_location_
    tag = self.expect_tag_()
    # Vertical features default value records to the y direction.
    vertical = tag in {"vkrn", "vpal", "vhal", "valt"}
    stylisticset = None
    cv_feature = None
    size_feature = False
    if tag in self.SS_FEATURE_TAGS:
        stylisticset = tag
    elif tag in self.CV_FEATURE_TAGS:
        cv_feature = tag
    elif tag == "size":
        size_feature = True
    if variation:
        # Variation blocks name the condition set under which they apply.
        conditionset = self.expect_name_()
    use_extension = False
    if self.next_token_ == "useExtension":
        self.expect_keyword_("useExtension")
        use_extension = True
    if variation:
        block = self.ast.VariationBlock(
            tag, conditionset, use_extension=use_extension, location=location
        )
    else:
        block = self.ast.FeatureBlock(
            tag, use_extension=use_extension, location=location
        )
    self.parse_block_(block, vertical, stylisticset, size_feature, cv_feature)
    return block
  1626. def parse_feature_reference_(self):
  1627. assert self.cur_token_ == "feature", self.cur_token_
  1628. location = self.cur_token_location_
  1629. featureName = self.expect_tag_()
  1630. self.expect_symbol_(";")
  1631. return self.ast.FeatureReferenceStatement(featureName, location=location)
def parse_featureNames_(self, tag):
    """Parses a ``featureNames`` statement found in stylistic set features.
    See section `8.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.c>`_.
    """
    assert self.cur_token_ == "featureNames", self.cur_token_
    block = self.ast.NestedBlock(
        tag, self.cur_token_, location=self.cur_token_location_
    )
    self.expect_symbol_("{")
    # Nested blocks open a fresh scope in every symbol table.
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            block.statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_name_()
            block.statements.append(
                self.ast.FeatureNameStatement(
                    tag, platformID, platEncID, langID, string, location=location
                )
            )
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError('Expected "name"', self.cur_token_location_)
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
def parse_cvParameters_(self, tag):
    # Parses a ``cvParameters`` block found in Character Variant features.
    # See section `8.d <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.d>`_.
    assert self.cur_token_ == "cvParameters", self.cur_token_
    block = self.ast.NestedBlock(
        tag, self.cur_token_, location=self.cur_token_location_
    )
    self.expect_symbol_("{")
    # Nested blocks open a fresh scope in every symbol table.
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    statements = block.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_(
            {
                "FeatUILabelNameID",
                "FeatUITooltipTextNameID",
                "SampleTextNameID",
                "ParamUILabelNameID",
            }
        ):
            # Each of these keywords introduces a nested name-ID block.
            statements.append(self.parse_cvNameIDs_(tag, self.cur_token_))
        elif self.is_cur_keyword_("Character"):
            statements.append(self.parse_cvCharacter_(tag))
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected statement: got {} {}".format(
                    self.cur_token_type_, self.cur_token_
                ),
                self.cur_token_location_,
            )
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
def parse_cvNameIDs_(self, tag, block_name):
    # Parses one of the name-ID sub-blocks of a cvParameters block
    # (FeatUILabelNameID, FeatUITooltipTextNameID, SampleTextNameID,
    # ParamUILabelNameID); ``block_name`` is the keyword that opened it.
    assert self.cur_token_ == block_name, self.cur_token_
    block = self.ast.NestedBlock(tag, block_name, location=self.cur_token_location_)
    self.expect_symbol_("{")
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            block.statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_name_()
            block.statements.append(
                self.ast.CVParametersNameStatement(
                    tag,
                    platformID,
                    platEncID,
                    langID,
                    string,
                    block_name,
                    location=location,
                )
            )
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError('Expected "name"', self.cur_token_location_)
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
  1743. def parse_cvCharacter_(self, tag):
  1744. assert self.cur_token_ == "Character", self.cur_token_
  1745. location, character = self.cur_token_location_, self.expect_any_number_()
  1746. self.expect_symbol_(";")
  1747. if not (0xFFFFFF >= character >= 0):
  1748. raise FeatureLibError(
  1749. "Character value must be between "
  1750. "{:#x} and {:#x}".format(0, 0xFFFFFF),
  1751. location,
  1752. )
  1753. return self.ast.CharacterStatement(character, tag, location=location)
  1754. def parse_FontRevision_(self):
  1755. # Parses a ``FontRevision`` statement found in the head table. See
  1756. # `section 9.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.c>`_.
  1757. assert self.cur_token_ == "FontRevision", self.cur_token_
  1758. location, version = self.cur_token_location_, self.expect_float_()
  1759. self.expect_symbol_(";")
  1760. if version <= 0:
  1761. raise FeatureLibError("Font revision numbers must be positive", location)
  1762. return self.ast.FontRevisionStatement(version, location=location)
  1763. def parse_conditionset_(self):
  1764. name = self.expect_name_()
  1765. conditions = {}
  1766. self.expect_symbol_("{")
  1767. while self.next_token_ != "}":
  1768. self.advance_lexer_()
  1769. if self.cur_token_type_ is not Lexer.NAME:
  1770. raise FeatureLibError("Expected an axis name", self.cur_token_location_)
  1771. axis = self.cur_token_
  1772. if axis in conditions:
  1773. raise FeatureLibError(
  1774. f"Repeated condition for axis {axis}", self.cur_token_location_
  1775. )
  1776. if self.next_token_type_ is Lexer.FLOAT:
  1777. min_value = self.expect_float_()
  1778. elif self.next_token_type_ is Lexer.NUMBER:
  1779. min_value = self.expect_number_(variable=False)
  1780. if self.next_token_type_ is Lexer.FLOAT:
  1781. max_value = self.expect_float_()
  1782. elif self.next_token_type_ is Lexer.NUMBER:
  1783. max_value = self.expect_number_(variable=False)
  1784. self.expect_symbol_(";")
  1785. conditions[axis] = (min_value, max_value)
  1786. self.expect_symbol_("}")
  1787. finalname = self.expect_name_()
  1788. if finalname != name:
  1789. raise FeatureLibError('Expected "%s"' % name, self.cur_token_location_)
  1790. return self.ast.ConditionsetStatement(name, conditions)
def parse_block_(
    self, block, vertical, stylisticset=None, size_feature=False, cv_feature=None
):
    # Parses the ``{ ... } name;`` body shared by feature and lookup-like
    # blocks, dispatching each statement keyword to its parser.  The
    # flags enable statements that are only legal in particular feature
    # types (featureNames in ssXX, cvParameters in cvXX, parameters and
    # sizemenuname in the size feature).
    self.expect_symbol_("{")
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    statements = block.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.GLYPHCLASS:
            statements.append(self.parse_glyphclass_definition_())
        elif self.is_cur_keyword_("anchorDef"):
            statements.append(self.parse_anchordef_())
        elif self.is_cur_keyword_({"enum", "enumerate"}):
            statements.append(self.parse_enumerate_(vertical=vertical))
        elif self.is_cur_keyword_("feature"):
            statements.append(self.parse_feature_reference_())
        elif self.is_cur_keyword_("ignore"):
            statements.append(self.parse_ignore_())
        elif self.is_cur_keyword_("language"):
            statements.append(self.parse_language_())
        elif self.is_cur_keyword_("lookup"):
            statements.append(self.parse_lookup_(vertical))
        elif self.is_cur_keyword_("lookupflag"):
            statements.append(self.parse_lookupflag_())
        elif self.is_cur_keyword_("markClass"):
            statements.append(self.parse_markClass_())
        elif self.is_cur_keyword_({"pos", "position"}):
            statements.append(
                self.parse_position_(enumerated=False, vertical=vertical)
            )
        elif self.is_cur_keyword_("script"):
            statements.append(self.parse_script_())
        elif self.is_cur_keyword_({"sub", "substitute", "rsub", "reversesub"}):
            statements.append(self.parse_substitute_())
        elif self.is_cur_keyword_("subtable"):
            statements.append(self.parse_subtable_())
        elif self.is_cur_keyword_("valueRecordDef"):
            statements.append(self.parse_valuerecord_definition_(vertical))
        elif stylisticset and self.is_cur_keyword_("featureNames"):
            statements.append(self.parse_featureNames_(stylisticset))
        elif cv_feature and self.is_cur_keyword_("cvParameters"):
            statements.append(self.parse_cvParameters_(cv_feature))
        elif size_feature and self.is_cur_keyword_("parameters"):
            statements.append(self.parse_size_parameters_())
        elif size_feature and self.is_cur_keyword_("sizemenuname"):
            statements.append(self.parse_size_menuname_())
        elif (
            self.cur_token_type_ is Lexer.NAME
            and self.cur_token_ in self.extensions
        ):
            # Registered extension keywords get full control of the parser.
            statements.append(self.extensions[self.cur_token_](self))
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected glyph class definition or statement: got {} {}".format(
                    self.cur_token_type_, self.cur_token_
                ),
                self.cur_token_location_,
            )
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    # The block must be closed by repeating its own name.
    name = self.expect_name_()
    if name != block.name.strip():
        raise FeatureLibError(
            'Expected "%s"' % block.name.strip(), self.cur_token_location_
        )
    self.expect_symbol_(";")
    # A multiple substitution may have a single destination, in which case
    # it will look just like a single substitution. So if there are both
    # multiple and single substitutions, upgrade all the single ones to
    # multiple substitutions.
    # Check if we have a mix of non-contextual singles and multiples.
    has_single = False
    has_multiple = False
    for s in statements:
        if isinstance(s, self.ast.SingleSubstStatement):
            has_single = not any([s.prefix, s.suffix, s.forceChain])
        elif isinstance(s, self.ast.MultipleSubstStatement):
            has_multiple = not any([s.prefix, s.suffix, s.forceChain])
    # Upgrade all single substitutions to multiple substitutions.
    if has_single and has_multiple:
        statements = []
        for s in block.statements:
            if isinstance(s, self.ast.SingleSubstStatement):
                glyphs = s.glyphs[0].glyphSet()
                replacements = s.replacements[0].glyphSet()
                if len(replacements) == 1:
                    # One replacement for many glyphs: repeat it per glyph.
                    replacements *= len(glyphs)
                for i, glyph in enumerate(glyphs):
                    statements.append(
                        self.ast.MultipleSubstStatement(
                            s.prefix,
                            glyph,
                            s.suffix,
                            [replacements[i]],
                            s.forceChain,
                            location=s.location,
                        )
                    )
            else:
                statements.append(s)
        block.statements = statements
  1900. def is_cur_keyword_(self, k):
  1901. if self.cur_token_type_ is Lexer.NAME:
  1902. if isinstance(k, type("")): # basestring is gone in Python3
  1903. return self.cur_token_ == k
  1904. else:
  1905. return self.cur_token_ in k
  1906. return False
  1907. def expect_class_name_(self):
  1908. self.advance_lexer_()
  1909. if self.cur_token_type_ is not Lexer.GLYPHCLASS:
  1910. raise FeatureLibError("Expected @NAME", self.cur_token_location_)
  1911. return self.cur_token_
  1912. def expect_cid_(self):
  1913. self.advance_lexer_()
  1914. if self.cur_token_type_ is Lexer.CID:
  1915. return self.cur_token_
  1916. raise FeatureLibError("Expected a CID", self.cur_token_location_)
  1917. def expect_filename_(self):
  1918. self.advance_lexer_()
  1919. if self.cur_token_type_ is not Lexer.FILENAME:
  1920. raise FeatureLibError("Expected file name", self.cur_token_location_)
  1921. return self.cur_token_
  1922. def expect_glyph_(self):
  1923. self.advance_lexer_()
  1924. if self.cur_token_type_ is Lexer.NAME:
  1925. return self.cur_token_.lstrip("\\")
  1926. elif self.cur_token_type_ is Lexer.CID:
  1927. return "cid%05d" % self.cur_token_
  1928. raise FeatureLibError("Expected a glyph name or CID", self.cur_token_location_)
  1929. def check_glyph_name_in_glyph_set(self, *names):
  1930. """Adds a glyph name (just `start`) or glyph names of a
  1931. range (`start` and `end`) which are not in the glyph set
  1932. to the "missing list" for future error reporting.
  1933. If no glyph set is present, does nothing.
  1934. """
  1935. if self.glyphNames_:
  1936. for name in names:
  1937. if name in self.glyphNames_:
  1938. continue
  1939. if name not in self.missing:
  1940. self.missing[name] = self.cur_token_location_
  1941. def expect_markClass_reference_(self):
  1942. name = self.expect_class_name_()
  1943. mc = self.glyphclasses_.resolve(name)
  1944. if mc is None:
  1945. raise FeatureLibError(
  1946. "Unknown markClass @%s" % name, self.cur_token_location_
  1947. )
  1948. if not isinstance(mc, self.ast.MarkClass):
  1949. raise FeatureLibError(
  1950. "@%s is not a markClass" % name, self.cur_token_location_
  1951. )
  1952. return mc
  1953. def expect_tag_(self):
  1954. self.advance_lexer_()
  1955. if self.cur_token_type_ is not Lexer.NAME:
  1956. raise FeatureLibError("Expected a tag", self.cur_token_location_)
  1957. if len(self.cur_token_) > 4:
  1958. raise FeatureLibError(
  1959. "Tags cannot be longer than 4 characters", self.cur_token_location_
  1960. )
  1961. return (self.cur_token_ + " ")[:4]
  1962. def expect_script_tag_(self):
  1963. tag = self.expect_tag_()
  1964. if tag == "dflt":
  1965. raise FeatureLibError(
  1966. '"dflt" is not a valid script tag; use "DFLT" instead',
  1967. self.cur_token_location_,
  1968. )
  1969. return tag
  1970. def expect_language_tag_(self):
  1971. tag = self.expect_tag_()
  1972. if tag == "DFLT":
  1973. raise FeatureLibError(
  1974. '"DFLT" is not a valid language tag; use "dflt" instead',
  1975. self.cur_token_location_,
  1976. )
  1977. return tag
  1978. def expect_symbol_(self, symbol):
  1979. self.advance_lexer_()
  1980. if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol:
  1981. return symbol
  1982. raise FeatureLibError("Expected '%s'" % symbol, self.cur_token_location_)
  1983. def expect_keyword_(self, keyword):
  1984. self.advance_lexer_()
  1985. if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword:
  1986. return self.cur_token_
  1987. raise FeatureLibError('Expected "%s"' % keyword, self.cur_token_location_)
  1988. def expect_name_(self):
  1989. self.advance_lexer_()
  1990. if self.cur_token_type_ is Lexer.NAME:
  1991. return self.cur_token_
  1992. raise FeatureLibError("Expected a name", self.cur_token_location_)
  1993. def expect_number_(self, variable=False):
  1994. self.advance_lexer_()
  1995. if self.cur_token_type_ is Lexer.NUMBER:
  1996. return self.cur_token_
  1997. if variable and self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "(":
  1998. return self.expect_variable_scalar_()
  1999. raise FeatureLibError("Expected a number", self.cur_token_location_)
def expect_variable_scalar_(self):
    # Parses a variable scalar such as ``(wght=200:-100 wght=900:-150)``.
    # Called by expect_number_ after it has already consumed the "(".
    self.advance_lexer_()  # "("
    scalar = VariableScalar()
    while True:
        if self.cur_token_type_ == Lexer.SYMBOL and self.cur_token_ == ")":
            break
        # Each master contributes one (axis-location, value) pair.
        location, value = self.expect_master_()
        scalar.add_value(location, value)
    return scalar
  2009. def expect_master_(self):
  2010. location = {}
  2011. while True:
  2012. if self.cur_token_type_ is not Lexer.NAME:
  2013. raise FeatureLibError("Expected an axis name", self.cur_token_location_)
  2014. axis = self.cur_token_
  2015. self.advance_lexer_()
  2016. if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "="):
  2017. raise FeatureLibError(
  2018. "Expected an equals sign", self.cur_token_location_
  2019. )
  2020. value = self.expect_number_()
  2021. location[axis] = value
  2022. if self.next_token_type_ is Lexer.NAME and self.next_token_[0] == ":":
  2023. # Lexer has just read the value as a glyph name. We'll correct it later
  2024. break
  2025. self.advance_lexer_()
  2026. if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ","):
  2027. raise FeatureLibError(
  2028. "Expected an comma or an equals sign", self.cur_token_location_
  2029. )
  2030. self.advance_lexer_()
  2031. self.advance_lexer_()
  2032. value = int(self.cur_token_[1:])
  2033. self.advance_lexer_()
  2034. return location, value
  2035. def expect_any_number_(self):
  2036. self.advance_lexer_()
  2037. if self.cur_token_type_ in Lexer.NUMBERS:
  2038. return self.cur_token_
  2039. raise FeatureLibError(
  2040. "Expected a decimal, hexadecimal or octal number", self.cur_token_location_
  2041. )
  2042. def expect_float_(self):
  2043. self.advance_lexer_()
  2044. if self.cur_token_type_ is Lexer.FLOAT:
  2045. return self.cur_token_
  2046. raise FeatureLibError(
  2047. "Expected a floating-point number", self.cur_token_location_
  2048. )
  2049. def expect_decipoint_(self):
  2050. if self.next_token_type_ == Lexer.FLOAT:
  2051. return self.expect_float_()
  2052. elif self.next_token_type_ is Lexer.NUMBER:
  2053. return self.expect_number_() / 10
  2054. else:
  2055. raise FeatureLibError(
  2056. "Expected an integer or floating-point number", self.cur_token_location_
  2057. )
  2058. def expect_stat_flags(self):
  2059. value = 0
  2060. flags = {
  2061. "OlderSiblingFontAttribute": 1,
  2062. "ElidableAxisValueName": 2,
  2063. }
  2064. while self.next_token_ != ";":
  2065. if self.next_token_ in flags:
  2066. name = self.expect_name_()
  2067. value = value | flags[name]
  2068. else:
  2069. raise FeatureLibError(
  2070. f"Unexpected STAT flag {self.cur_token_}", self.cur_token_location_
  2071. )
  2072. return value
  2073. def expect_stat_values_(self):
  2074. if self.next_token_type_ == Lexer.FLOAT:
  2075. return self.expect_float_()
  2076. elif self.next_token_type_ is Lexer.NUMBER:
  2077. return self.expect_number_()
  2078. else:
  2079. raise FeatureLibError(
  2080. "Expected an integer or floating-point number", self.cur_token_location_
  2081. )
  2082. def expect_string_(self):
  2083. self.advance_lexer_()
  2084. if self.cur_token_type_ is Lexer.STRING:
  2085. return self.cur_token_
  2086. raise FeatureLibError("Expected a string", self.cur_token_location_)
def advance_lexer_(self, comments=False):
    # Shift the one-token lookahead window by one position.  When
    # ``comments`` is true and buffered comments are pending, surface
    # one buffered comment as the current token instead of consuming
    # new lexer input.
    if comments and self.cur_comments_:
        self.cur_token_type_ = Lexer.COMMENT
        self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0)
        return
    else:
        self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
            self.next_token_type_,
            self.next_token_,
            self.next_token_location_,
        )
        while True:
            try:
                (
                    self.next_token_type_,
                    self.next_token_,
                    self.next_token_location_,
                ) = next(self.lexer_)
            except StopIteration:
                # NOTE(review): next_token_location_ keeps its previous
                # value at end of input — confirm nothing reads it then.
                self.next_token_type_, self.next_token_ = (None, None)
            if self.next_token_type_ != Lexer.COMMENT:
                break
            # Comments are buffered here and surfaced by later calls
            # with comments=True.
            self.cur_comments_.append((self.next_token_, self.next_token_location_))
  2110. @staticmethod
  2111. def reverse_string_(s):
  2112. """'abc' --> 'cba'"""
  2113. return "".join(reversed(list(s)))
  2114. def make_cid_range_(self, location, start, limit):
  2115. """(location, 999, 1001) --> ["cid00999", "cid01000", "cid01001"]"""
  2116. result = list()
  2117. if start > limit:
  2118. raise FeatureLibError(
  2119. "Bad range: start should be less than limit", location
  2120. )
  2121. for cid in range(start, limit + 1):
  2122. result.append("cid%05d" % cid)
  2123. return result
def make_glyph_range_(self, location, start, limit):
    """(location, "a.sc", "d.sc") --> ["a.sc", "b.sc", "c.sc", "d.sc"]"""
    result = list()
    if len(start) != len(limit):
        raise FeatureLibError(
            'Bad range: "%s" and "%s" should have the same length' % (start, limit),
            location,
        )
    # The varying part of the range is whatever remains after removing
    # the longest common prefix and the longest common suffix of the
    # two endpoint names.
    rev = self.reverse_string_
    prefix = os.path.commonprefix([start, limit])
    suffix = rev(os.path.commonprefix([rev(start), rev(limit)]))
    if len(suffix) > 0:
        start_range = start[len(prefix) : -len(suffix)]
        limit_range = limit[len(prefix) : -len(suffix)]
    else:
        start_range = start[len(prefix) :]
        limit_range = limit[len(prefix) :]
    if start_range >= limit_range:
        raise FeatureLibError(
            "Start of range must be smaller than its end", location
        )
    # The varying part must be a single uppercase letter, a single
    # lowercase letter, or a 1-3 digit number; anything else is an error.
    uppercase = re.compile(r"^[A-Z]$")
    if uppercase.match(start_range) and uppercase.match(limit_range):
        for c in range(ord(start_range), ord(limit_range) + 1):
            result.append("%s%c%s" % (prefix, c, suffix))
        return result
    lowercase = re.compile(r"^[a-z]$")
    if lowercase.match(start_range) and lowercase.match(limit_range):
        for c in range(ord(start_range), ord(limit_range) + 1):
            result.append("%s%c%s" % (prefix, c, suffix))
        return result
    digits = re.compile(r"^[0-9]{1,3}$")
    if digits.match(start_range) and digits.match(limit_range):
        for i in range(int(start_range, 10), int(limit_range, 10) + 1):
            # Zero-pad to the same width as the start of the range.
            number = ("000" + str(i))[-len(start_range) :]
            result.append("%s%s%s" % (prefix, number, suffix))
        return result
    raise FeatureLibError('Bad range: "%s-%s"' % (start, limit), location)
  2162. class SymbolTable(object):
  2163. def __init__(self):
  2164. self.scopes_ = [{}]
  2165. def enter_scope(self):
  2166. self.scopes_.append({})
  2167. def exit_scope(self):
  2168. self.scopes_.pop()
  2169. def define(self, name, item):
  2170. self.scopes_[-1][name] = item
  2171. def resolve(self, name):
  2172. for scope in reversed(self.scopes_):
  2173. item = scope.get(name)
  2174. if item:
  2175. return item
  2176. return None