exploration.parsing

  • Authors: Peter Mawhorter
  • Consulted:
  • Date: 2023-12-27
  • Purpose: Common code for parsing things, including conversions to custom string formats and JSON for some types.
   1"""
   2- Authors: Peter Mawhorter
   3- Consulted:
   4- Date: 2023-12-27
   5- Purpose: Common code for parsing things, including conversions to
   6    custom string formats and JSON for some types.
   7"""
   8
   9from typing import (
  10    Union, Dict, Optional, get_args, Tuple, List, cast, Set, TypeVar,
  11    Literal, TypeAlias, Generator, TypedDict, TextIO, Any, Callable,
  12    Type, Sequence
  13)
  14
  15import enum
  16import collections
  17import copy
  18import warnings
  19import json
  20
  21import networkx  # type: ignore
  22
  23from . import base
  24from . import core
  25from . import utils
  26from . import commands
  27
  28
  29#----------------#
  30# Format Details #
  31#----------------#
  32
  33Lexeme = enum.IntEnum(
  34    "Lexeme",
  35    [
  36        'domainSeparator',
  37        'zoneSeparator',
  38        'partSeparator',
  39        'stateOn',
  40        'stateOff',
  41        'tokenCount',
  42        'effectCharges',
  43        'sepOrDelay',
  44        'consequenceSeparator',
  45        'inCommon',
  46        'isHidden',
  47        'skillLevel',
  48        'wigglyLine',
  49        'withDetails',
  50        'reciprocalSeparator',
  51        'mechanismSeparator',
  52        'openCurly',
  53        'closeCurly',
  54        'openParen',
  55        'closeParen',
  56        'angleLeft',
  57        'angleRight',
  58        'doubleQuestionmark',
  59        'ampersand',
  60        'orBar',
  61        'notMarker',
  62    ]
  63)
  64"""
  65These are the different separators, grouping characters, and keywords
  66used as part of parsing. The characters that are actually recognized are
  67defined as part of a `Format`.
  68"""
  69
  70Format = Dict[Lexeme, str]
  71"""
  72A journal format is specified using a dictionary with keys that denote
  73journal marker types and values which are one-to-several-character
  74strings indicating the markup used for that entry/info type.
  75"""
  76
  77DEFAULT_FORMAT: Format = {
    # Separators
  79    Lexeme.domainSeparator: '//',
  80    Lexeme.zoneSeparator: '::',
  81    Lexeme.partSeparator: '%%',
    Lexeme.stateOn: '=on',  # TODO: Lexing issue!
  83    Lexeme.stateOff: '=off',
  84    Lexeme.tokenCount: '*',
  85    Lexeme.effectCharges: '=',
  86    Lexeme.sepOrDelay: ',',
  87    Lexeme.consequenceSeparator: ';',
  88    Lexeme.inCommon: '+c',
  89    Lexeme.isHidden: '+h',
  90    Lexeme.skillLevel: '^',
  91    Lexeme.wigglyLine: '~',
  92    Lexeme.withDetails: '%',
  93    Lexeme.reciprocalSeparator: '/',
  94    Lexeme.mechanismSeparator: ':',
  95    Lexeme.openCurly: '{',
  96    Lexeme.closeCurly: '}',
  97    Lexeme.openParen: '(',
  98    Lexeme.closeParen: ')',
  99    Lexeme.angleLeft: '<',
 100    Lexeme.angleRight: '>',
 101    Lexeme.doubleQuestionmark: '??',
 102    Lexeme.ampersand: '&',
 103    Lexeme.orBar: '|',
 104    Lexeme.notMarker: '!',
 105}
 106"""
 107The default parsing format.
 108"""
 109
 110DEFAULT_EFFECT_NAMES: Dict[str, base.EffectType] = {
 111    x: x for x in get_args(base.EffectType)
 112}
 113"""
 114Default names for each effect type. Maps names to canonical effect type
 115strings. A different mapping could be used to allow for writing effect
 116names in another language, for example.
 117"""
 118
 119DEFAULT_FOCALIZATION_NAMES: Dict[str, base.DomainFocalization] = {
 120    x: x for x in get_args(base.DomainFocalization)
 121}
 122"""
 123Default names for each domain focalization type. Maps each focalization
 124type string to itself.
 125"""
 126
 127DEFAULT_SF_INDICATORS: Tuple[str, str] = ('s', 'f')
 128"""
 129Default characters used to indicate success/failure when transcribing a
 130`TransitionWithOutcomes`.
 131"""
 132
 133
 134#-------------------#
 135# Errors & Warnings #
 136#-------------------#
 137
 138class ParseWarning(Warning):
 139    """
 140    Represents a warning encountered when parsing something.
 141    """
 142    pass
 143
 144
 145class ParseError(ValueError):
 146    """
    Represents an error encountered when parsing.
 148    """
 149    pass
 150
 151
 152class DotParseError(ParseError):
 153    """
 154    An error raised during parsing when incorrectly-formatted graphviz
 155    "dot" data is provided. See `parseDot`.
 156    """
 157    pass
 158
 159
 160class InvalidFeatureSpecifierError(ParseError):
 161    """
 162    An error used when a feature specifier is in the wrong format.
 163    Errors with part specifiers also use this.
 164    """
 165
 166
 167#--------#
 168# Lexing #
 169#--------#
 170
 171LexedTokens: TypeAlias = List[Union[Lexeme, str]]
 172"""
 173When lexing, we pull apart a string into pieces, but when we recognize
 174lexemes, we use their integer IDs in the list instead of strings, so we
 175get a list that's a mix of ints and strings.
 176"""
 177
 178GroupedTokens: TypeAlias = List[Union[Lexeme, str, 'GroupedTokens']]
 179"""
 180Some parsing processes group tokens into sub-lists. This type represents
 181`LexedTokens` which might also contain sub-lists, to arbitrary depth.
 182"""
 183
 184GroupedRequirementParts: TypeAlias = List[
 185    Union[Lexeme, base.Requirement, 'GroupedRequirementParts']
 186]
 187"""
Another intermediate parsing result during requirement parsing: a list
of `base.Requirement` objects, possibly with some sub-lists and/or
`Lexeme`s mixed in.
 191"""
 192
 193
 194def lex(
 195    characters: str,
 196    tokenMap: Optional[Dict[str, Lexeme]] = None
 197) -> LexedTokens:
 198    """
    Lexes a list of tokens from a string of characters. Recognizes any
 200    special characters you provide in the token map, as well as
 201    collections of non-mapped characters. Recognizes double-quoted
 202    strings which can contain any of those (and which use
 203    backslash-escapes for internal double quotes) and includes quoted
 204    versions of those strings as tokens (any token string starting with a
 205    double quote will be such a string). Breaks tokens on whitespace
 206    outside of quotation marks, and ignores that whitespace.
 207
 208    Examples:
 209
 210    >>> lex('abc')
 211    ['abc']
 212    >>> lex('(abc)', {'(': 0, ')': 1})
 213    [0, 'abc', 1]
 214    >>> lex('{(abc)}', {'(': 0, ')': 1, '{': 2, '}': 3})
 215    [2, 0, 'abc', 1, 3]
 216    >>> lex('abc def')
 217    ['abc', 'def']
 218    >>> lex('abc   def')
 219    ['abc', 'def']
 220    >>> lex('abc \\n def')
 221    ['abc', 'def']
    >>> lex('"quoted"')
    ['"quoted"']
    >>> lex('"quoted  pair"')
    ['"quoted  pair"']
    >>> lex('  oneWord | "two words"|"three  words words" ', {'|': 0})
 227    ['oneWord', 0, '"two words"', 0, '"three  words words"']
 228    >>> tokenMap = { c: i for (i, c) in enumerate("(){}~:;>,") }
 229    >>> tokenMap['::'] = 9
 230    >>> tokenMap['~~'] = 10
 231    >>> lex(
 232    ...     '{~~2:best(brains, brawn)>{set switch on}'
 233    ...     '{deactivate ,1; bounce}}',
 234    ...     tokenMap
 235    ... )
 236    [2, 10, '2', 5, 'best', 0, 'brains', 8, 'brawn', 1, 7, 2, 'set',\
 237 'switch', 'on', 3, 2, 'deactivate', 8, '1', 6, 'bounce', 3, 3]
 238    >>> lex('set where::mechanism state', tokenMap)
 239    ['set', 'where', 9, 'mechanism', 'state']
 240    >>> # Note r' doesn't take full effect 'cause we're in triple quotes
 241    >>> esc = r'"escape \\\\a"'
 242    >>> result = [ r'"escape \\\\a"' ]  # 'quoted' doubles the backslash
 243    >>> len(esc)
 244    12
 245    >>> len(result[0])
 246    12
 247    >>> lex(esc) == result
 248    True
 249    >>> quoteInQuote = r'before "hello \\\\ \\" goodbye"after'
 250    >>> # Note r' doesn't take full effect 'cause we're in triple quotes
 251    >>> expect = ['before', r'"hello \\\\ \\" goodbye"', 'after']
 252    >>> lex(quoteInQuote) == expect
 253    True
 254    >>> lex('O\\'Neill')
 255    ["O'Neill"]
 256    >>> lex('one "quote ')
 257    ['one', '"quote "']
 258    >>> lex('geo*15', {'*': 0})
 259    ['geo', 0, '15']
 260    """
 261    if tokenMap is None:
 262        tokenMap = {}
 263    tokenStarts: Dict[str, List[str]] = {}
 264    for key in sorted(tokenMap.keys(), key=lambda x: -len(x)):
 265        tokenStarts.setdefault(key[:1], []).append(key)
 266    tokens: LexedTokens = []
 267    sofar = ''
 268    inQuote = False
 269    escaped = False
 270    skip = 0
 271    for i in range(len(characters)):
 272        if skip > 0:
 273            skip -= 1
 274            continue
 275
 276        char = characters[i]
 277        if escaped:
 278            # TODO: Escape sequences?
 279            sofar += char
 280            escaped = False
 281
 282        elif char == '\\':
 283            if inQuote:
 284                escaped = True
 285            else:
 286                sofar += char
 287
 288        elif char == '"':
 289            if sofar != '':
 290                if inQuote:
 291                    tokens.append(utils.quoted(sofar))
 292                else:
 293                    tokens.append(sofar)
 294            sofar = ''
 295            inQuote = not inQuote
 296
 297        elif inQuote:
 298            sofar += char
 299
 300        elif char in tokenStarts:
 301            options = tokenStarts[char]
 302            hit: Optional[str] = None
 303            for possibility in options:
 304                lp = len(possibility)
 305                if (
 306                    (lp == 1 and char == possibility)
 307                or characters[i:i + lp] == possibility
 308                ):
 309                    hit = possibility
 310                    break
 311
 312            if hit is not None:
 313                if sofar != '':
 314                    tokens.append(sofar)
                tokens.append(tokenMap[hit])
 316                sofar = ''
 317                skip = len(hit) - 1
 318            else:  # Not actually a recognized token
 319                sofar += char
 320
 321        elif char.isspace():
 322            if sofar != '':
 323                tokens.append(sofar)
 324            sofar = ''
 325
 326        else:
 327            sofar += char
 328
 329    if sofar != '':
 330        if inQuote:
 331            tokens.append(utils.quoted(sofar))
 332        else:
 333            tokens.append(sofar)
 334
 335    return tokens
 336
 337
 338def unLex(
 339    tokens: LexedTokens,
 340    tokenMap: Optional[Dict[str, Lexeme]] = None
 341) -> str:
 342    """
 343    Turns lexed stuff back into a string, substituting strings back into
 344    token spots by reversing the given token map. Adds quotation marks to
 345    complex tokens where necessary to prevent them from re-lexing into
    multiple tokens (but `lex` doesn't remove those, so in some cases
 347    there's not a perfect round-trip unLex -> lex).
 348
 349    For example:
 350
 351    >>> unLex(['a', 'b'])
 352    'a b'
 353    >>> tokens = {'(': 0, ')': 1, '{': 2, '}': 3, '::': 4}
 354    >>> unLex([0, 'hi', 1], tokens)
 355    '(hi)'
 356    >>> unLex([0, 'visit', 'zone', 4, 'decision', 1], tokens)
 357    '(visit zone::decision)'
 358    >>> q = unLex(['a complex token', '\\'single\\' and "double" quotes'])
 359    >>> q  # unLex adds quotes
 360    '"a complex token" "\\'single\\' and \\\\"double\\\\" quotes"'
 361    >>> lex(q)  # Not the same as the original list
 362    ['"a complex token"', '"\\'single\\' and \\\\"double\\\\" quotes"']
 363    >>> lex(unLex(lex(q)))  # But further round-trips work
 364    ['"a complex token"', '"\\'single\\' and \\\\"double\\\\" quotes"']
 365
 366    TODO: Fix this:
 367    For now, it generates incorrect results when token combinations can
 368    be ambiguous. These ambiguous token combinations should not ever be
 369    generated by `lex` at least. For example:
 370
 371    >>> ambiguous = {':': 0, '::': 1}
 372    >>> u = unLex(['a', 0, 0, 'b'], ambiguous)
 373    >>> u
 374    'a::b'
 375    >>> l = lex(u, ambiguous)
 376    >>> l
 377    ['a', 1, 'b']
    >>> l == ['a', 0, 0, 'b']  # Not the original token list!
    False
 380    """
 381    if tokenMap is None:
 382        nTokens = 0
 383        revMap = {}
 384    else:
 385        nTokens = len(tokenMap)
 386        revMap = {y: x for (x, y) in tokenMap.items()}
 387
 388    prevRaw = False
 389    # TODO: add spaces where necessary to disambiguate token sequences...
 390    if len(revMap) != nTokens:
 391        warnings.warn(
 392            (
 393                "Irreversible token map! Two or more tokens have the same"
 394                " integer value."
 395            ),
 396            ParseWarning
 397        )
 398
 399    result = ""
 400    for item in tokens:
 401        if isinstance(item, int):
 402            try:
 403                result += revMap[item]
 404            except KeyError:
 405                raise ValueError(
 406                    f"Tokens list contains {item} but the token map"
 407                    f" does not have any entry which maps to {item}."
 408                )
 409            prevRaw = False
 410        elif isinstance(item, str):
 411            if prevRaw:
 412                result += ' '
            if len(lex(item, tokenMap)) > 1:
 414                result += utils.quoted(item)
 415            else:
 416                result += item
 417            prevRaw = True
 418        else:
 419            raise TypeError(
 420                f"Token list contained non-int non-str item:"
 421                f" {repr(item)}"
 422            )
 423
 424    return result
 425
 426
 427#-------------------#
 428# ParseFormat class #
 429#-------------------#
 430
 431def normalizeEnds(
 432    tokens: List,
 433    start: int,
 434    end: int
 435) -> Tuple[int, int, int]:
 436    """
 437    Given a tokens list and start & end integers, does some bounds
 438    checking and normalization on the integers: converts negative
 439    indices to positive indices, and raises an `IndexError` if they're
 440    out-of-bounds after conversion. Returns a tuple containing the
 441    normalized start & end indices, along with the number of tokens they
 442    cover.
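
    For example (an illustrative sketch of the rules above):

    >>> normalizeEnds(['a', 'b', 'c'], 0, -1)
    (0, 2, 3)
    >>> normalizeEnds(['a', 'b', 'c'], -2, -1)
    (1, 2, 2)
    >>> normalizeEnds(['a', 'b', 'c'], 0, 3)
    Traceback (most recent call last):
    ...
    IndexError...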
 443    """
 444    totalTokens = len(tokens)
 445    if start < -len(tokens):
 446        raise IndexError(
 447            f"Negative start index out of bounds (got {start} for"
 448            f" {totalTokens} tokens)."
 449        )
 450    elif start >= totalTokens:
 451        raise IndexError(
 452            f"Start index out of bounds (got {start} for"
 453            f" {totalTokens} tokens)."
 454        )
 455    elif start < 0:
 456        start = totalTokens + start
 457
 458    if end < -len(tokens):
 459        raise IndexError(
 460            f"Negative end index out of bounds (got {end} for"
 461            f" {totalTokens} tokens)."
 462        )
    elif end >= totalTokens:
        raise IndexError(
            f"End index out of bounds (got {end} for"
            f" {totalTokens} tokens)."
        )
 468    elif end < 0:
 469        end = totalTokens + end
 470
 471    if end >= len(tokens):
 472        end = len(tokens) - 1
 473
 474    return (start, end, (end - start) + 1)
 475
 476
 477def findSeparatedParts(
 478    tokens: LexedTokens,
 479    sep: Union[str, int],
 480    start: int = 0,
 481    end: int = -1,
 482    groupStart: Union[str, int, None] = None,
 483    groupEnd: Union[str, int, None] = None
 484) -> Generator[Tuple[int, int], None, None]:
 485    """
 486    Finds parts separated by a separator lexeme, such as ';' or ',', but
 487    ignoring separators nested within groupStart/groupEnd pairs (if
 488    those arguments are supplied). For each token sequence found, yields
 489    a tuple containing the start index and end index for that part, with
 490    separators not included in the parts.
 491
 492    If two separators appear in a row, the start/end pair will have a
 493    start index one after the end index.
 494
 495    If there are no separators, yields one pair containing the start and
 496    end of the entire tokens sequence.
 497
 498    Raises a `ParseError` if there are unbalanced grouping elements.
 499
 500    For example:
 501
 502    >>> list(findSeparatedParts(
 503    ...     [ 'one' ],
 504    ...     Lexeme.sepOrDelay,
 505    ...     0,
 506    ...     0,
 507    ...     Lexeme.openParen,
 508    ...     Lexeme.closeParen
 509    ... ))
 510    [(0, 0)]
 511    >>> list(findSeparatedParts(
 512    ...     [
 513    ...         'best',
 514    ...         Lexeme.openParen,
 515    ...         'chess',
 516    ...         Lexeme.sepOrDelay,
 517    ...         'checkers',
 518    ...         Lexeme.closeParen
 519    ...     ],
 520    ...     Lexeme.sepOrDelay,
 521    ...     2,
 522    ...     4,
 523    ...     Lexeme.openParen,
 524    ...     Lexeme.closeParen
 525    ... ))
 526    [(2, 2), (4, 4)]
 527    """
 528    start, end, n = normalizeEnds(tokens, start, end)
 529    level = 0
 530    thisStart = start
 531    for i in range(start, end + 1):
 532        token = tokens[i]
 533        if token == sep and level == 0:
 534            yield (thisStart, i - 1)
 535            thisStart = i + 1
 536        elif token == groupStart:
 537            level += 1
 538        elif token == groupEnd:
 539            level -= 1
 540            if level < 0:
 541                raise ParseError("Unbalanced grouping tokens.")
    if level > 0:  # leftover unclosed grouping token(s)
        raise ParseError("Unbalanced grouping tokens.")
 544    yield (thisStart, end)
 545
 546
 547K = TypeVar('K')
 548"Type variable for dictionary keys."
 549V = TypeVar('V')
 550"Type variable for dictionary values."
 551
 552
 553def checkCompleteness(
    name: str,
 555    mapping: Dict[K, V],
 556    keysSet: Optional[Set[K]] = None,
 557    valuesSet: Optional[Set[V]] = None
 558):
 559    """
 560    Checks that a dictionary has a certain exact set of keys (or
 561    values). Raises a `ValueError` if it finds an extra or missing key
 562    or value.
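
    For example (a small sketch; `name` only appears in error
    messages):

    >>> checkCompleteness('demo', {'a': 1}, keysSet={'a'})
    >>> checkCompleteness('demo', {'a': 1}, valuesSet={1})
    >>> checkCompleteness('demo', {'a': 1}, keysSet={'a', 'b'})
    Traceback (most recent call last):
    ...
    ValueError...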
 563    """
 564    if keysSet is not None:
 565        for key in mapping.keys():
 566            if key not in keysSet:
                raise ValueError(f"{name} has extra key {repr(key)}.")
 568
 569        for key in keysSet:
 570            if key not in mapping:
                raise ValueError(f"{name} is missing key {repr(key)}.")
 572
 573    if valuesSet is not None:
 574        for value in mapping.values():
 575            if value not in valuesSet:
                raise ValueError(f"{name} has extra value {repr(value)}.")
 577
 578        checkVals = mapping.values()
 579        for value in valuesSet:
 580            if value not in checkVals:
                raise ValueError(f"{name} is missing value {repr(value)}.")
 582
 583
 584class ParseFormat:
 585    """
 586    A ParseFormat manages the mapping from markers to entry types and
 587    vice versa.
 588    """
 589    def __init__(
 590        self,
 591        formatDict: Format = DEFAULT_FORMAT,
 592        effectNames: Dict[str, base.EffectType] = DEFAULT_EFFECT_NAMES,
 593        focalizationNames: Dict[
 594            str,
 595            base.DomainFocalization
 596        ] = DEFAULT_FOCALIZATION_NAMES,
 597        successFailureIndicators: Tuple[str, str] = DEFAULT_SF_INDICATORS
 598    ):
 599        """
 600        Sets up the parsing format. Requires a `Format` dictionary to
 601        define the specifics. Raises a `ValueError` unless the keys of
 602        the `Format` dictionary exactly match the `Lexeme` values.
 603        """
 604        self.formatDict = formatDict
 605        self.effectNames = effectNames
 606        self.focalizationNames = focalizationNames
 607        if (
 608            len(successFailureIndicators) != 2
 609        or any(len(i) != 1 for i in successFailureIndicators)
 610        ):
 611            raise ValueError(
 612                f"Invalid success/failure indicators: must be a pair of"
 613                f" length-1 strings. Got: {successFailureIndicators!r}"
 614            )
 615        self.successIndicator, self.failureIndicator = (
 616            successFailureIndicators
 617        )
 618
 619        # Check completeness for each dictionary
 620        checkCompleteness('formatDict', self.formatDict, set(Lexeme))
 621        checkCompleteness(
 622            'effectNames',
 623            self.effectNames,
 624            valuesSet=set(get_args(base.EffectType))
 625        )
 626        checkCompleteness(
 627            'focalizationNames',
 628            self.focalizationNames,
 629            valuesSet=set(get_args(base.DomainFocalization))
 630        )
 631
        # Build some reverse lookup dictionaries for specific
        # circumstances:
        self.reverseFormat = {y: x for (x, y) in self.formatDict.items()}

 636        self.effectModMap = {
 637            self.formatDict[x]: x
 638            for x in [
 639                Lexeme.effectCharges,
 640                Lexeme.sepOrDelay,
 641                Lexeme.inCommon,
 642                Lexeme.isHidden
 643            ]
 644        }
 645
 646    def lex(self, content: str) -> LexedTokens:
 647        """
 648        Applies `lex` using this format's lexeme mapping.
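
        For example (with the default format; the exact `Lexeme` value
        is elided):

        >>> pf = ParseFormat()
        >>> pf.lex('main//decision')
        ['main', <Lexeme.domainSeparator: ...>, 'decision']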
 649        """
 650        return lex(content, self.reverseFormat)
 651
 652    def onOff(self, word: str) -> Optional[bool]:
 653        """
 654        Parse an on/off indicator and returns a boolean (`True` for on
 655        and `False` for off). Returns `None` if the word isn't either
 656        the 'on' or the 'off' word. Generates a `ParseWarning`
 657        (and still returns `None`) if the word is a case-swapped version
 658        of the 'on' or 'off' word and is not equal to either of them.
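
        For example (using the default '=on' and '=off' words):

        >>> pf = ParseFormat()
        >>> pf.onOff('=on')
        True
        >>> pf.onOff('=off')
        False
        >>> pf.onOff('maybe') is None
        True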
 659        """
 660        onWord = self.formatDict[Lexeme.stateOn]
 661        offWord = self.formatDict[Lexeme.stateOff]
 662
 663        # Generate warning if we suspect a case error
 664        if (
            word.casefold() in (onWord.casefold(), offWord.casefold())
 666        and word not in (onWord, offWord)
 667        ):
 668            warnings.warn(
 669                (
 670                    f"Word '{word}' cannot be interpreted as an on/off"
 671                    f" value, although it is almost one (the correct"
 672                    f" values are '{onWord}' and '{offWord}'."
 673                ),
 674                ParseWarning
 675            )
 676
 677        # return the appropriate value
 678        if word == onWord:
 679            return True
 680        elif word == offWord:
 681            return False
 682        else:
 683            return None
 684
 685    def matchingBrace(
 686        self,
 687        tokens: LexedTokens,
 688        where: int,
 689        opener: int = Lexeme.openCurly,
 690        closer: int = Lexeme.closeCurly
 691    ) -> int:
 692        """
 693        Returns the index within the given tokens list of the closing
 694        curly brace which matches the open brace at the specified index.
 695        You can specify custom `opener` and/or `closer` lexemes to find
 696        matching pairs of other things. Raises a `ParseError` if there
 697        is no opening brace at the specified index, or if there isn't a
 698        matching closing brace. Handles nested braces of the specified
 699        type.
 700
 701        Examples:
 702        >>> pf = ParseFormat()
 703        >>> ob = Lexeme.openCurly
 704        >>> cb = Lexeme.closeCurly
 705        >>> pf.matchingBrace([ob, cb], 0)
 706        1
 707        >>> pf.matchingBrace([ob, cb], 1)
 708        Traceback (most recent call last):
 709          ...
 710        exploration.parsing.ParseError: ...
 711        >>> pf.matchingBrace(['hi', ob, cb], 0)
 712        Traceback (most recent call last):
 713          ...
 714        exploration.parsing.ParseError: ...
 715        >>> pf.matchingBrace(['hi', ob, cb], 1)
 716        2
 717        >>> pf.matchingBrace(['hi', ob, 'lo', cb], 1)
 718        3
 719        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 1)
 720        Traceback (most recent call last):
 721          ...
 722        exploration.parsing.ParseError: ...
 723        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 0)
 724        3
 725        >>> pf.matchingBrace([ob, ob, cb, cb], 0)
 726        3
 727        >>> pf.matchingBrace([ob, ob, cb, cb], 1)
 728        2
 729        >>> pf.matchingBrace([ob, cb, ob, cb], 0)
 730        1
 731        >>> pf.matchingBrace([ob, cb, ob, cb], 2)
 732        3
 733        >>> pf.matchingBrace([ob, cb, cb, cb], 0)
 734        1
 735        >>> pf.matchingBrace([ob, ob, ob, cb], 0)
 736        Traceback (most recent call last):
 737          ...
 738        exploration.parsing.ParseError: ...
 739        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 0)
 740        7
 741        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 1)
 742        6
 743        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 2)
 744        Traceback (most recent call last):
 745          ...
 746        exploration.parsing.ParseError: ...
 747        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 3)
 748        4
 749        >>> op = Lexeme.openParen
 750        >>> cp = Lexeme.closeParen
 751        >>> pf.matchingBrace([ob, op, ob, cp], 1, op, cp)
 752        3
 753        """
 754        if where >= len(tokens):
 755            raise ParseError(
 756                f"Out-of-bounds brace start: index {where} with"
 757                f" {len(tokens)} tokens."
 758            )
 759        if tokens[where] != opener:
 760            raise ParseError(
 761                f"Can't find matching brace for token"
 762                f" {repr(tokens[where])} at index {where} because it's"
 763                f" not an open brace."
 764            )
 765
 766        level = 1
 767        for i in range(where + 1, len(tokens)):
 768            token = tokens[i]
 769            if token == opener:
 770                level += 1
 771            elif token == closer:
 772                level -= 1
 773                if level == 0:
 774                    return i
 775
 776        raise ParseError(
 777            f"Failed to find matching curly brace from index {where}."
 778        )
 779
 780    def parseFocalization(self, word: str) -> base.DomainFocalization:
 781        """
 782        Parses a focalization type for a domain, recognizing
 783        'domainFocalizationSingular', 'domainFocalizationPlural', and
 784        'domainFocalizationSpreading'.
 785        """
 786        try:
 787            return self.focalizationNames[word]
 788        except KeyError:
 789            raise ParseError(
 790                f"Invalid domain focalization name {repr(word)}. Valid"
 791                f" name are: {repr(list(self.focalizationNames))}'."
 792            )
 793
 794    def parseTagValue(self, value: str) -> base.TagValue:
 795        """
 796        Converts a string to a tag value, following these rules:
 797
 798        1. If the string is exactly one of 'None', 'True', or 'False', we
 799            convert it to the corresponding Python value.
 800        2. If the string can be converted to an integer without raising a
 801            ValueError, we use that integer.
 802        3. If the string can be converted to a float without raising a
 803            ValueError, we use that float.
 804        4. Otherwise, it remains a string.
 805
 806        Note that there is currently no syntax for using list, dictionary,
 807        Requirement, or Consequence tag values.
 808        TODO: Support those types?
 809
 810        Examples:
 811
 812        >>> pf = ParseFormat()
 813        >>> pf.parseTagValue('hi')
 814        'hi'
 815        >>> pf.parseTagValue('3')
 816        3
 817        >>> pf.parseTagValue('3.0')
 818        3.0
 819        >>> pf.parseTagValue('True')
 820        True
 821        >>> pf.parseTagValue('False')
 822        False
 823        >>> pf.parseTagValue('None') is None
 824        True
 825        >>> pf.parseTagValue('none')
 826        'none'
 827        """
 828        # TODO: Allow these keywords to be redefined?
 829        if value == 'True':
 830            return True
 831        elif value == 'False':
 832            return False
 833        elif value == 'None':
 834            return None
 835        else:
 836            try:
 837                return int(value)
 838            except ValueError:
 839                try:
 840                    return float(value)
 841                except ValueError:
 842                    return value
 843
 844    def unparseTagValue(self, value: base.TagValue) -> str:
 845        """
 846        Converts a tag value into a string that would be parsed back into a
 847        tag value via `parseTagValue`. Currently does not work for list,
 848        dictionary, Requirement, or Consequence values.
 849        TODO: Those
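
        For example (simple values round-trip through
        `parseTagValue`):

        >>> pf = ParseFormat()
        >>> pf.unparseTagValue(3)
        '3'
        >>> pf.unparseTagValue(None)
        'None'
        >>> pf.parseTagValue(pf.unparseTagValue(3.5))
        3.5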
 850        """
 851        return str(value)
 852
 853    def hasZoneParts(self, name: str) -> bool:
 854        """
 855        Returns true if the specified name contains zone parts (using
 856        the `zoneSeparator`).
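
        For example (with the default '::' zone separator):

        >>> pf = ParseFormat()
        >>> pf.hasZoneParts('zone::decision')
        True
        >>> pf.hasZoneParts('decision')
        False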
 857        """
 858        return self.formatDict[Lexeme.zoneSeparator] in name
 859
 860    def splitZone(
 861        self,
 862        name: str
 863    ) -> Tuple[List[base.Zone], base.DecisionName]:
 864        """
 865        Splits a decision name that includes zone information into the
 866        list-of-zones part and the decision part. If there is no zone
 867        information in the name, the list-of-zones will be an empty
 868        list.
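
        For example:

        >>> pf = ParseFormat()
        >>> pf.splitZone('a::b::decision')
        (['a', 'b'], 'decision')
        >>> pf.splitZone('decision')
        ([], 'decision')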
 869        """
 870        sep = self.formatDict[Lexeme.zoneSeparator]
 871        parts = name.split(sep)
 872        return (list(parts[:-1]), parts[-1])
 873
 874    def prefixWithZone(
 875        self,
 876        name: base.DecisionName,
 877        zone: base.Zone
 878    ) -> base.DecisionName:
 879        """
 880        Returns the given decision name, prefixed with the given zone
 881        name. Does NOT check whether the decision name already includes
 882        a prefix or not.
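
        For example:

        >>> pf = ParseFormat()
        >>> pf.prefixWithZone('decision', 'zone')
        'zone::decision'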
 883        """
 884        return zone + self.formatDict[Lexeme.zoneSeparator] + name
 885
 886    def parseAnyTransitionFromTokens(
 887        self,
 888        tokens: LexedTokens,
 889        start: int = 0
 890    ) -> Tuple[base.TransitionWithOutcomes, int]:
 891        """
 892        Parses a `base.TransitionWithOutcomes` from a tokens list,
 893        accepting either a transition name or a transition name followed
 894        by a `Lexeme.withDetails` followed by a string of success and
 895        failure indicator characters. Returns a tuple containing a
 896        `base.TransitionWithOutcomes` and an integer indicating the end
 897        index of the parsed item within the tokens.
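
        For example (with the default 's'/'f' indicators):

        >>> pf = ParseFormat()
        >>> pf.parseAnyTransitionFromTokens(['door'])
        (('door', []), 0)
        >>> pf.parseAnyTransitionFromTokens(
        ...     ['door', Lexeme.withDetails, 'sf']
        ... )
        (('door', [True, False]), 2)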
 898        """
 899        # Normalize start index so we can do index math
 900        if start < 0:
 901            useIndex = len(tokens) + start
 902        else:
 903            useIndex = start
 904
 905        try:
 906            first = tokens[useIndex]
 907        except IndexError:
 908            raise ParseError(
 909                f"Invalid token index: {start!r} among {len(tokens)}"
 910                f" tokens."
 911            )
 912
 913        if isinstance(first, Lexeme):
 914            raise ParseError(
 915                f"Expecting a transition name (possibly with a"
 916                f" success/failure indicator string) but first token is"
 917                f" {first!r}."
 918            )
 919
 920        try:
 921            second = tokens[useIndex + 1]
 922            third = tokens[useIndex + 2]
 923        except IndexError:
 924            return ((first, []), useIndex)
 925
 926        if second != Lexeme.withDetails or isinstance(third, Lexeme):
 927            return ((first, []), useIndex)
 928
 929        outcomes = []
 930        for char in third:
 931            if char == self.successIndicator:
 932                outcomes.append(True)
 933            elif char == self.failureIndicator:
 934                outcomes.append(False)
 935            else:
 936                return ((first, []), useIndex)
 937
 938        return ((first, outcomes), useIndex + 2)
 939
 940    def parseTransitionWithOutcomes(
 941        self,
 942        content: str
 943    ) -> base.TransitionWithOutcomes:
 944        """
        Takes a transition that may have outcomes listed as a series of
        success/failure indicator characters after a
        `Lexeme.withDetails` marker ('%' by default) and returns the
        corresponding `TransitionWithOutcomes` tuple. Calls `lex` and
        then `parseAnyTransitionFromTokens`.
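
        For example:

        >>> pf = ParseFormat()
        >>> pf.parseTransitionWithOutcomes('door%sf')
        ('door', [True, False])
        >>> pf.parseTransitionWithOutcomes('door')
        ('door', [])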
 949        """
 950        return self.parseAnyTransitionFromTokens(self.lex(content))[0]
 951
    def unparseTransitionWithOutcomes(
 953        self,
 954        transition: base.AnyTransition
 955    ) -> str:
 956        """
 957        Turns a `base.AnyTransition` back into a string that would parse
 958        to an equivalent `base.TransitionWithOutcomes` via
 959        `parseTransitionWithOutcomes`. If a bare `base.Transition` is
 960        given, returns a string that would result in a
 961        `base.TransitionWithOutcomes` that has an empty outcomes
 962        sequence.
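
        For example (inverting the cases above):

        >>> pf = ParseFormat()
        >>> pf.unparseTransitionWithOutcomes(('door', [True, False]))
        'door%sf'
        >>> pf.unparseTransitionWithOutcomes('door')
        'door'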
 963        """
 964        if isinstance(transition, base.Transition):
 965            return transition
 966        elif (
 967            isinstance(transition, tuple)
 968        and len(transition) == 2
 969        and isinstance(transition[0], base.Transition)
 970        and isinstance(transition[1], list)
 971        and all(isinstance(sfi, bool) for sfi in transition[1])
 972        ):
 973            if len(transition[1]) == 0:
 974                return transition[0]
 975            else:
 976                result = transition[0] + self.formatDict[Lexeme.withDetails]
 977                for outcome in transition[1]:
 978                    if outcome:
 979                        result += self.successIndicator
 980                    else:
 981                        result += self.failureIndicator
 982                return result
 983        else:
 984            raise TypeError(
 985                f"Invalid AnyTransition: neither a string, nor a"
 986                f" length-2 tuple consisting of a string followed by a"
 987                f" list of booleans. Got: {transition!r}"
 988            )
 989
 990    def parseSpecificTransition(
 991        self,
 992        content: str
 993    ) -> Tuple[base.DecisionName, base.Transition]:
 994        """
        Splits a combined decision-and-transition string into its
        decision part and transition part, using the
        `Lexeme.withDetails` marker ('%' by default) as the separator.
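
        For example:

        >>> pf = ParseFormat()
        >>> pf.parseSpecificTransition('room%door')
        ('room', 'door')
        >>> pf.parseSpecificTransition('roomdoor')
        Traceback (most recent call last):
        ...
        exploration.parsing.ParseError...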
 997        """
 998        sep = self.formatDict[Lexeme.withDetails]
 999        n = content.count(sep)
1000        if n == 0:
1001            raise ParseError(
1002                f"Cannot split '{content}' into a decision name and a"
1003                f" transition name (no separator '{sep}' found)."
1004            )
1005        elif n > 1:
1006            raise ParseError(
1007                f"Cannot split '{content}' into a decision name and a"
1008                f" transition name (too many ({n}) '{sep}' separators"
1009                f" found)."
1010            )
1011        else:
1012            return cast(
1013                Tuple[base.DecisionName, base.Transition],
1014                tuple(content.split(sep))
1015            )
1016
1017    def splitDirections(
1018        self,
1019        content: str
1020    ) -> Tuple[Optional[str], Optional[str]]:
1021        """
        Splits a piece of text using the `Lexeme.reciprocalSeparator`
1023        into two pieces. If there is no separator, the second piece will
1024        be `None`; if either side of the separator is blank, that side
1025        will be `None`, and if there is more than one separator, a
1026        `ParseError` will be raised. Whitespace will be stripped from
1027        both sides of each result.
1028
1029        Examples:
1030
1031        >>> pf = ParseFormat()
1032        >>> pf.splitDirections('abc / def')
1033        ('abc', 'def')
1034        >>> pf.splitDirections('abc def ')
1035        ('abc def', None)
1036        >>> pf.splitDirections('abc def /')
1037        ('abc def', None)
1038        >>> pf.splitDirections('/abc def')
1039        (None, 'abc def')
1040        >>> pf.splitDirections('a/b/c') # doctest: +IGNORE_EXCEPTION_DETAIL
1041        Traceback (most recent call last):
1042          ...
1043        ParseError: ...
1044        """
1045        sep = self.formatDict[Lexeme.reciprocalSeparator]
1046        count = content.count(sep)
1047        if count > 1:
1048            raise ParseError(
1049                f"Too many split points ('{sep}') in content:"
1050                f" '{content}' (only one is allowed)."
1051            )
1052
1053        elif count == 1:
1054            before, after = content.split(sep)
1055            before = before.strip()
1056            after = after.strip()
1057            return (before or None, after or None)
1058
1059        else: # no split points
1060            stripped = content.strip()
1061            if stripped:
1062                return stripped, None
1063            else:
1064                return None, None
1065
1066    def parseItem(
1067        self,
1068        item: str
1069    ) -> Union[
1070        base.Capability,
1071        Tuple[base.Token, int],
1072        Tuple[base.MechanismName, base.MechanismState]
1073    ]:
1074        """
1075        Parses an item, which is a capability (just a string), a
1076        token-type*number pair (returned as a tuple with the number
1077        converted to an integer), or a mechanism-name:state pair
1078        (returned as a tuple with the state as a string). The
        `Lexeme.tokenCount` and `Lexeme.mechanismSeparator` format
1080        values determine the separators that this looks for.
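
        For example (using the default '*' and ':' separators):

        >>> pf = ParseFormat()
        >>> pf.parseItem('fireproof')
        'fireproof'
        >>> pf.parseItem('coin*3')
        ('coin', 3)
        >>> pf.parseItem('lever:up')
        ('lever', 'up')
        >>> pf.parseItem('zone::decision')  # just a zone-qualified name
        'zone::decision'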
1081        """
1082        tsep = self.formatDict[Lexeme.tokenCount]
1083        msep = self.formatDict[Lexeme.mechanismSeparator]
1084        if tsep in item:
1085            # It's a token w/ an associated count
1086            parts = item.split(tsep)
1087            if len(parts) != 2:
1088                raise ParseError(
1089                    f"Item '{item}' has a '{tsep}' but doesn't separate"
1090                    f" into a token type and a count."
1091                )
1092            typ, count = parts
1093            try:
1094                num = int(count)
1095            except ValueError:
1096                raise ParseError(
1097                    f"Item '{item}' has invalid token count '{count}'."
1098                )
1099
1100            return (typ, num)
1101        elif msep in item:
1102            parts = item.split(msep)
1103            mechanism = msep.join(parts[:-1])
1104            state = parts[-1]
            if mechanism.endswith(msep):
1106                # Just a zone-qualified name...
1107                return item
1108            else:
1109                return (mechanism, state)
1110        else:
1111            # It's just a capability
1112            return item
1113
1114    def unparseDecisionSpecifier(self, spec: base.DecisionSpecifier) -> str:
1115        """
        Turns a decision specifier back into a string that would be
        parsed as an equivalent decision specifier in the various
        contexts that accept one.
1119
1120        For example:
1121
1122        >>> pf = ParseFormat()
1123        >>> pf.unparseDecisionSpecifier(
1124        ...     base.DecisionSpecifier(None, None, 'where')
1125        ... )
1126        'where'
1127        >>> pf.unparseDecisionSpecifier(
1128        ...     base.DecisionSpecifier(None, 'zone', 'where')
1129        ... )
1130        'zone::where'
1131        >>> pf.unparseDecisionSpecifier(
1132        ...     base.DecisionSpecifier('domain', 'zone', 'where')
1133        ... )
1134        'domain//zone::where'
1135        >>> pf.unparseDecisionSpecifier(
1136        ...     base.DecisionSpecifier('domain', None, 'where')
1137        ... )
1138        'domain//where'
1139        """
1140        result = spec.name
1141        if spec.zone is not None:
1142            result = (
1143                spec.zone
1144              + self.formatDict[Lexeme.zoneSeparator]
1145              + result
1146            )
1147        if spec.domain is not None:
1148            result = (
1149                spec.domain
1150              + self.formatDict[Lexeme.domainSeparator]
1151              + result
1152            )
1153        return result
1154
1155    def unparseMechanismSpecifier(
1156        self,
1157        spec: base.MechanismSpecifier
1158    ) -> str:
1159        """
        Turns a mechanism specifier back into a string that would be
        parsed as an equivalent mechanism specifier in the various
        contexts that accept one. Note that a mechanism specifier with
        a zone part but no decision part is not valid, since the zone
        part would parse as a decision part instead.
1165
1166        For example:
1167
1168        >>> pf = ParseFormat()
1169        >>> pf.unparseMechanismSpecifier(
1170        ...     base.MechanismSpecifier(None, None, None, 'lever')
1171        ... )
1172        'lever'
1173        >>> pf.unparseMechanismSpecifier(
1174        ...     base.MechanismSpecifier('domain', 'zone', 'decision', 'door')
1175        ... )
1176        'domain//zone::decision::door'
1177        >>> pf.unparseMechanismSpecifier(
1178        ...     base.MechanismSpecifier('domain', None, None, 'door')
1179        ... )
1180        'domain//door'
1181        >>> pf.unparseMechanismSpecifier(
1182        ...     base.MechanismSpecifier(None, 'a', 'b', 'door')
1183        ... )
1184        'a::b::door'
1185        >>> pf.unparseMechanismSpecifier(
1186        ...     base.MechanismSpecifier(None, 'a', None, 'door')
1187        ... )
1188        Traceback (most recent call last):
1189        ...
1190        exploration.base.InvalidMechanismSpecifierError...
1191        >>> pf.unparseMechanismSpecifier(
1192        ...     base.MechanismSpecifier(None, None, 'a', 'door')
1193        ... )
1194        'a::door'
1195        """
1196        if spec.decision is None and spec.zone is not None:
1197            raise base.InvalidMechanismSpecifierError(
1198                f"Mechanism specifier has a zone part but no decision"
1199                f" part; it cannot be unparsed since it would parse"
1200                f" differently:\n{spec}"
1201            )
1202        result = spec.name
1203        if spec.decision is not None:
1204            result = (
1205                spec.decision
1206              + self.formatDict[Lexeme.zoneSeparator]
1207              + result
1208            )
1209        if spec.zone is not None:
1210            result = (
1211                spec.zone
1212              + self.formatDict[Lexeme.zoneSeparator]
1213              + result
1214            )
1215        if spec.domain is not None:
1216            result = (
1217                spec.domain
1218              + self.formatDict[Lexeme.domainSeparator]
1219              + result
1220            )
1221        return result
1222
1223    def effectType(self, effectMarker: str) -> Optional[base.EffectType]:
1224        """
1225        Returns the `base.EffectType` string corresponding to the
1226        given effect marker string. Returns `None` for an unrecognized
1227        marker.
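
        For example (assuming 'deactivate' is among the
        `base.EffectType` values, as it is with the default effect
        names):

        >>> pf = ParseFormat()
        >>> pf.effectType('deactivate')  # assumed valid in the defaults
        'deactivate'
        >>> pf.effectType('nonsense') is None
        True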
1228        """
1229        return self.effectNames.get(effectMarker)
1230
1231    def parseCommandFromTokens(
1232        self,
1233        tokens: LexedTokens,
1234        start: int = 0,
1235        end: int = -1
1236    ) -> commands.Command:
1237        """
1238        Given tokens that specify a `commands.Command`, parses that
1239        command and returns it. Really just turns the tokens back into
1240        strings and calls `commands.command`.
1241
1242        For example:
1243
1244        >>> pf = ParseFormat()
1245        >>> t = ['val', '5']
1246        >>> c = commands.command(*t)
1247        >>> pf.parseCommandFromTokens(t) == c
1248        True
1249        >>> t = ['op', Lexeme.tokenCount, '$val', '$val']
1250        >>> c = commands.command('op', '*', '$val', '$val')
1251        >>> pf.parseCommandFromTokens(t) == c
1252        True
1253        """
1254        start, end, nTokens = normalizeEnds(tokens, start, end)
1255        args: List[str] = []
1256        for token in tokens[start:end + 1]:
1257            if isinstance(token, Lexeme):
1258                args.append(self.formatDict[token])
1259            else:
1260                args.append(token)
1261
1262        if len(args) == 0:
1263            raise ParseError(
1264                f"No arguments for command:\n{tokens[start:end + 1]}"
1265            )
1266        return commands.command(*args)
1267
1268    def unparseCommand(self, command: commands.Command) -> str:
1269        """
1270        Turns a `Command` back into the string that would produce that
1271        command when parsed using `parseCommandList`.
1272
1273        Note that the results will be more explicit in some cases than what
1274        `parseCommandList` would accept as input.
1275
1276        For example:
1277
1278        >>> pf = ParseFormat()
1279        >>> pf.unparseCommand(
1280        ...     commands.LiteralValue(command='val', value='5')
1281        ... )
1282        'val 5'
1283        >>> pf.unparseCommand(
1284        ...     commands.LiteralValue(command='val', value='"5"')
1285        ... )
1286        'val "5"'
1287        >>> pf.unparseCommand(
1288        ...     commands.EstablishCollection(
1289        ...         command='empty',
1290        ...         collection='list'
1291        ...     )
1292        ... )
1293        'empty list'
1294        >>> pf.unparseCommand(
1295        ...     commands.AppendValue(command='append', value='$_')
1296        ... )
1297        'append $_'
1298        """
1299        candidate = None
1300        for k, v in commands.COMMAND_SETUP.items():
1301            if v[0] == type(command):
1302                if candidate is None:
1303                    candidate = k
1304                else:
1305                    raise ValueError(
1306                        f"COMMAND_SETUP includes multiple keys with"
1307                        f" {type(command)} as their value type:"
1308                        f" '{candidate}' and '{k}'."
1309                    )
1310
1311        if candidate is None:
1312            raise ValueError(
1313                f"COMMAND_SETUP has no key with {type(command)} as its"
1314                f" value type."
1315            )
1316
1317        result = candidate
1318        for x in command[1:]:
1319            # TODO: Is this hack good enough?
1320            result += ' ' + str(x)
1321        return result
1322
1323    def unparseCommandList(self, commands: List[commands.Command]) -> str:
1324        """
1325        Takes a list of commands and returns a string that would parse
1326        into them using `parseOneEffectArg`. The result contains
1327        newlines and indentation to make it easier to read.
1328
1329        For example:
1330
1331        >>> pf = ParseFormat()
1332        >>> pf.unparseCommandList(
1333        ...     [commands.command('val', '5'), commands.command('pop')]
1334        ... )
1335        '{\\n  val 5;\\n  pop;\\n}'
1336        """
1337        result = self.formatDict[Lexeme.openCurly]
1338        for cmd in commands:
1339            result += f'\n  {self.unparseCommand(cmd)};'
1340        if len(commands) > 0:
1341            result += '\n'
1342        return result + self.formatDict[Lexeme.closeCurly]
1343
1344    def parseCommandListFromTokens(
1345        self,
1346        tokens: LexedTokens,
1347        start: int = 0
1348    ) -> Tuple[List[commands.Command], int]:
1349        """
1350        Parses a command list from a list of lexed tokens, which must
1351        start with `Lexeme.openCurly`. Returns the parsed command list
        as a list of `commands.Command` objects, along with the end
        index of that command list (which will be the index of the
        matching closing curly brace).
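
        For example:

        >>> pf = ParseFormat()
        >>> pf.parseCommandListFromTokens(
        ...     [Lexeme.openCurly, 'val', '5', Lexeme.closeCurly]
        ... ) == ([commands.command('val', '5')], 3)
        True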
1355        """
1356        end = self.matchingBrace(
1357            tokens,
1358            start,
1359            Lexeme.openCurly,
1360            Lexeme.closeCurly
1361        )
1362        parts = list(
1363            findSeparatedParts(
1364                tokens,
1365                Lexeme.consequenceSeparator,
1366                start + 1,
1367                end - 1,
1368                Lexeme.openCurly,
1369                Lexeme.closeCurly,
1370            )
1371        )
1372        return (
1373            [
1374                self.parseCommandFromTokens(tokens, fromIndex, toIndex)
1375                for fromIndex, toIndex in parts
1376                if fromIndex <= toIndex  # ignore empty parts
1377            ],
1378            end
1379        )
1380
1381    def parseOneEffectArg(
1382        self,
1383        tokens: LexedTokens,
1384        start: int = 0,
1385        limit: Optional[int] = None
1386    ) -> Tuple[
1387        Union[
1388            base.Capability,  # covers 'str' possibility
1389            Tuple[base.Token, base.TokenCount],
1390            Tuple[Literal['skill'], base.Skill, base.Level],
1391            Tuple[base.MechanismSpecifier, base.MechanismState],
1392            base.DecisionSpecifier,
1393            base.DecisionID,
1394            Literal[Lexeme.inCommon, Lexeme.isHidden],
1395            Tuple[Literal[Lexeme.sepOrDelay, Lexeme.effectCharges], int],
1396            List[commands.Command]
1397        ],
1398        int
1399    ]:
1400        """
1401        Looks at tokens starting at the specified position and parses
1402        one or more of them as an effect argument (an argument that
1403        could be given to `base.effect`). Looks at various key `Lexeme`s
1404        to determine which type to use.
1405
1406        Items in the tokens list beyond the specified limit will not be
1407        considered, even when they in theory could be grouped with items
1408        up to the limit into a more complex argument.
1409
1410        For example:
1411
1412        >>> pf = ParseFormat()
1413        >>> pf.parseOneEffectArg(['hi'])
1414        ('hi', 0)
1415        >>> pf.parseOneEffectArg(['hi'], 1)
1416        Traceback (most recent call last):
1417        ...
1418        IndexError...
1419        >>> pf.parseOneEffectArg(['hi', 'bye'])
1420        ('hi', 0)
1421        >>> pf.parseOneEffectArg(['hi', 'bye'], 1)
1422        ('bye', 1)
1423        >>> pf.parseOneEffectArg(
1424        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1425        ...     0
1426        ... )
1427        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1428 name='gate'), 'open'), 2)
1429        >>> pf.parseOneEffectArg(
1430        ...     ['set', 'gate', Lexeme.mechanismSeparator, 'open'],
1431        ...     1
1432        ... )
1433        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1434 name='gate'), 'open'), 3)
1435        >>> pf.parseOneEffectArg(
1436        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1437        ...     1
1438        ... )
1439        Traceback (most recent call last):
1440        ...
1441        exploration.parsing.ParseError...
1442        >>> pf.parseOneEffectArg(
1443        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1444        ...     2
1445        ... )
1446        ('open', 2)
1447        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, '10'], 0)
1448        (('gold', 10), 2)
1449        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, 'ten'], 0)
1450        Traceback (most recent call last):
1451        ...
1452        exploration.parsing.ParseError...
1453        >>> pf.parseOneEffectArg([Lexeme.inCommon], 0)
1454        (<Lexeme.inCommon: ...>, 0)
1455        >>> pf.parseOneEffectArg([Lexeme.isHidden], 0)
1456        (<Lexeme.isHidden: ...>, 0)
1457        >>> pf.parseOneEffectArg([Lexeme.tokenCount, '3'], 0)
1458        Traceback (most recent call last):
1459        ...
1460        exploration.parsing.ParseError...
1461        >>> pf.parseOneEffectArg([Lexeme.effectCharges, '3'], 0)
1462        ((<Lexeme.effectCharges: ...>, 3), 1)
1463        >>> pf.parseOneEffectArg([Lexeme.tokenCount, 3], 0)  # int is a lexeme
1464        Traceback (most recent call last):
1465        ...
1466        exploration.parsing.ParseError...
1467        >>> pf.parseOneEffectArg([Lexeme.sepOrDelay, '-2'], 0)
1468        ((<Lexeme.sepOrDelay: ...>, -2), 1)
1469        >>> pf.parseOneEffectArg(['agility', Lexeme.skillLevel, '3'], 0)
1470        (('skill', 'agility', 3), 2)
1471        >>> pf.parseOneEffectArg(
1472        ...     [
1473        ...         'main',
1474        ...         Lexeme.domainSeparator,
1475        ...         'zone',
1476        ...         Lexeme.zoneSeparator,
1477        ...         'decision',
1478        ...         Lexeme.zoneSeparator,
1479        ...         'compass',
1480        ...         Lexeme.mechanismSeparator,
1481        ...         'north',
1482        ...         'south',
1483        ...         'east',
1484        ...         'west'
1485        ...     ],
1486        ...     0
1487        ... )
1488        ((MechanismSpecifier(domain='main', zone='zone',\
1489 decision='decision', name='compass'), 'north'), 8)
1490        >>> pf.parseOneEffectArg(
1491        ...     [
1492        ...         'before',
1493        ...         'main',
1494        ...         Lexeme.domainSeparator,
1495        ...         'zone',
1496        ...         Lexeme.zoneSeparator,
1497        ...         'decision',
1498        ...         Lexeme.zoneSeparator,
1499        ...         'compass',
1500        ...         'north',
1501        ...         'south',
1502        ...         'east',
1503        ...         'west'
1504        ...     ],
1505        ...     1
1506        ... )  # a mechanism specifier without a state will become a
1507        ...    # decision specifier
1508        (DecisionSpecifier(domain='main', zone='zone',\
1509 name='decision'), 5)
1510        >>> tokens = [
1511        ...     'set',
1512        ...     'main',
1513        ...     Lexeme.domainSeparator,
1514        ...     'zone',
1515        ...     Lexeme.zoneSeparator,
1516        ...     'compass',
1517        ...     'north',
1518        ...     'bounce',
1519        ... ]
1520        >>> pf.parseOneEffectArg(tokens, 0)
1521        ('set', 0)
1522        >>> pf.parseDecisionSpecifierFromTokens(tokens, 1)
1523        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1524        >>> pf.parseOneEffectArg(tokens, 1)
1525        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1526        >>> pf.parseOneEffectArg(tokens, 6)
1527        ('north', 6)
1528        >>> pf.parseOneEffectArg(tokens, 7)
1529        ('bounce', 7)
1530        >>> pf.parseOneEffectArg(
1531        ...     [
1532        ...         "fort", Lexeme.zoneSeparator, "gate",
1533        ...             Lexeme.mechanismSeparator, "open",
1534        ...     ],
1535        ...     0
1536        ... )
1537        ((MechanismSpecifier(domain=None, zone=None, decision='fort',\
1538 name='gate'), 'open'), 4)
1539        >>> pf.parseOneEffectArg(
1540        ...     [Lexeme.openCurly, 'val', '5', Lexeme.closeCurly],
1541        ...     0
1542        ... ) == ([commands.command('val', '5')], 3)
1543        True
1544        >>> a = [
1545        ...     Lexeme.openCurly, 'val', '5', Lexeme.closeCurly,
1546        ...     Lexeme.openCurly, 'append', Lexeme.consequenceSeparator,
1547        ...     'pop', Lexeme.closeCurly
1548        ... ]
1549        >>> cl = [
1550        ...     [commands.command('val', '5')],
1551        ...     [commands.command('append'), commands.command('pop')]
1552        ... ]
1553        >>> pf.parseOneEffectArg(a, 0) == (cl[0], 3)
1554        True
1555        >>> pf.parseOneEffectArg(a, 4) == (cl[1], 8)
1556        True
1557        >>> pf.parseOneEffectArg(a, 1)
1558        ('val', 1)
1559        >>> pf.parseOneEffectArg(a, 2)
1560        ('5', 2)
1561        >>> pf.parseOneEffectArg(a, 3)
1562        Traceback (most recent call last):
1563        ...
1564        exploration.parsing.ParseError...
1565        """
1566        start, limit, nTokens = normalizeEnds(
1567            tokens,
1568            start,
1569            limit if limit is not None else -1
1570        )
1571        if nTokens == 0:
1572            raise ParseError("No effect arguments available.")
1573
1574        first = tokens[start]
1575
1576        if nTokens == 1:
1577            if first in (Lexeme.inCommon, Lexeme.isHidden):
1578                return (first, start)
1579            elif not isinstance(first, str):
1580                raise ParseError(
1581                    f"Only one token and it's a special character"
1582                    f" ({first} = {repr(self.formatDict[first])})"
1583                )
1584            else:
1585                return (cast(base.Capability, first), start)
1586
1587        assert (nTokens > 1)
1588
1589        second = tokens[start + 1]
1590
1591        # Command lists start with an open curly brace, and effect
1592        # modifiers start with a Lexeme; nothing else may start with one
1593        if first == Lexeme.openCurly:
1594            return self.parseCommandListFromTokens(tokens, start)
1595        elif first in (Lexeme.inCommon, Lexeme.isHidden):
1596            return (first, start)
1597        elif first in (Lexeme.sepOrDelay, Lexeme.effectCharges):
1598            if not isinstance(second, str):
1599                raise ParseError(
1600                    f"Token following a modifier that needs a count"
1601                    f" must be a string in tokens:"
1602                    f"\n{tokens[start:limit + 1]}"
1603                )
1604            try:
1605                val = int(second)
1606            except ValueError:
1607                raise ParseError(
1608                    f"Token following a modifier that needs a count"
1609                    f" must be convertible to an int:"
1610                    f"\n{tokens[start:limit + 1]}"
1611                )
1612
1613            first = cast(
1614                Literal[Lexeme.sepOrDelay, Lexeme.effectCharges],
1615                first
1616            )
1617            return ((first, val), start + 1)
1618        elif not isinstance(first, str):
1619            raise ParseError(
1620                f"First token must be a string unless it's a modifier"
1621                f" lexeme or command/reversion-set opener. Got:"
1622                f"\n{tokens[start:limit + 1]}"
1623            )
1624
1625        # If we have two strings in a row, then the first is our parsed
1626        # value alone and we'll parse the second separately.
1627        if isinstance(second, str):
1628            return (first, start)
1629        elif second in (Lexeme.inCommon, Lexeme.isHidden):
1630            return (first, start)
1631
1632        # The pair/triple forms below need at least 3 tokens; with
1633        # fewer than that, the first token stands alone.
1634        if nTokens < 3:
1635            return (first, start)
1636
1637        third = tokens[start + 2]
1638        if not isinstance(third, str):
1639            return (first, start)
1640
1641        second = cast(Lexeme, second)
1642        third = cast(str, third)
1643
1644        if second in (Lexeme.tokenCount, Lexeme.skillLevel):
1645            try:
1646                num = int(third)
1647            except ValueError:
1648                raise ParseError(
1649                    f"Invalid effect tokens: count for Tokens or level"
1650                    f" for Skill must be convertible to an integer."
1651                    f"\n{tokens[start:limit + 1]}"
1652                )
1653            if second == Lexeme.tokenCount:
1654                return ((first, num), start + 2)  # token/count pair
1655            else:
1656                return (('skill', first, num), start + 2)  # skill/level triple
1657
1658        elif second == Lexeme.mechanismSeparator:  # bare mechanism
1659            return (
1660                (
1661                    base.MechanismSpecifier(
1662                        domain=None,
1663                        zone=None,
1664                        decision=None,
1665                        name=first
1666                    ),
1667                    third
1668                ),
1669                start + 2
1670            )
1671
1672        elif second in (Lexeme.domainSeparator, Lexeme.zoneSeparator):
1673            try:
1674                mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
1675                    tokens,
1676                    start
1677                )  # works whether it's a mechanism or decision specifier...
1678            except ParseError:
1679                return self.parseDecisionSpecifierFromTokens(tokens, start)
1680            if mEnd + 2 > limit:
1681                # No room for following mechanism separator + state
1682                return self.parseDecisionSpecifierFromTokens(tokens, start)
1683            sep = tokens[mEnd + 1]
1684            after = tokens[mEnd + 2]
1685            if sep == Lexeme.mechanismSeparator:
1686                if not isinstance(after, str):
1687                    raise ParseError(
1688                        f"Mechanism separator not followed by state:"
1689                        f"\n{tokens[start:mEnd + 3]}"
1690                    )
1691                return ((mSpec, after), mEnd + 2)
1692            else:
1693                # No mechanism separator afterwards
1694                return self.parseDecisionSpecifierFromTokens(tokens, start)
1695
1696        else:  # unrecognized as a longer combo
1697            return (first, start)
1698
1699    def coalesceEffectArgs(
1700        self,
1701        tokens: LexedTokens,
1702        start: int = 0,
1703        end: int = -1
1704    ) -> Tuple[
1705        List[  # List of effect args
1706            Union[
1707                base.Capability,  # covers 'str' possibility
1708                Tuple[base.Token, base.TokenCount],
1709                Tuple[Literal['skill'], base.Skill, base.Level],
1710                Tuple[base.MechanismSpecifier, base.MechanismState],
1711                base.DecisionSpecifier,
1712                List[commands.Command],
1713                Set[str]
1714            ]
1715        ],
1716        Tuple[  # Slots for modifiers: common/hidden/charges/delay
1717            Optional[bool],
1718            Optional[bool],
1719            Optional[int],
1720            Optional[int],
1721        ]
1722    ]:
1723        """
1724        Given a region of a lexed tokens list which contains one or more
1725        effect arguments, combines token sequences representing things
1726        like capabilities, mechanism states, token counts, and skill
1727        levels into the tuples that would be passed to `base.effect`.
1728        Returns a tuple with two elements:
1729
1730        - First, a list that contains several different kinds of
1731            objects, each of which is distinguishable by its type or
1732            part of its value.
1733        - Next, a tuple with four entries for common, hidden, charges,
1734            and/or delay values based on the presence of modifier
1735            sequences. Any or all of these may be `None` if the relevant
1736            modifier was not present (the usual case).
1737
1738        For example:
1739
1740        >>> pf = ParseFormat()
1741        >>> pf.coalesceEffectArgs(["jump"])
1742        (['jump'], (None, None, None, None))
1743        >>> pf.coalesceEffectArgs(["coin", Lexeme.tokenCount, "3", "fly"])
1744        ([('coin', 3), 'fly'], (None, None, None, None))
1745        >>> pf.coalesceEffectArgs(
1746        ...     [
1747        ...         "fort", Lexeme.zoneSeparator, "gate",
1748        ...             Lexeme.mechanismSeparator, "open"
1749        ...     ]
1750        ... )
1751        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1752 name='gate'), 'open')], (None, None, None, None))
1753        >>> pf.coalesceEffectArgs(
1754        ...     [
1755        ...         "main", Lexeme.domainSeparator, "cliff"
1756        ...     ]
1757        ... )
1758        ([DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1759 (None, None, None, None))
1760        >>> pf.coalesceEffectArgs(
1761        ...     [
1762        ...         "door", Lexeme.mechanismSeparator, "open"
1763        ...     ]
1764        ... )
1765        ([(MechanismSpecifier(domain=None, zone=None, decision=None,\
1766 name='door'), 'open')], (None, None, None, None))
1767        >>> pf.coalesceEffectArgs(
1768        ...     [
1769        ...         "fort", Lexeme.zoneSeparator, "gate",
1770        ...             Lexeme.mechanismSeparator, "open",
1771        ...         "canJump",
1772        ...         "coins", Lexeme.tokenCount, "3",
1773        ...         Lexeme.inCommon,
1774        ...         "agility", Lexeme.skillLevel, "-1",
1775        ...         Lexeme.sepOrDelay, "0",
1776        ...         "main", Lexeme.domainSeparator, "cliff"
1777        ...     ]
1778        ... )
1779        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1780 name='gate'), 'open'), 'canJump', ('coins', 3), ('skill', 'agility', -1),\
1781 DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1782 (True, None, None, 0))
1783        >>> pf.coalesceEffectArgs(["bounce", Lexeme.isHidden])
1784        (['bounce'], (None, True, None, None))
1785        >>> pf.coalesceEffectArgs(
1786        ...     ["goto", "3", Lexeme.inCommon, Lexeme.isHidden]
1787        ... )
1788        (['goto', '3'], (True, True, None, None))
1789        """
1790        start, end, nTokens = normalizeEnds(tokens, start, end)
1791        where = start
1792        result: List[  # List of effect args
1793            Union[
1794                base.Capability,  # covers 'str' possibility
1795                Tuple[base.Token, base.TokenCount],
1796                Tuple[Literal['skill'], base.Skill, base.Level],
1797                Tuple[base.MechanismSpecifier, base.MechanismState],
1798                base.DecisionSpecifier,
1799                List[commands.Command],
1800                Set[str]
1801            ]
1802        ] = []
1803        inCommon: Optional[bool] = None
1804        isHidden: Optional[bool] = None
1805        charges: Optional[int] = None
1806        delay: Optional[int] = None
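            # Walk the region one parsed argument at a time; modifier
            # lexemes fill in the four slots above instead of being
            # appended to the result list.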
1807        while where <= end:
1808            following, thisEnd = self.parseOneEffectArg(tokens, where, end)
1809            if following == Lexeme.inCommon:
1810                if inCommon is not None:
1811                    raise ParseError(
1812                        f"In-common effect modifier specified more than"
1813                        f" once in effect args:"
1814                        f"\n{tokens[start:end + 1]}"
1815                    )
1816                inCommon = True
1817            elif following == Lexeme.isHidden:
1818                if isHidden is not None:
1819                    raise ParseError(
1820                        f"Is-hidden effect modifier specified more than"
1821                        f" once in effect args:"
1822                        f"\n{tokens[start:end + 1]}"
1823                    )
1824                isHidden = True
1825            elif (
1826                isinstance(following, tuple)
1827            and len(following) == 2
1828            and following[0] in (Lexeme.effectCharges, Lexeme.sepOrDelay)
1829            and isinstance(following[1], int)
1830            ):
1831                if following[0] == Lexeme.effectCharges:
1832                    if charges is not None:
1833                        raise ParseError(
1834                            f"Charges effect modifier specified more than"
1835                            f" once in effect args:"
1836                            f"\n{tokens[start:end + 1]}"
1837                        )
1838                    charges = following[1]
1839                else:
1840                    if delay is not None:
1841                        raise ParseError(
1842                            f"Delay effect modifier specified more than"
1843                            f" once in effect args:"
1844                            f"\n{tokens[start:end + 1]}"
1845                        )
1846                    delay = following[1]
1847            elif (
1848                    isinstance(following, base.Capability)
1849                 or (
1850                    isinstance(following, tuple)
1851                and len(following) == 2
1852                and isinstance(following[0], base.Token)
1853                and isinstance(following[1], base.TokenCount)
1854                ) or (
1855                    isinstance(following, tuple)
1856                and len(following) == 3
1857                and following[0] == 'skill'
1858                and isinstance(following[1], base.Skill)
1859                and isinstance(following[2], base.Level)
1860                ) or (
1861                    isinstance(following, tuple)
1862                and len(following) == 2
1863                and isinstance(following[0], base.MechanismSpecifier)
1864                and isinstance(following[1], base.MechanismState)
1865                ) or (
1866                    isinstance(following, base.DecisionSpecifier)
1867                ) or (
1868                    isinstance(following, list)
1869                and all(isinstance(item, tuple) for item in following)
1870                    # TODO: Stricter command list check here?
1871                ) or (
1872                    isinstance(following, set)
1873                and all(isinstance(item, str) for item in following)
1874                )
1875            ):
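                    # One of the recognized positional argument shapes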
1876                result.append(following)
1877            else:
1878                raise ParseError(f"Invalid coalesced argument: {following}")
1879            where = thisEnd + 1
1880
1881        return (result, (inCommon, isHidden, charges, delay))
1882
1883    def parseEffectFromTokens(
1884        self,
1885        tokens: LexedTokens,
1886        start: int = 0,
1887        end: int = -1
1888    ) -> base.Effect:
1889        """
1890        Given a region of a list of lexed tokens specifying an effect,
1891        returns the `Effect` object that those tokens specify.
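
            For example (a minimal check mirroring the first `parseEffect`
            example below, assuming `lex` turns "gain jump" into
            ['gain', 'jump']):

            >>> pf = ParseFormat()
            >>> pf.parseEffectFromTokens(['gain', 'jump']) == base.effect(
            ...     gain='jump'
            ... )
            True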
1892        """
1893        start, end, nTokens = normalizeEnds(tokens, start, end)
1894
1895        # Check for empty list
1896        if nTokens == 0:
1897            raise ParseError(
1898                "Effect must include at least a type."
1899            )
1900
1901        firstPart = tokens[start]
1902
1903        if isinstance(firstPart, Lexeme):
1904            raise ParseError(
1905                f"First part of effect must be an effect type. Got"
1906                f" {firstPart} ({repr(self.formatDict[firstPart])})."
1907            )
1908
1909        firstPart = cast(str, firstPart)
1910
1911        # Get the effect type
1912        fType = self.effectType(firstPart)
1913
1914        if fType is None:
1915            raise ParseError(
1916                f"Unrecognized effect type {firstPart!r}. Check the"
1917                f" EffectType entries in the effect names dictionary."
1918            )
1919
1920        if start + 1 > end:  # No tokens left: set empty args
1921            groupedArgs: List[
1922                Union[
1923                    base.Capability,  # covers 'str' possibility
1924                    Tuple[base.Token, base.TokenCount],
1925                    Tuple[Literal['skill'], base.Skill, base.Level],
1926                    Tuple[base.MechanismSpecifier, base.MechanismState],
1927                    base.DecisionSpecifier,
1928                    List[commands.Command],
1929                    Set[str]
1930                ]
1931            ] = []
1932            modifiers: Tuple[
1933                Optional[bool],
1934                Optional[bool],
1935                Optional[int],
1936                Optional[int]
1937            ] = (None, None, None, None)
1938        else:  # Coalesce remaining tokens if there are any
1939            groupedArgs, modifiers = self.coalesceEffectArgs(
1940                tokens,
1941                start + 1,
1942                end
1943            )
1944
1945        # Set up arguments for base.effect and handle modifiers first
1946        args: Dict[
1947            str,
1948            Union[
1949                None,
1950                base.ContextSpecifier,
1951                base.Capability,
1952                Tuple[base.Token, base.TokenCount],
1953                Tuple[Literal['skill'], base.Skill, base.Level],
1954                Tuple[base.MechanismSpecifier, base.MechanismState],
1955                Tuple[base.MechanismSpecifier, List[base.MechanismState]],
1956                List[base.Capability],
1957                base.AnyDecisionSpecifier,
1958                Tuple[base.AnyDecisionSpecifier, base.FocalPointName],
1959                bool,
1960                int,
1961                base.SaveSlot,
1962                Tuple[base.SaveSlot, Set[str]]
1963            ]
1964        ] = {}
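            # modifiers holds (inCommon, isHidden, charges, delay)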
1965        if modifiers[0]:
1966            args['applyTo'] = 'common'
1967        if modifiers[1]:
1968            args['hidden'] = True
1969        else:
1970            args['hidden'] = False
1971        if modifiers[2] is not None:
1972            args['charges'] = modifiers[2]
1973        if modifiers[3] is not None:
1974            args['delay'] = modifiers[3]
1975
1976        # Now handle the main effect-type-based argument
1977        if fType in ("gain", "lose"):
1978            if len(groupedArgs) != 1:
1979                raise ParseError(
1980                    f"'{fType}' effect must have exactly one grouped"
1981                    f" argument (got {len(groupedArgs)}):\n{groupedArgs}"
1982                )
1983            thing = groupedArgs[0]
1984            if isinstance(thing, tuple):
1985                if len(thing) == 2:
1986                    if (
1987                        not isinstance(thing[0], base.Token)
1988                     or not isinstance(thing[1], base.TokenCount)
1989                    ):
1990                        raise ParseError(
1991                            f"'{fType}' effect grouped arg pair must be a"
1992                            f" (token, amount) pair. Got:\n{thing}"
1993                        )
1994                elif len(thing) == 3:
1995                    if (
1996                        thing[0] != 'skill'
1997                     or not isinstance(thing[1], base.Skill)
1998                     or not isinstance(thing[2], base.Level)
1999                    ):
2000                        raise ParseError(
2001                            f"'{fType}' effect grouped arg must be a"
2002                            f" ('skill', skill, level) triple. Got:\n{thing}"
2003                        )
2004                else:
2005                    raise ParseError(
2006                        f"'{fType}' effect grouped arg tuple must have"
2007                        f" length 2 or 3. Got (length {len(thing)}):\n{thing}"
2008                    )
2009            elif not isinstance(thing, base.Capability):
2010                raise ParseError(
2011                    f"'{fType}' effect grouped arg must be a capability"
2012                    f" or a (token, amount) tuple. Got:\n{thing}"
2013                )
2014            args[fType] = thing
2015            return base.effect(**args)  # type:ignore
2016
2017        elif fType == "set":
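                # 'set' takes one grouped argument: a (token, count) pair,
                # a (mechanism, state) pair, or a ('skill', skill, level)
                # triple.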
2018            if len(groupedArgs) != 1:
2019                raise ParseError(
2020                    f"'{fType}' effect must have exactly one grouped"
2021                    f" argument (got {len(groupedArgs)}):\n{groupedArgs}"
2022                )
2023            setVal = groupedArgs[0]
2024            if not isinstance(
2025                setVal,
2026                tuple
2027            ):
2028                raise ParseError(
2029                    f"'{fType}' effect grouped arg must be a tuple. Got:"
2030                    f"\n{setVal}"
2031                )
2032            if len(setVal) == 2:
2033                setWhat, setTo = setVal
2034                if (
2035                    isinstance(setWhat, base.Token)
2036                and isinstance(setTo, base.TokenCount)
2037                ) or (
2038                    isinstance(setWhat, base.MechanismSpecifier)
2039                and isinstance(setTo, base.MechanismState)
2040                ):
2041                    args[fType] = setVal
2042                    return base.effect(**args)  # type:ignore
2043                else:
2044                    raise ParseError(
2045                        f"Invalid '{fType}' effect grouped args:"
2046                        f"\n{groupedArgs}"
2047                    )
2048            elif len(setVal) == 3:
2049                indicator, whichSkill, setTo = setVal
2050                if (
2051                    indicator == 'skill'
2052                and isinstance(whichSkill, base.Skill)
2053                and isinstance(setTo, base.Level)
2054                ):
2055                    args[fType] = setVal
2056                    return base.effect(**args)  # type:ignore
2057                else:
2058                    raise ParseError(
2059                        f"Invalid '{fType}' effect grouped args (not a"
2060                        f" skill):\n{groupedArgs}"
2061                    )
2062            else:
2063                raise ParseError(
2064                    f"Invalid '{fType}' effect grouped args (wrong"
2065                    f" length tuple):\n{groupedArgs}"
2066                )
2067
2068        elif fType == "toggle":
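                # 'toggle' is either a mechanism toggle (a specifier plus a
                # list of states) or a capability toggle (a list of
                # capabilities).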
2069            if len(groupedArgs) == 0:
2070                raise ParseError(
2071                    f"'{fType}' effect must have at least one grouped"
2072                    f" argument. Got:\n{groupedArgs}"
2073                )
2074            if (
2075                isinstance(groupedArgs[0], tuple)
2076            and len(groupedArgs[0]) == 2
2077            and isinstance(groupedArgs[0][0], base.MechanismSpecifier)
2078            and isinstance(groupedArgs[0][1], base.MechanismState)
2079            and all(
2080                    isinstance(a, base.MechanismState)
2081                    for a in groupedArgs[1:]
2082                )
2083            ):  # a mechanism toggle
2084                args[fType] = (
2085                    groupedArgs[0][0],
2086                    cast(
2087                        List[base.MechanismState],
2088                        [groupedArgs[0][1]] + groupedArgs[1:]
2089                    )
2090                )
2091                return base.effect(**args)  # type:ignore
2092            elif all(isinstance(a, base.Capability) for a in groupedArgs):
2093                # a capability toggle
2094                args[fType] = cast(List[base.Capability], groupedArgs)
2095                return base.effect(**args)  # type:ignore
2096            else:
2097                raise ParseError(
2098                    f"Invalid arguments for '{fType}' effect. Got:"
2099                    f"\n{groupedArgs}"
2100                )
2101
2102        elif fType in ("bounce", "deactivate"):
2103            if len(groupedArgs) != 0:
2104                raise ParseError(
2105                    f"'{fType}' effect may not include any"
2106                    f" arguments. Got {len(groupedArgs)}:"
2107                    f"\n{groupedArgs}"
2108                )
2109            args[fType] = True
2110            return base.effect(**args)  # type:ignore
2111
2112        elif fType == "follow":
2113            if len(groupedArgs) != 1:
2114                raise ParseError(
2115                    f"'{fType}' effect must include exactly one"
2116                    f" argument. Got {len(groupedArgs)}:"
2117                    f"\n{groupedArgs}"
2118                )
2119
2120            transition = groupedArgs[0]
2121            if not isinstance(transition, base.Transition):
2122                raise ParseError(
2123                    f"Invalid argument for '{fType}' effect. Needed a"
2124                    f" transition but got:\n{groupedArgs}"
2125                )
2126            args[fType] = transition
2127            return base.effect(**args)  # type:ignore
2128
2129        elif fType == "edit":
2130            if len(groupedArgs) == 0:
2131                raise ParseError(
2132                    "An 'edit' effect requires at least one argument."
2133                )
2134            for i, arg in enumerate(groupedArgs):
2135                if not isinstance(arg, list):
2136                    raise ParseError(
2137                        f"'edit' effect argument {i} is not a sub-list:"
2138                        f"\n  {arg!r}"
2139                        f"\nAmong arguments:"
2140                        f"\n  {groupedArgs}"
2141                    )
2142                for j, cmd in enumerate(arg):
2143                    if not isinstance(cmd, tuple):
2144                        raise ParseError(
2145                            f"'edit' effect argument {i} contains"
2146                            f" non-tuple part {j}:"
2147                            f"\n  {cmd!r}"
2148                            f"\nAmong arguments:"
2149                            f"\n  {groupedArgs}"
2150                        )
2151
2152            args[fType] = groupedArgs  # type:ignore
2153            return base.effect(**args)  # type:ignore
2154
2155        elif fType == "goto":
2156            if len(groupedArgs) not in (1, 2):
2157                raise ParseError(
2158                    f"A 'goto' effect must include either one or two"
2159                    f" grouped arguments. Got {len(groupedArgs)}:"
2160                    f"\n{groupedArgs}"
2161                )
2162
2163            first = groupedArgs[0]
2164            if not isinstance(
2165                first,
2166                (base.DecisionName, base.DecisionSpecifier)
2167            ):
2168                raise ParseError(
2169                    f"'{fType}' effect must first specify a destination"
2170                    f" decision. Got:\n{groupedArgs}"
2171                )
2172
2173            # Check if it's really a decision ID
2174            dSpec: base.AnyDecisionSpecifier
2175            if isinstance(first, base.DecisionName):
2176                try:
2177                    dSpec = int(first)
2178                except ValueError:
2179                    dSpec = first
2180            else:
2181                dSpec = first
2182
2183            if len(groupedArgs) == 2:
2184                second = groupedArgs[1]
2185                if not isinstance(second, base.FocalPointName):
2186                    raise ParseError(
2187                        f"'{fType}' effect must have a focal point name"
2188                        f" if it has a second part. Got:\n{groupedArgs}"
2189                    )
2190                args[fType] = (dSpec, second)
2191            else:
2192                args[fType] = dSpec
2193
2194            return base.effect(**args)  # type:ignore
2195
2196        elif fType == "save":
2197            if len(groupedArgs) not in (0, 1):
2198                raise ParseError(
2199                    f"'{fType}' effect must include zero or one"
2200                    f" argument(s). Got {len(groupedArgs)}:"
2201                    f"\n{groupedArgs}"
2202                )
2203
2204            if len(groupedArgs) == 1:
2205                slot = groupedArgs[0]
2206            else:
2207                slot = base.DEFAULT_SAVE_SLOT
2208            if not isinstance(slot, base.SaveSlot):
2209                raise ParseError(
2210                    f"Invalid argument for '{fType}' effect. Needed a"
2211                    f" save slot but got:\n{groupedArgs}"
2212                )
2213            args[fType] = slot
2214            return base.effect(**args)  # type:ignore
2215
2216        else:
2217            raise ParseError(f"Invalid effect type: '{fType}'.")
2218
2219    def parseEffect(self, effectStr: str) -> base.Effect:
2220        """
2221        Works like `parseEffectFromTokens` but starts with a raw string.
2222        For example:
2223
2224        >>> pf = ParseFormat()
2225        >>> pf.parseEffect("gain jump") == base.effect(gain='jump')
2226        True
2227        >>> pf.parseEffect("set door:open") == base.effect(
2228        ...     set=(
2229        ...         base.MechanismSpecifier(None, None, None, 'door'),
2230        ...         'open'
2231        ...     )
2232        ... )
2233        True
2234        >>> pf.parseEffect("set coins*10") == base.effect(set=('coins', 10))
2235        True
2236        >>> pf.parseEffect("set agility^3") == base.effect(
2237        ...     set=('skill', 'agility', 3)
2238        ... )
2239        True
2240        """
2241        return self.parseEffectFromTokens(self.lex(effectStr))
2242
2243    def unparseEffect(self, effect: base.Effect) -> str:
2244        """
2245        The opposite of `parseEffect`; turns an effect back into a
2246        string representation.
2247
2248        For example:
2249
2250        >>> pf = ParseFormat()
2251        >>> e = {
2252        ...     "type": "gain",
2253        ...     "applyTo": "active",
2254        ...     "value": "flight",
2255        ...     "delay": None,
2256        ...     "charges": None,
2257        ...     "hidden": False
2258        ... }
2259        >>> pf.unparseEffect(e)
2260        'gain flight'
2261        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2262        True
2263        >>> s = 'gain flight'
2264        >>> pf.unparseEffect(pf.parseEffect(s)) == s
2265        True
2266        >>> s2 = '  gain\\nflight'
2267        >>> pf.unparseEffect(pf.parseEffect(s2)) == s
2268        True
2269        >>> e = {
2270        ...     "type": "gain",
2271        ...     "applyTo": "active",
2272        ...     "value": ("gold", 5),
2273        ...     "delay": 1,
2274        ...     "charges": 2,
2275        ...     "hidden": False
2276        ... }
2277        >>> pf.unparseEffect(e)
2278        'gain gold*5 ,1 =2'
2279        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2280        True
2281        >>> e = {
2282        ...     "type": "set",
2283        ...     "applyTo": "active",
2284        ...     "value": (
2285        ...         base.MechanismSpecifier(None, None, None, "gears"),
2286        ...         "on"
2287        ...     ),
2288        ...     "delay": None,
2289        ...     "charges": 1,
2290        ...     "hidden": False
2291        ... }
2292        >>> pf.unparseEffect(e)
2293        'set gears:on =1'
2294        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2295        True
2296        >>> e = {
2297        ...     "type": "toggle",
2298        ...     "applyTo": "active",
2299        ...     "value": ["red", "blue"],
2300        ...     "delay": None,
2301        ...     "charges": None,
2302        ...     "hidden": False
2303        ... }
2304        >>> pf.unparseEffect(e)
2305        'toggle red blue'
2306        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2307        True
2308        >>> e = {
2309        ...     "type": "toggle",
2310        ...     "applyTo": "active",
2311        ...     "value": (
2312        ...         base.MechanismSpecifier(None, None, None, "switch"),
2313        ...         ["on", "off"]
2314        ...     ),
2315        ...     "delay": None,
2316        ...     "charges": None,
2317        ...     "hidden": False
2318        ... }
2319        >>> pf.unparseEffect(e)
2320        'toggle switch:on off'
2321        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2322        True
2323        >>> e = {
2324        ...     "type": "deactivate",
2325        ...     "applyTo": "active",
2326        ...     "value": None,
2327        ...     "delay": 2,
2328        ...     "charges": None,
2329        ...     "hidden": False
2330        ... }
2331        >>> pf.unparseEffect(e)
2332        'deactivate ,2'
2333        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2334        True
2335        >>> e = {
2336        ...     "type": "goto",
2337        ...     "applyTo": "common",
2338        ...     "value": 3,
2339        ...     "delay": None,
2340        ...     "charges": None,
2341        ...     "hidden": False
2342        ... }
2343        >>> pf.unparseEffect(e)
2344        'goto 3 +c'
2345        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2346        True
2347        >>> e = {
2348        ...     "type": "goto",
2349        ...     "applyTo": "common",
2350        ...     "value": 3,
2351        ...     "delay": None,
2352        ...     "charges": None,
2353        ...     "hidden": True
2354        ... }
2355        >>> pf.unparseEffect(e)
2356        'goto 3 +c +h'
2357        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2358        True
2359        >>> e = {
2360        ...     "type": "goto",
2361        ...     "applyTo": "active",
2362        ...     "value": 'home',
2363        ...     "delay": None,
2364        ...     "charges": None,
2365        ...     "hidden": False
2366        ... }
2367        >>> pf.unparseEffect(e)
2368        'goto home'
2369        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2370        True
2371        >>> e = base.effect(edit=[
2372        ...     [
2373        ...         commands.command('val', '5'),
2374        ...         commands.command('empty', 'list'),
2375        ...         commands.command('append', '$_')
2376        ...     ],
2377        ...     [
2378        ...         commands.command('val', '11'),
2379        ...         commands.command('assign', 'var', '$_'),
2380        ...         commands.command('op', '+', '$var', '$var')
2381        ...     ],
2382        ... ])
2383        >>> pf.unparseEffect(e)
2384        'edit {\\n  val 5;\\n  empty list;\\n  append $_;\\n}\
2385 {\\n  val 11;\\n  assign var $_;\\n  op + $var $var;\\n}'
2386        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2387        True
2388        """
2389        result: List[str] = []
2390
2391        # Reverse the effect type into a marker
2392        eType = effect['type']
2393        for key, val in self.effectNames.items():
2394            if val == eType:
2395                if len(result) != 0:
2396                    raise ParseError(
2397                        f"Effect map contains multiple matching entries"
2398                        f" for effect type '{effect['type']}':"
2399                        f" '{result[0]}' and '{key}'"
2400                    )
2401                result.append(key)
2402                # Don't break 'cause we'd like to check uniqueness
2403
2404        eVal = effect['value']
2405        if eType in ('gain', 'lose'):
2406            eVal = cast(Union[base.Capability, Tuple[base.Token, int]], eVal)
2407            if isinstance(eVal, str):  # a capability
2408                result.append(eVal)
2409            else:  # a token
2410                result.append(
2411                    eVal[0]
2412                  + self.formatDict[Lexeme.tokenCount]
2413                  + str(eVal[1])
2414                )
2415        elif eType == 'set':
2416            eVal = cast(
2417                # TODO: Add skill level setting here & elsewhere
2418                Union[
2419                    Tuple[base.Token, base.TokenCount],
2420                    Tuple[base.MechanismName, base.MechanismState]
2421                ],
2422                eVal
2423            )
2424            if len(eVal) != 2:
2425                raise ValueError(
2426                    f"'set' effect has non-length-2 value:"
2427                    f"\n  {repr(effect)}"
2428                )
2429            if isinstance(eVal[1], int):  # a token count
2430                result.append(eVal[0])
2431                result.append(self.formatDict[Lexeme.tokenCount])
2432                result.append(str(eVal[1]))
2433            else:  # a mechanism
2434                if isinstance(eVal[0], base.MechanismSpecifier):
2435                    mSpec = self.unparseMechanismSpecifier(eVal[0])
2436                else:
2437                    assert isinstance(eVal[0], base.MechanismName)
2438                    mSpec = eVal[0]
2439                result.append(
2440                    mSpec
2441                  + self.formatDict[Lexeme.mechanismSeparator]
2442                  + eVal[1]
2443                )
2444        elif eType == 'toggle':
2445            if isinstance(eVal, tuple):  # mechanism states
2446                tSpec, states = cast(
2447                    Tuple[
2448                        base.AnyMechanismSpecifier,
2449                        List[base.MechanismState]
2450                    ],
2451                    eVal
2452                )
2453                firstState = states[0]
2454                restStates = states[1:]
2455                if isinstance(tSpec, base.MechanismSpecifier):
2456                    mStr = self.unparseMechanismSpecifier(tSpec)
2457                else:
2458                    mStr = str(tSpec)
2459                result.append(
2460                    mStr
2461                  + self.formatDict[Lexeme.mechanismSeparator]
2462                  + firstState
2463                )
2464                result.extend(restStates)
2465            else:  # capabilities
2466                assert isinstance(eVal, list)
2467                eVal = cast(List[base.Capability], eVal)
2468                result.extend(eVal)
2469        elif eType in ('deactivate', 'bounce'):
2470            if eVal is not None:
2471                raise ValueError(
2472                    f"'{eType}' effect has non-None value:"
2473                    f"\n  {repr(effect)}"
2474                )
2475        elif eType == 'follow':
2476            eVal = cast(base.Transition, eVal)
2477            result.append(eVal)
2478        elif eType == 'edit':
2479            eVal = cast(List[List[commands.Command]], eVal)
2480            if len(eVal) == 0:
2481                result.append('{}')  # an empty command list block
2482            else:
2483                for cmdList in eVal:
2484                    result.append(
2485                        self.unparseCommandList(cmdList)
2486                    )
2487        elif eType == 'goto':
2488            if isinstance(eVal, base.DecisionSpecifier):
2489                result.append(self.unparseDecisionSpecifier(eVal))
2490            elif isinstance(eVal, (base.DecisionID, base.DecisionName)):
2491                result.append(str(eVal))
2492            elif (
2493                isinstance(eVal, tuple)
2494            and len(eVal) == 2
2495            and isinstance(eVal[1], base.FocalPointName)
2496            ):
2497                if isinstance(eVal[0], base.DecisionSpecifier):
2498                    result.append(self.unparseDecisionSpecifier(eVal[0]))
2499                else:
2500                    result.append(str(eVal[0]))
2501                result.append(eVal[1])
2502            else:
2503                raise ValueError(
2504                    f"'{eType}' effect has invalid value {eVal}"
2505                )
2506        else:
2507            raise ValueError(
2508                f"Unrecognized effect type '{eType}' in effect:"
2509                f"\n  {repr(effect)}"
2510            )
2511
2512        # Add modifier strings
2513        if effect['applyTo'] == 'common':
2514            result.append(self.formatDict[Lexeme.inCommon])
2515
2516        if effect['hidden']:
2517            result.append(self.formatDict[Lexeme.isHidden])
2518
2519        dVal = effect['delay']
2520        if dVal is not None:
2521            result.append(
2522                self.formatDict[Lexeme.sepOrDelay] + str(dVal)
2523            )
2524
2525        cVal = effect['charges']
2526        if cVal is not None:
2527            result.append(
2528                self.formatDict[Lexeme.effectCharges] + str(cVal)
2529            )
2530
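            # Join the pieces with single spaces, except around pieces
            # that already begin or end with whitespace (e.g. multi-line
            # command lists), which are concatenated as-is.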
2531        joined = ''
2532        before = False
2533        for r in result:
2534            if (
2535                r.startswith(' ')
2536             or r.startswith('\n')
2537             or r.endswith(' ')
2538             or r.endswith('\n')
2539            ):
2540                joined += r
2541                before = False
2542            else:
2543                joined += (' ' if before else '') + r
2544                before = True
2545        return joined
2546
2547    def parseDecisionSpecifierFromTokens(
2548        self,
2549        tokens: LexedTokens,
2550        start: int = 0
2551    ) -> Tuple[Union[base.DecisionSpecifier, int], int]:
2552        """
2553        Parses a decision specifier starting at the specified position
2554        in the given tokens list. No ending position is given; instead,
2555        this function returns a tuple containing the parsed
2556        `base.DecisionSpecifier` (or decision ID) along with the index
2557        in the tokens list where the specifier ends.
2558
2559        For example:
2560
2561        >>> pf = ParseFormat()
2562        >>> pf.parseDecisionSpecifierFromTokens(['m'])
2563        (DecisionSpecifier(domain=None, zone=None, name='m'), 0)
2564        >>> pf.parseDecisionSpecifierFromTokens(['12'])  # ID specifier
2565        (12, 0)
2566        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'])
2567        (DecisionSpecifier(domain=None, zone=None, name='a'), 0)
2568        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'], 1)
2569        (DecisionSpecifier(domain=None, zone=None, name='m'), 1)
2570        >>> pf.parseDecisionSpecifierFromTokens(
2571        ...     ['a', Lexeme.domainSeparator, 'm']
2572        ... )
2573        (DecisionSpecifier(domain='a', zone=None, name='m'), 2)
2574        >>> pf.parseDecisionSpecifierFromTokens(
2575        ...     ['a', Lexeme.zoneSeparator, 'm']
2576        ... )
2577        (DecisionSpecifier(domain=None, zone='a', name='m'), 2)
2578        >>> pf.parseDecisionSpecifierFromTokens(
2579        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
2580        ... )
2581        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2582        >>> pf.parseDecisionSpecifierFromTokens(
2583        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
2584        ... )
2585        (DecisionSpecifier(domain='a', zone='b', name='m'), 4)
2586        >>> pf.parseDecisionSpecifierFromTokens(
2587        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
2588        ... )
2589        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2590        >>> pf.parseDecisionSpecifierFromTokens(  # ID-style name w/ zone
2591        ...     ['a', Lexeme.zoneSeparator, '5'],
2592        ... )
2593        Traceback (most recent call last):
2594        ...
2595        exploration.base.InvalidDecisionSpecifierError...
2596        >>> pf.parseDecisionSpecifierFromTokens(
2597        ...     ['d', Lexeme.domainSeparator, '123']
2598        ... )
2599        Traceback (most recent call last):
2600        ...
2601        exploration.base.InvalidDecisionSpecifierError...
2602        >>> pf.parseDecisionSpecifierFromTokens(
2603        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2604        ...     1
2605        ... )
2606        Traceback (most recent call last):
2607        ...
2608        exploration.parsing.ParseError...
2609        >>> pf.parseDecisionSpecifierFromTokens(
2610        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2611        ...     2
2612        ... )
2613        (DecisionSpecifier(domain='b', zone=None, name='m'), 4)
2614        >>> pf.parseDecisionSpecifierFromTokens(
2615        ...     [
2616        ...         'a',
2617        ...         Lexeme.domainSeparator,
2618        ...         'b',
2619        ...         Lexeme.zoneSeparator,
2620        ...         'c',
2621        ...         Lexeme.zoneSeparator,
2622        ...         'm'
2623        ...     ]
2624        ... )
2625        (DecisionSpecifier(domain='a', zone='b', name='c'), 4)
2626        >>> pf.parseDecisionSpecifierFromTokens(
2627        ...     [
2628        ...         'a',
2629        ...         Lexeme.domainSeparator,
2630        ...         'b',
2631        ...         Lexeme.zoneSeparator,
2632        ...         'c',
2633        ...         Lexeme.zoneSeparator,
2634        ...         'm'
2635        ...     ],
2636        ...     2
2637        ... )
2638        (DecisionSpecifier(domain=None, zone='b', name='c'), 4)
2639        >>> pf.parseDecisionSpecifierFromTokens(
2640        ...     [
2641        ...         'a',
2642        ...         Lexeme.domainSeparator,
2643        ...         'b',
2644        ...         Lexeme.zoneSeparator,
2645        ...         'c',
2646        ...         Lexeme.zoneSeparator,
2647        ...         'm'
2648        ...     ],
2649        ...     4
2650        ... )
2651        (DecisionSpecifier(domain=None, zone='c', name='m'), 6)
2652        >>> pf.parseDecisionSpecifierFromTokens(
2653        ...     [
2654        ...         'set',
2655        ...         'main',
2656        ...         Lexeme.domainSeparator,
2657        ...         'zone',
2658        ...         Lexeme.zoneSeparator,
2659        ...         'compass',
2660        ...         'north',
2661        ...         'bounce',
2662        ...     ],
2663        ...     1
2664        ... )
2665        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
2666        """
2667        # Check bounds & normalize start index
2668        nTokens = len(tokens)
2669        if start < -nTokens:
2670            raise IndexError(
2671                f"Invalid start index {start} for {nTokens} tokens (too"
2672                f" negative)."
2673            )
2674        elif start >= nTokens:
2675            raise IndexError(
2676                f"Invalid start index {start} for {nTokens} tokens (too"
2677                f" big)."
2678            )
2679        elif start < 0:
2680            start = nTokens + start
2681
2682        assert (start < nTokens)
2683
2684        first = tokens[start]
2685        if not isinstance(first, str):
2686            raise ParseError(
2687                f"Invalid decision specifier (must start with a name or"
2688                f" id; got: {first} = {self.formatDict[first]})."
2689            )
2690
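            # Start with a bare name (or ID) and try to extend the
            # specifier as long as separator + name pairs follow.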
2691        ds = base.DecisionSpecifier(None, None, first)
2692        result = (base.idOrDecisionSpecifier(ds), start)
2693
2694        domain = None
2695        zoneOrDecision = None
2696
2697        if start + 1 >= nTokens:  # at end of tokens
2698            return result
2699
2700        firstSep = tokens[start + 1]
2701        if firstSep == Lexeme.domainSeparator:
2702            domain = first
2703        elif firstSep == Lexeme.zoneSeparator:
2704            zoneOrDecision = first
2705        else:
2706            return result
2707
2708        if start + 2 >= nTokens:
2709            return result
2710
2711        second = tokens[start + 2]
2712        if isinstance(second, Lexeme):
2713            return result
2714
2715        ds = base.DecisionSpecifier(domain, zoneOrDecision, second)
2716        result = (base.idOrDecisionSpecifier(ds), start + 2)
2717
2718        if start + 3 >= nTokens:
2719            return result
2720
2721        secondSep = tokens[start + 3]
2722        if start + 4 >= nTokens:
2723            return result
2724
2725        third = tokens[start + 4]
2726        if secondSep == Lexeme.zoneSeparator:
2727            if zoneOrDecision is not None:  # two in a row
2728                return result
2729            else:
2730                if not isinstance(third, base.DecisionName):
2731                    return result
2732                else:
2733                    zoneOrDecision = second
2734        else:
2735            return result
2736
2737        if isinstance(third, Lexeme):
2738            return result
2739
2740        ds = base.DecisionSpecifier(domain, zoneOrDecision, third)
2741        return (base.idOrDecisionSpecifier(ds), start + 4)
2742
2743    def parseDecisionSpecifier(
2744        self,
2745        specString: str
2746    ) -> Union[base.DecisionID, base.DecisionSpecifier]:
2747        """
2748        Parses a full `DecisionSpecifier` from a single string. Can
2749        parse integer decision IDs in string form, and returns a
2750        `DecisionID` in that case, otherwise returns a
2751        `DecisionSpecifier`. Assumes that all int-convertible strings
2752        are decision IDs, so it cannot deal with decision names which are
2753        just numbers.
2754
2755        For example:
2756
2757        >>> pf = ParseFormat()
2758        >>> pf.parseDecisionSpecifier('example')
2759        DecisionSpecifier(domain=None, zone=None, name='example')
2760        >>> pf.parseDecisionSpecifier('outer::example')
2761        DecisionSpecifier(domain=None, zone='outer', name='example')
2762        >>> pf.parseDecisionSpecifier('domain//region::feature')
2763        DecisionSpecifier(domain='domain', zone='region', name='feature')
2764        >>> pf.parseDecisionSpecifier('123')
2765        123
2766        >>> pf.parseDecisionSpecifier('region::domain//feature')
2767        Traceback (most recent call last):
2768        ...
2769        exploration.base.InvalidDecisionSpecifierError...
2770        >>> pf.parseDecisionSpecifier('domain1//domain2//feature')
2771        Traceback (most recent call last):
2772        ...
2773        exploration.base.InvalidDecisionSpecifierError...
2774        >>> pf.parseDecisionSpecifier('domain//123')
2775        Traceback (most recent call last):
2776        ...
2777        exploration.base.InvalidDecisionSpecifierError...
2778        >>> pf.parseDecisionSpecifier('region::123')
2779        Traceback (most recent call last):
2780        ...
2781        exploration.base.InvalidDecisionSpecifierError...
2782        """
2783        try:
2784            return int(specString)
2785        except ValueError:
2786            tokens = self.lex(specString)
2787            result, end = self.parseDecisionSpecifierFromTokens(tokens)
2788            if end != len(tokens) - 1:
2789                raise base.InvalidDecisionSpecifierError(
2790                    f"Junk after end of decision specifier:"
2791                    f"\n{tokens[end + 1:]}"
2792                )
2793            return result
2794
2795    def parseFeatureSpecifierFromTokens(
2796        self,
2797        tokens: LexedTokens,
2798        start: int = 0,
2799        limit: int = -1
2800    ) -> Tuple[base.FeatureSpecifier, int]:
2801        """
2802        Parses a `FeatureSpecifier` starting from the specified part of
2803        a tokens list. Returns a tuple containing the feature specifier
2804        and the position of the last token it consumed.
2805
2806        Can parse integer feature IDs in string form, as well as nested
2807        feature specifiers and plain feature specifiers. Assumes that
2808        all int-convertible strings are feature IDs, so it cannot deal
2809        with feature names which are just numbers.
2810
2811        For example:
2812
2813        >>> pf = ParseFormat()
2814        >>> pf.parseFeatureSpecifierFromTokens(['example'])
2815        (FeatureSpecifier(domain=None, within=[], feature='example',\
2816 part=None), 0)
2817        >>> pf.parseFeatureSpecifierFromTokens(['example1', 'example2'], 1)
2818        (FeatureSpecifier(domain=None, within=[], feature='example2',\
2819 part=None), 1)
2820        >>> pf.parseFeatureSpecifierFromTokens(
2821        ...     [
2822        ...         'domain',
2823        ...         Lexeme.domainSeparator,
2824        ...         'region',
2825        ...         Lexeme.zoneSeparator,
2826        ...         'feature',
2827        ...         Lexeme.partSeparator,
2828        ...         'part'
2829        ...     ]
2830        ... )
2831        (FeatureSpecifier(domain='domain', within=['region'],\
2832 feature='feature', part='part'), 6)
2833        >>> pf.parseFeatureSpecifierFromTokens(
2834        ...     [
2835        ...         'outerRegion',
2836        ...         Lexeme.zoneSeparator,
2837        ...         'midRegion',
2838        ...         Lexeme.zoneSeparator,
2839        ...         'innerRegion',
2840        ...         Lexeme.zoneSeparator,
2841        ...         'feature'
2842        ...     ]
2843        ... )
2844        (FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
2845 'innerRegion'], feature='feature', part=None), 6)
2846        >>> pf.parseFeatureSpecifierFromTokens(
2847        ...     [
2848        ...         'outerRegion',
2849        ...         Lexeme.zoneSeparator,
2850        ...         'midRegion',
2851        ...         Lexeme.zoneSeparator,
2852        ...         'innerRegion',
2853        ...         Lexeme.zoneSeparator,
2854        ...         'feature'
2855        ...     ],
2856        ...     1
2857        ... )
2858        Traceback (most recent call last):
2859        ...
2860        exploration.parsing.InvalidFeatureSpecifierError...
2861        >>> pf.parseFeatureSpecifierFromTokens(
2862        ...     [
2863        ...         'outerRegion',
2864        ...         Lexeme.zoneSeparator,
2865        ...         'midRegion',
2866        ...         Lexeme.zoneSeparator,
2867        ...         'innerRegion',
2868        ...         Lexeme.zoneSeparator,
2869        ...         'feature'
2870        ...     ],
2871        ...     2
2872        ... )
2873        (FeatureSpecifier(domain=None, within=['midRegion', 'innerRegion'],\
2874 feature='feature', part=None), 6)
2875        >>> pf.parseFeatureSpecifierFromTokens(
2876        ...     [
2877        ...         'outerRegion',
2878        ...         Lexeme.zoneSeparator,
2879        ...         'feature',
2880        ...         Lexeme.domainSeparator,
2881        ...         'after',
2882        ...     ]
2883        ... )
2884        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2885 feature='feature', part=None), 2)
2886        >>> pf.parseFeatureSpecifierFromTokens(
2887        ...     [
2888        ...         'outerRegion',
2889        ...         Lexeme.zoneSeparator,
2890        ...         'feature',
2891        ...         Lexeme.domainSeparator,
2892        ...         'after',
2893        ...     ],
2894        ...     2
2895        ... )
2896        (FeatureSpecifier(domain='feature', within=[], feature='after',\
2897 part=None), 4)
2898        >>> # Including a limit:
2899        >>> pf.parseFeatureSpecifierFromTokens(
2900        ...     [
2901        ...         'outerRegion',
2902        ...         Lexeme.zoneSeparator,
2903        ...         'midRegion',
2904        ...         Lexeme.zoneSeparator,
2905        ...         'feature',
2906        ...     ],
2907        ...     0,
2908        ...     2
2909        ... )
2910        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2911 feature='midRegion', part=None), 2)
2912        >>> pf.parseFeatureSpecifierFromTokens(
2913        ...     [
2914        ...         'outerRegion',
2915        ...         Lexeme.zoneSeparator,
2916        ...         'midRegion',
2917        ...         Lexeme.zoneSeparator,
2918        ...         'feature',
2919        ...     ],
2920        ...     0,
2921        ...     0
2922        ... )
2923        (FeatureSpecifier(domain=None, within=[], feature='outerRegion',\
2924 part=None), 0)
2925        >>> pf.parseFeatureSpecifierFromTokens(
2926        ...     [
2927        ...         'region',
2928        ...         Lexeme.zoneSeparator,
2929        ...         Lexeme.zoneSeparator,
2930        ...         'feature',
2931        ...     ]
2932        ... )
2933        (FeatureSpecifier(domain=None, within=[], feature='region',\
2934 part=None), 0)
2935        """
2936        start, limit, nTokens = normalizeEnds(tokens, start, limit)
2937
2938        if nTokens == 0:
2939            raise InvalidFeatureSpecifierError(
2940                "Can't parse a feature specifier from 0 tokens."
2941            )
2942        first = tokens[start]
2943        if isinstance(first, Lexeme):
2944            raise InvalidFeatureSpecifierError(
2945                f"Feature specifier can't begin with a special token."
2946                f" Got:\n{tokens[start:limit + 1]}"
2947            )
2948
2949        if nTokens in (1, 2):
2950            # 2 tokens isn't enough for a second part
2951            fs = base.FeatureSpecifier(
2952                domain=None,
2953                within=[],
2954                feature=first,
2955                part=None
2956            )
2957            return (base.normalizeFeatureSpecifier(fs), start)
2958
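            # With at least 3 tokens, the first separator decides the
            # shape; after a domain or zone separator, the rest is parsed
            # recursively and merged.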
2959        firstSep = tokens[start + 1]
2960        secondPart = tokens[start + 2]
2961
2962        if (
2963            firstSep not in (
2964                Lexeme.domainSeparator,
2965                Lexeme.zoneSeparator,
2966                Lexeme.partSeparator
2967            )
2968         or not isinstance(secondPart, str)
2969        ):
2970            # Following tokens won't work out
2971            fs = base.FeatureSpecifier(
2972                domain=None,
2973                within=[],
2974                feature=first,
2975                part=None
2976            )
2977            return (base.normalizeFeatureSpecifier(fs), start)
2978
2979        if firstSep == Lexeme.domainSeparator:
2980            if start + 2 > limit:
2981                return (
2982                    base.FeatureSpecifier(
2983                        domain=first,
2984                        within=[],
2985                        feature=secondPart,
2986                        part=None
2987                    ),
2988                    start + 2
2989                )
2990            else:
2991                rest, restEnd = self.parseFeatureSpecifierFromTokens(
2992                    tokens,
2993                    start + 2,
2994                    limit
2995                )
2996                if rest.domain is not None:  # two domainSeparators in a row
2997                    fs = base.FeatureSpecifier(
2998                        domain=first,
2999                        within=[],
3000                        feature=rest.domain,
3001                        part=None
3002                    )
3003                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3004                else:
3005                    fs = base.FeatureSpecifier(
3006                        domain=first,
3007                        within=rest.within,
3008                        feature=rest.feature,
3009                        part=rest.part
3010                    )
3011                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3012
3013        elif firstSep == Lexeme.zoneSeparator:
3014            if start + 2 > limit:
3015                fs = base.FeatureSpecifier(
3016                    domain=None,
3017                    within=[first],
3018                    feature=secondPart,
3019                    part=None
3020                )
3021                return (base.normalizeFeatureSpecifier(fs), start + 2)
3022            else:
3023                rest, restEnd = self.parseFeatureSpecifierFromTokens(
3024                    tokens,
3025                    start + 2,
3026                    limit
3027                )
3028                if rest.domain is not None:  # domain sep after zone sep
3029                    fs = base.FeatureSpecifier(
3030                        domain=None,
3031                        within=[first],
3032                        feature=rest.domain,
3033                        part=None
3034                    )
3035                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3036                else:
3037                    within = [first]
3038                    within.extend(rest.within)
3039                    fs = base.FeatureSpecifier(
3040                        domain=None,
3041                        within=within,
3042                        feature=rest.feature,
3043                        part=rest.part
3044                    )
3045                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3046
3047        else:  # must be partSeparator
3048            fs = base.FeatureSpecifier(
3049                domain=None,
3050                within=[],
3051                feature=first,
3052                part=secondPart
3053            )
3054            return (base.normalizeFeatureSpecifier(fs), start + 2)
3055
3056    def parseFeatureSpecifier(self, specString: str) -> base.FeatureSpecifier:
3057        """
3058        Parses a full `FeatureSpecifier` from a single string. See
3059        `parseFeatureSpecifierFromTokens`.
3060
3061        >>> pf = ParseFormat()
3062        >>> pf.parseFeatureSpecifier('example')
3063        FeatureSpecifier(domain=None, within=[], feature='example', part=None)
3064        >>> pf.parseFeatureSpecifier('outer::example')
3065        FeatureSpecifier(domain=None, within=['outer'], feature='example',\
3066 part=None)
3067        >>> pf.parseFeatureSpecifier('example%%middle')
3068        FeatureSpecifier(domain=None, within=[], feature='example',\
3069 part='middle')
3070        >>> pf.parseFeatureSpecifier('domain//region::feature%%part')
3071        FeatureSpecifier(domain='domain', within=['region'],\
3072 feature='feature', part='part')
3073        >>> pf.parseFeatureSpecifier(
3074        ...     'outerRegion::midRegion::innerRegion::feature'
3075        ... )
3076        FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
3077 'innerRegion'], feature='feature', part=None)
3078        >>> pf.parseFeatureSpecifier('region::domain//feature')
3079        Traceback (most recent call last):
3080        ...
3081        exploration.parsing.InvalidFeatureSpecifierError...
3082        >>> pf.parseFeatureSpecifier('feature%%part1%%part2')
3083        Traceback (most recent call last):
3084        ...
3085        exploration.parsing.InvalidFeatureSpecifierError...
3086        >>> pf.parseFeatureSpecifier('domain1//domain2//feature')
3087        Traceback (most recent call last):
3088        ...
3089        exploration.parsing.InvalidFeatureSpecifierError...
3090        >>> # TODO: Issue warnings for these...
3091        >>> pf.parseFeatureSpecifier('domain//123')  # domain discarded
3092        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3093        >>> pf.parseFeatureSpecifier('region::123')  # zone discarded
3094        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3095        >>> pf.parseFeatureSpecifier('123%%part')
3096        FeatureSpecifier(domain=None, within=[], feature=123, part='part')
3097        """
3098        tokens = self.lex(specString)
3099        result, rEnd = self.parseFeatureSpecifierFromTokens(tokens)
3100        if rEnd != len(tokens) - 1:
3101            raise InvalidFeatureSpecifierError(
3102                f"Feature specifier has extra stuff at end:"
3103                f" {tokens[rEnd + 1:]}"
3104            )
3105        else:
3106            return result
3107
3108    def normalizeFeatureSpecifier(
3109        self,
3110        spec: base.AnyFeatureSpecifier
3111    ) -> base.FeatureSpecifier:
3112        """
3113        Normalizes any kind of feature specifier into an official
3114        `FeatureSpecifier` tuple.
3115
3116        For example:
3117
3118        >>> pf = ParseFormat()
3119        >>> pf.normalizeFeatureSpecifier('town')
3120        FeatureSpecifier(domain=None, within=[], feature='town', part=None)
3121        >>> pf.normalizeFeatureSpecifier(5)
3122        FeatureSpecifier(domain=None, within=[], feature=5, part=None)
3123        >>> pf.parseFeatureSpecifierFromTokens(
3124        ...     [
3125        ...         'domain',
3126        ...         Lexeme.domainSeparator,
3127        ...         'region',
3128        ...         Lexeme.zoneSeparator,
3129        ...         'feature',
3130        ...         Lexeme.partSeparator,
3131        ...         'part'
3132        ...     ]
3133        ... )
3134        (FeatureSpecifier(domain='domain', within=['region'],\
3135 feature='feature', part='part'), 6)
3136        >>> pf.normalizeFeatureSpecifier('dom//one::two::three%%middle')
3137        FeatureSpecifier(domain='dom', within=['one', 'two'],\
3138 feature='three', part='middle')
3139        >>> pf.normalizeFeatureSpecifier(
3140        ...   base.FeatureSpecifier(None, ['region'], 'place', None)
3141        ... )
3142        FeatureSpecifier(domain=None, within=['region'], feature='place',\
3143 part=None)
3144        >>> fs = base.FeatureSpecifier(None, [], 'place', None)
3145        >>> ns = pf.normalizeFeatureSpecifier(fs)
3146        >>> ns is fs  # Doesn't create unnecessary clones
3147        True
3148        """
3149        if isinstance(spec, base.FeatureSpecifier):
3150            return spec
3151        elif isinstance(spec, base.FeatureID):
3152            return base.FeatureSpecifier(None, [], spec, None)
3153        elif isinstance(spec, str):
3154            return self.parseFeatureSpecifier(spec)
3155        else:
3156            raise TypeError(f"Invalid feature specifier type: '{type(spec)}'")
3157
3158    def unparseChallenge(self, challenge: base.Challenge) -> str:
3159        """
3160        Turns a `base.Challenge` into a string that can be turned back
3161        into an equivalent challenge by `parseChallenge`. For example:
3162
3163        >>> pf = ParseFormat()
3164        >>> c = base.challenge(
3165        ...     skills=base.BestSkill('brains', 'brawn'),
3166        ...     level=2,
3167        ...     success=[base.effect(set=('switch', 'on'))],
3168        ...     failure=[
3169        ...         base.effect(deactivate=True, delay=1),
3170        ...         base.effect(bounce=True)
3171        ...     ],
3172        ...     outcome=True
3173        ... )
3174        >>> r = pf.unparseChallenge(c)
3175        >>> r
3176        '<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}'
3177        >>> pf.parseChallenge(r) == c
3178        True
3179        >>> c2 = base.challenge(
3180        ...     skills=base.CombinedSkill(
3181        ...         -2,
3182        ...         base.ConditionalSkill(
3183        ...             base.ReqCapability('tough'),
3184        ...             base.BestSkill(1),
3185        ...             base.BestSkill(-1)
3186        ...         )
3187        ...     ),
3188        ...     level=-2,
3189        ...     success=[base.effect(gain='orb')],
3190        ...     failure=[],
3191        ...     outcome=None
3192        ... )
3193        >>> r2 = pf.unparseChallenge(c2)
3194        >>> r2
3195        '<-2>sum(-2, if(tough, best(1), best(-1))){gain orb}{}'
3196        >>> # TODO: let this parse through without BestSkills...
3197        >>> pf.parseChallenge(r2) == c2
3198        True
3199        """
3200        lt = self.formatDict[Lexeme.angleLeft]
3201        gt = self.formatDict[Lexeme.angleRight]
3202        result = (
3203            lt + str(challenge['level']) + gt
3204          + challenge['skills'].unparse()
3205        )
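            # A '>' marker before a consequence block records a known
            # outcome: before the success block when the outcome was
            # success, and before the failure block when it was failure.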
3206        if challenge['outcome'] is True:
3207            result += gt
3208        result += self.unparseConsequence(challenge['success'])
3209        if challenge['outcome'] is False:
3210            result += gt
3211        result += self.unparseConsequence(challenge['failure'])
3212        return result
3213
3214    def unparseCondition(self, condition: base.Condition) -> str:
3215        """
3216        Given a `base.Condition` returns a string that would result in
3217        that condition if given to `parseCondition`. For example:
3218
3219        >>> pf = ParseFormat()
3220        >>> c = base.condition(
3221        ...     condition=base.ReqAny([
3222        ...         base.ReqCapability('brawny'),
3223        ...         base.ReqNot(base.ReqTokens('weights', 3))
3224        ...     ]),
3225        ...     consequence=[base.effect(gain='power')]
3226        ... )
3227        >>> r = pf.unparseCondition(c)
3228        >>> r
3229        '??((brawny|!(weights*3))){gain power}{}'
3230        >>> pf.parseCondition(r) == c
3231        True
3232        """
3233        return (
3234            self.formatDict[Lexeme.doubleQuestionmark]
3235          + self.formatDict[Lexeme.openParen]
3236          + condition['condition'].unparse()
3237          + self.formatDict[Lexeme.closeParen]
3238          + self.unparseConsequence(condition['consequence'])
3239          + self.unparseConsequence(condition['alternative'])
3240        )
3241
3242    def unparseConsequence(self, consequence: base.Consequence) -> str:
3243        """
3244        Given a `base.Consequence`, returns a string encoding of it,
3245        using the same format that `parseConsequence` will parse. Uses
3246        function-call-like syntax and curly braces to denote different
3247        sub-consequences. See also `SkillCombination.unparse` and
3248        `Requirement.unparse`. For example:
3249
3250        >>> pf = ParseFormat()
3251        >>> c = [base.effect(gain='one'), base.effect(lose='one')]
3252        >>> pf.unparseConsequence(c)
3253        '{gain one; lose one}'
3254        >>> c = [
3255        ...     base.challenge(
3256        ...         skills=base.BestSkill('brains', 'brawn'),
3257        ...         level=2,
3258        ...         success=[base.effect(set=('switch', 'on'))],
3259        ...         failure=[
3260        ...             base.effect(deactivate=True, delay=1),
3261        ...             base.effect(bounce=True)
3262        ...         ],
3263        ...         outcome=True
3264        ...     )
3265        ... ]
3266        >>> pf.unparseConsequence(c)
3267        '{<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}}'
3268        >>> c[0]['outcome'] = False
3269        >>> pf.unparseConsequence(c)
3270        '{<2>best(brains, brawn){set switch:on}>{deactivate ,1; bounce}}'
3271        >>> c[0]['outcome'] = None
3272        >>> pf.unparseConsequence(c)
3273        '{<2>best(brains, brawn){set switch:on}{deactivate ,1; bounce}}'
3274        >>> c = [
3275        ...     base.condition(
3276        ...         condition=base.ReqAny([
3277        ...             base.ReqCapability('brawny'),
3278        ...             base.ReqNot(base.ReqTokens('weights', 3))
3279        ...         ]),
3280        ...         consequence=[
3281        ...             base.challenge(
3282        ...                 skills=base.CombinedSkill('brains', 'brawn'),
3283        ...                 level=3,
3284        ...                 success=[base.effect(goto='home')],
3285        ...                 failure=[base.effect(bounce=True)],
3286        ...                 outcome=None
3287        ...             )
3288        ...         ]  # no alternative -> empty list
3289        ...     )
3290        ... ]
3291        >>> pf.unparseConsequence(c)
3292        '{??((brawny|!(weights*3))){\
3293<3>sum(brains, brawn){goto home}{bounce}}{}}'
3294        >>> c = [base.effect(gain='if(power){gain "mimic"}')]
3295        >>> # TODO: Make this work!
3296        >>> # pf.unparseConsequence(c)
3297        >>> # The desired result would be:
3298        >>> # '{gain "if(power){gain \\\\"mimic\\\\"}"}'
3299        """
3300        result = self.formatDict[Lexeme.openCurly]
3301        for item in consequence:
3302            if 'skills' in item:  # a Challenge
3303                item = cast(base.Challenge, item)
3304                result += self.unparseChallenge(item)
3305
3306            elif 'value' in item:  # an Effect
3307                item = cast(base.Effect, item)
3308                result += self.unparseEffect(item)
3309
3310            elif 'condition' in item:  # a Condition
3311                item = cast(base.Condition, item)
3312                result += self.unparseCondition(item)
3313
3314            else:  # bad dict
3315                raise TypeError(
3316                    f"Invalid consequence: items in the list must be"
3317                    f" Effects, Challenges, or Conditions (got a dictionary"
3318                    f" without 'skills', 'value', or 'condition' keys)."
3319                    f"\nGot item: {repr(item)}"
3320                )
3321            result += '; '
3322
3323        if result.endswith('; '):
3324            result = result[:-2]
3325
3326        return result + self.formatDict[Lexeme.closeCurly]
3327
3328    def parseMechanismSpecifierFromTokens(
3329        self,
3330        tokens: LexedTokens,
3331        start: int = 0
3332    ) -> Tuple[base.MechanismSpecifier, int]:
3333        """
3334        Parses a mechanism specifier starting at the specified position
3335        in the given tokens list. No ending position is specified, but
3336        instead this function returns a tuple containing the parsed
3337        `base.MechanismSpecifier` along with an index in the tokens list
3338        where the end of the specifier was found.
3339
3340        For example:
3341
3342        >>> pf = ParseFormat()
3343        >>> pf.parseMechanismSpecifierFromTokens(['m'])
3344        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3345 name='m'), 0)
3346        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'])
3347        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3348 name='a'), 0)
3349        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'], 1)
3350        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3351 name='m'), 1)
3352        >>> pf.parseMechanismSpecifierFromTokens(
3353        ...     ['a', Lexeme.domainSeparator, 'm']
3354        ... )
3355        (MechanismSpecifier(domain='a', zone=None, decision=None,\
3356 name='m'), 2)
3357        >>> pf.parseMechanismSpecifierFromTokens(
3358        ...     ['a', Lexeme.zoneSeparator, 'm']
3359        ... )
3360        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3361 name='m'), 2)
3362        >>> pf.parseMechanismSpecifierFromTokens(
3363        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
3364        ... )
3365        (MechanismSpecifier(domain=None, zone='a', decision='b',\
3366 name='m'), 4)
3367        >>> pf.parseMechanismSpecifierFromTokens(
3368        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
3369        ... )
3370        (MechanismSpecifier(domain='a', zone=None, decision='b',\
3371 name='m'), 4)
3372        >>> pf.parseMechanismSpecifierFromTokens(
3373        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
3374        ... )
3375        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3376 name='b'), 2)
3377        >>> pf.parseMechanismSpecifierFromTokens(
3378        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3379        ...     1
3380        ... )
3381        Traceback (most recent call last):
3382        ...
3383        exploration.parsing.ParseError...
3384        >>> pf.parseMechanismSpecifierFromTokens(
3385        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3386        ...     2
3387        ... )
3388        (MechanismSpecifier(domain='b', zone=None, decision=None,\
3389 name='m'), 4)
3390        >>> pf.parseMechanismSpecifierFromTokens(
3391        ...     [
3392        ...         'a',
3393        ...         Lexeme.domainSeparator,
3394        ...         'b',
3395        ...         Lexeme.zoneSeparator,
3396        ...         'c',
3397        ...         Lexeme.zoneSeparator,
3398        ...         'm'
3399        ...     ]
3400        ... )
3401        (MechanismSpecifier(domain='a', zone='b', decision='c', name='m'), 6)
3402        >>> pf.parseMechanismSpecifierFromTokens(
3403        ...     [
3404        ...         'a',
3405        ...         Lexeme.domainSeparator,
3406        ...         'b',
3407        ...         Lexeme.zoneSeparator,
3408        ...         'c',
3409        ...         Lexeme.zoneSeparator,
3410        ...         'm'
3411        ...     ],
3412        ...     2
3413        ... )
3414        (MechanismSpecifier(domain=None, zone='b', decision='c',\
3415 name='m'), 6)
3416        >>> pf.parseMechanismSpecifierFromTokens(
3417        ...     [
3418        ...         'a',
3419        ...         Lexeme.domainSeparator,
3420        ...         'b',
3421        ...         Lexeme.zoneSeparator,
3422        ...         'c',
3423        ...         Lexeme.zoneSeparator,
3424        ...         'm'
3425        ...     ],
3426        ...     4
3427        ... )
3428        (MechanismSpecifier(domain=None, zone=None, decision='c',\
3429 name='m'), 6)
3430        >>> pf.parseMechanismSpecifierFromTokens(
3431        ...     [
3432        ...         'roomB',
3433        ...         Lexeme.zoneSeparator,
3434        ...         'switch',
3435        ...         Lexeme.mechanismSeparator,
3436        ...         'on'
3437        ...     ]
3438        ... )
3439        (MechanismSpecifier(domain=None, zone=None, decision='roomB',\
3440 name='switch'), 2)
3441        """
3442        start, tEnd, nLeft = normalizeEnds(tokens, start, -1)
3443
3444        try:
3445            dSpec, dEnd = self.parseDecisionSpecifierFromTokens(
3446                tokens,
3447                start
3448            )
3449        except ParseError:
3450            raise ParseError(
3451                "Failed to parse mechanism specifier couldn't parse"
3452                " initial mechanism name."
3453            )
3454
3455        if isinstance(dSpec, int):
3456            raise ParseError(
3457                f"Invalid mechanism specifier: cannot use a decision ID"
3458                f" as the decision part. Got: {tokens[start:]}"
3459            )
3460            # TODO: Allow that?
3461
3462        mDomain = dSpec.domain
3463        if dEnd == tEnd or dEnd == tEnd - 1:
3464            return (
3465                base.MechanismSpecifier(
3466                    domain=mDomain,
3467                    zone=None,
3468                    decision=dSpec.zone,
3469                    name=dSpec.name
3470                ),
3471                dEnd
3472            )
3473
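            # Look ahead: a zoneSeparator followed by another name means
            # the parsed decision specifier's name is really the decision
            # and the name after the separator is the mechanism name.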
3474        sep = tokens[dEnd + 1]
3475        after = tokens[dEnd + 2]
3476
3477        if sep == Lexeme.zoneSeparator:
3478            if isinstance(after, Lexeme):
3479                return (
3480                    base.MechanismSpecifier(
3481                        domain=mDomain,
3482                        zone=None,
3483                        decision=dSpec.zone,
3484                        name=dSpec.name
3485                    ),
3486                    dEnd
3487                )
3488            else:
3489                return (
3490                    base.MechanismSpecifier(
3491                        domain=mDomain,
3492                        zone=dSpec.zone,
3493                        decision=dSpec.name,
3494                        name=after
3495                    ),
3496                    dEnd + 2
3497                )
3498        else:
3499            return (
3500                base.MechanismSpecifier(
3501                    domain=mDomain,
3502                    zone=None,
3503                    decision=dSpec.zone,
3504                    name=dSpec.name
3505                ),
3506                dEnd
3507            )
3508
3509    def groupReqTokens(
3510        self,
3511        tokens: LexedTokens,
3512        start: int = 0,
3513        end: int = -1
3514    ) -> GroupedTokens:
3515        """
3516        Groups tokens for a requirement, stripping out all parentheses
3517        but replacing parenthesized expressions with sub-lists of tokens.
3518
3519        For example:
3520
3521        >>> pf = ParseFormat()
3522        >>> pf.groupReqTokens(['jump'])
3523        ['jump']
3524        >>> pf.groupReqTokens([Lexeme.openParen, 'jump'])
3525        Traceback (most recent call last):
3526        ...
3527        exploration.parsing.ParseError...
3528        >>> pf.groupReqTokens([Lexeme.closeParen, 'jump'])
3529        Traceback (most recent call last):
3530        ...
3531        exploration.parsing.ParseError...
3532        >>> pf.groupReqTokens(['jump', Lexeme.closeParen])
3533        Traceback (most recent call last):
3534        ...
3535        exploration.parsing.ParseError...
3536        >>> pf.groupReqTokens([Lexeme.openParen, 'jump', Lexeme.closeParen])
3537        [['jump']]
3538        >>> pf.groupReqTokens(
3539        ...     [
3540        ...         Lexeme.openParen,
3541        ...         'jump',
3542        ...         Lexeme.orBar,
3543        ...         'climb',
3544        ...         Lexeme.closeParen,
3545        ...         Lexeme.ampersand,
3546        ...         'crawl',
3547        ...     ]
3548        ... )
3549        [['jump', <Lexeme.orBar: ...>, 'climb'], <Lexeme.ampersand: ...>,\
3550 'crawl']
3551        """
3552        start, end, nTokens = normalizeEnds(tokens, start, end)
3553        if nTokens == 0:
3554            raise ParseError("Ran out of tokens.")
3555
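            # Each open paren pushes a new group onto this stack; each
            # close paren pops the top group and nests it in its parent.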
3556        resultsStack: List[GroupedTokens] = [[]]
3557        here = start
3558        while here <= end:
3559            token = tokens[here]
3560            here += 1
3561            if token == Lexeme.closeParen:
3562                if len(resultsStack) == 1:
3563                    raise ParseError(
3564                        f"Too many closing parens at index {here - 1}"
3565                        f" in:\n{tokens[start:end + 1]}"
3566                    )
3567                else:
3568                    closed = resultsStack.pop()
3569                    resultsStack[-1].append(closed)
3570            elif token == Lexeme.openParen:
3571                resultsStack.append([])
3572            else:
3573                resultsStack[-1].append(token)
3574        if len(resultsStack) != 1:
3575            raise ParseError(
3576                f"Mismatched parentheses in tokens:"
3577                f"\n{tokens[start:end + 1]}"
3578            )
3579        return resultsStack[0]
3580
3581    def groupReqTokensByPrecedence(
3582        self,
3583        tokenGroups: GroupedTokens
3584    ) -> GroupedRequirementParts:
3585        """
3586        Re-groups requirement tokens that have been grouped using
3587        `groupReqTokens` according to operator precedence, effectively
3588        creating an equivalent result which would have been obtained by
3589        `groupReqTokens` if all possible non-redundant explicit
3590        parentheses had been included.
3591
3592        Also turns each leaf part into a `Requirement`.
3593
3594        TODO: Make this actually reasonably efficient T_T
3595
3596        Examples:
3597
3598        >>> pf = ParseFormat()
3600        >>> pf.groupReqTokensByPrecedence(
3601        ...     [
3602        ...         ['jump', Lexeme.orBar, 'climb'],
3603        ...         Lexeme.ampersand,
3604        ...         Lexeme.notMarker,
3605        ...         'coin',
3606        ...         Lexeme.tokenCount,
3607        ...         '3'
3608        ...     ]
3609        ... )
3610        [\
3611[\
3612[[ReqCapability('jump'), <Lexeme.orBar: ...>, ReqCapability('climb')]],\
3613 <Lexeme.ampersand: ...>,\
3614 [<Lexeme.notMarker: ...>, ReqTokens('coin', 3)]\
3615]\
3616]
3617        """
3618        subgrouped: List[Union[Lexeme, str, GroupedRequirementParts]] = []
3619        # First recursively group all parenthesized expressions
3620        for i, item in enumerate(tokenGroups):
3621            if isinstance(item, list):
3622                subgrouped.append(self.groupReqTokensByPrecedence(item))
3623            else:
3624                subgrouped.append(item)
3625
3626        # Now process all leaf requirements
3627        leavesConverted: GroupedRequirementParts = []
3628        i = 0
3629        while i < len(subgrouped):
3630            gItem = subgrouped[i]
3631
3632            if isinstance(gItem, list):
3633                leavesConverted.append(gItem)
3634            elif isinstance(gItem, Lexeme):
3635                leavesConverted.append(gItem)
3636            elif i == len(subgrouped) - 1:
3637                # Lexemes were already handled above, so this final
3638                # item must be a string: an X/O keyword or a capability.
3639                assert isinstance(gItem, str)
3640                if gItem == 'X':
3641                    leavesConverted.append(base.ReqImpossible())
3642                elif gItem == 'O':
3643                    leavesConverted.append(base.ReqNothing())
3644                else:
3645                    leavesConverted.append(base.ReqCapability(gItem))
3650            else:
3651                assert isinstance(gItem, str)
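                    # A mechanism requirement may span several tokens
                    # (e.g. zone::decision::name:state), so try that
                    # parse first and fall back on simpler forms below.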
3652                try:
3653                    # TODO: Avoid list copy here...
3654                    couldBeMechanismSpecifier: LexedTokens = []
3655                    for ii in range(i, len(subgrouped)):
3656                        lexemeOrStr = subgrouped[ii]
3657                        if isinstance(lexemeOrStr, (Lexeme, str)):
3658                            couldBeMechanismSpecifier.append(lexemeOrStr)
3659                        else:
3660                            break
3661                    mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
3662                        couldBeMechanismSpecifier
3663                    )
3664                    mEnd += i
3665                    if (
3666                        mEnd >= len(subgrouped) - 2
3667                     or subgrouped[mEnd + 1] != Lexeme.mechanismSeparator
3668                    ):
3669                        raise ParseError("Not a mechanism requirement.")
3670
3671                    mState = subgrouped[mEnd + 2]
3672                    if not isinstance(mState, base.MechanismState):
3673                        raise ParseError("Not a mechanism requirement.")
3674                    leavesConverted.append(base.ReqMechanism(mSpec, mState))
3675                    i = mEnd + 2  # + 1 will happen automatically below
3676                except ParseError:
3677                    following = subgrouped[i + 1]
3678                    if following in (
3679                        Lexeme.tokenCount,
3680                        Lexeme.mechanismSeparator,
3681                        Lexeme.wigglyLine,
3682                        Lexeme.skillLevel
3683                    ):
3684                        if (
3685                            i == len(subgrouped) - 2
3686                         or isinstance(subgrouped[i + 2], Lexeme)
3687                        ):
3688                            if following == Lexeme.wigglyLine:
3689                                # Default tag value is 1
3690                                leavesConverted.append(base.ReqTag(gItem, 1))
3691                                i += 1  # another +1 automatic below
3692                            else:
3693                                raise ParseError(
3694                                    f"Lexeme at end of requirement. Grouped"
3695                                    f" tokens:\n{tokenGroups}"
3696                                )
3697                        else:
3698                            afterwards = subgrouped[i + 2]
3699                            if not isinstance(afterwards, str):
3700                                raise ParseError(
3701                                    f"Lexeme after token/mechanism/tag/skill"
3702                                    f" separator at index {i}."
3703                                    f" Grouped tokens:\n{tokenGroups}"
3704                                )
3705                            i += 2  # another +1 automatic below
3706                            if following == Lexeme.tokenCount:
3707                                try:
3708                                    tCount = int(afterwards)
3709                                except ValueError:
3710                                    raise ParseError(
3711                                        f"Token count could not be"
3712                                        f" parsed as an integer:"
3713                                        f" {afterwards!r}. Grouped"
3714                                        f" tokens:\n{tokenGroups}"
3715                                    )
3716                                leavesConverted.append(
3717                                    base.ReqTokens(gItem, tCount)
3718                                )
3719                            elif following == Lexeme.mechanismSeparator:
3720                                leavesConverted.append(
3721                                    base.ReqMechanism(gItem, afterwards)
3722                                )
3723                            elif following == Lexeme.wigglyLine:
3724                                tVal = self.parseTagValue(afterwards)
3725                                leavesConverted.append(
3726                                    base.ReqTag(gItem, tVal)
3727                                )
3728                            else:
3729                                assert following == Lexeme.skillLevel
3730                                try:
3731                                    sLevel = int(afterwards)
3732                                except ValueError:
3733                                    raise ParseError(
3734                                        f"Skill level could not be"
3735                                        f" parsed as an integer:"
3736                                        f" {afterwards!r}. Grouped"
3737                                        f" tokens:\n{tokenGroups}"
3738                                    )
3739                                leavesConverted.append(
3740                                    base.ReqLevel(gItem, sLevel)
3741                                )
3742                    else:
3743                        if gItem == 'X':
3744                            leavesConverted.append(base.ReqImpossible())
3745                        elif gItem == 'O':
3746                            leavesConverted.append(base.ReqNothing())
3747                        else:
3748                            leavesConverted.append(
3749                                base.ReqCapability(gItem)
3750                            )
3751
3752            # Finally, increment our index:
3753            i += 1
3754
3755        # Now group all NOT operators
3756        i = 0
3757        notsGrouped: GroupedRequirementParts = []
3758        while i < len(leavesConverted):
3759            leafItem = leavesConverted[i]
3760            group = []
3761            while leafItem == Lexeme.notMarker:
3762                group.append(leafItem)
3763                i += 1
3764                if i >= len(leavesConverted):
3765                    raise ParseError(
3766                        f"NOT at end of tokens:\n{leavesConverted}"
3767                    )
3768                leafItem = leavesConverted[i]
3769            if group == []:
3770                notsGrouped.append(leafItem)
3771                i += 1
3772            else:
3773                group.append(leafItem)
3774                i += 1
3775                notsGrouped.append(group)
3776
3777        # Next group all AND operators
3778        i = 0
3779        andsGrouped: GroupedRequirementParts = []
3780        while i < len(notsGrouped):
3781            notGroupItem = notsGrouped[i]
3782            if notGroupItem == Lexeme.ampersand:
3783                if i == len(notsGrouped) - 1:
3784                    raise ParseError(
3785                        f"AND at end of group in tokens:"
3786                        f"\n{tokenGroups}"
3787                        f"Which had been grouped into:"
3788                        f"\n{notsGrouped}"
3789                    )
3790                itemAfter = notsGrouped[i + 1]
3791                if isinstance(itemAfter, Lexeme):
3792                    raise ParseError(
3793                        f"Lexeme after AND in of group in tokens:"
3794                        f"\n{tokenGroups}"
3795                        f"Which had been grouped into:"
3796                        f"\n{notsGrouped}"
3797                    )
3798                assert isinstance(itemAfter, (base.Requirement, list))
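                # Merge into the previous AND chain when there is one;
                # otherwise start a new chain from the previous operand.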
3799                prev = andsGrouped[-1]
3800                if (
3801                    isinstance(prev, list)
3802                and len(prev) > 2
3803                and prev[1] == Lexeme.ampersand
3804                ):
3805                    prev.extend(notsGrouped[i:i + 2])
3806                    i += 1  # with an extra +1 below
3807                else:
3808                    andsGrouped.append(
3809                        [andsGrouped.pop()] + notsGrouped[i:i + 2]
3810                    )
3811                    i += 1 # extra +1 below
3812            else:
3813                andsGrouped.append(notGroupItem)
3814            i += 1
3815
3816        # Finally check that we only have OR operators left over
3817        i = 0
3818        finalResult: GroupedRequirementParts = []
3819        while i < len(andsGrouped):
3820            andGroupItem = andsGrouped[i]
3821            if andGroupItem == Lexeme.orBar:
3822                if i == len(andsGrouped) - 1:
3823                    raise ParseError(
3824                        f"OR at end of group in tokens:"
3825                        f"\n{tokenGroups}"
3826                        f"Which had been grouped into:"
3827                        f"\n{andsGrouped}"
3828                    )
3829                itemAfter = andsGrouped[i + 1]
3830                if isinstance(itemAfter, Lexeme):
3831                    raise ParseError(
3832                        f"Lexeme after OR in of group in tokens:"
3833                        f"\n{tokenGroups}"
3834                        f"Which had been grouped into:"
3835                        f"\n{andsGrouped}"
3836                    )
3837                assert isinstance(itemAfter, (base.Requirement, list))
3838                prev = finalResult[-1]
3839                if (
3840                    isinstance(prev, list)
3841                and len(prev) > 2
3842                and prev[1] == Lexeme.orBar
3843                ):
3844                    prev.extend(andsGrouped[i:i + 2])
3845                    i += 1  # with an extra +1 below
3846                else:
3847                    finalResult.append(
3848                        [finalResult.pop()] + andsGrouped[i:i + 2]
3849                    )
3850                    i += 1 # extra +1 below
3851            elif isinstance(andGroupItem, Lexeme):
3852                raise ParseError(
3853                    f"Leftover lexeme when grouping ORs at index {i}"
3854                    f" in grouped tokens:\n{andsGrouped}"
3855                    f"\nOriginal tokens were:\n{tokenGroups}"
3856                )
3857            else:
3858                finalResult.append(andGroupItem)
3859            i += 1
3860
3861        return finalResult
3862
3863    def parseRequirementFromRegroupedTokens(
3864        self,
3865        reqGroups: GroupedRequirementParts
3866    ) -> base.Requirement:
3867        """
3868        Recursive parser that works once tokens have been turned into
3869        requirements at the leaves and grouped by operator precedence
3870        otherwise (see `groupReqTokensByPrecedence`).
3871
3872        TODO: Simplify by just doing this while grouping...?
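
        For example (an illustrative sketch; the reprs follow the same
        conventions as the other doctests in this file):

        >>> pf = ParseFormat()
        >>> pf.parseRequirementFromRegroupedTokens(
        ...     [base.ReqCapability('jump')]
        ... )
        ReqCapability('jump')
        >>> pf.parseRequirementFromRegroupedTokens(
        ...     [Lexeme.notMarker, base.ReqCapability('jump')]
        ... )
        ReqNot(ReqCapability('jump'))
        >>> pf.parseRequirementFromRegroupedTokens(
        ...     [
        ...         base.ReqCapability('jump'),
        ...         Lexeme.ampersand,
        ...         base.ReqCapability('climb')
        ...     ]
        ... )
        ReqAll([ReqCapability('jump'), ReqCapability('climb')])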
3873        """
3874        if len(reqGroups) == 0:
3875            raise ParseError("Ran out of tokens.")
3876
3877        elif len(reqGroups) == 1:
3878            only = reqGroups[0]
3879            if isinstance(only, list):
3880                return self.parseRequirementFromRegroupedTokens(only)
3881            elif isinstance(only, base.Requirement):
3882                return only
3883            else:
3884                raise ParseError(f"Invalid singleton group:\n{only}")
3885        elif reqGroups[0] == Lexeme.notMarker:
3886            if (
3887                not all(x == Lexeme.notMarker for x in reqGroups[:-1])
3888             or not isinstance(reqGroups[-1], (list, base.Requirement))
3889            ):
3890                raise ParseError(f"Invalid negation group:\n{reqGroups}")
3891            result = reqGroups[-1]
3892            if isinstance(result, list):
3893                result = self.parseRequirementFromRegroupedTokens(result)
3894            assert isinstance(result, base.Requirement)
3895            for i in range(len(reqGroups) - 1):
3896                result = base.ReqNot(result)
3897            return result
3898        elif len(reqGroups) % 2 == 0:
3899            raise ParseError(f"Even-length non-negation group:\n{reqGroups}")
3900        else:
3901            if (
3902                reqGroups[1] not in (Lexeme.ampersand, Lexeme.orBar)
3903             or not all(
3904                    reqGroups[i] == reqGroups[1]
3905                    for i in range(1, len(reqGroups), 2)
3906                )
3907            ):
3908                raise ParseError(
3909                    f"Inconsistent operator(s) in group:\n{reqGroups}"
3910                )
3911            op = reqGroups[1]
3912            operands = [
3913                (
3914                    self.parseRequirementFromRegroupedTokens(x)
3915                    if isinstance(x, list)
3916                    else x
3917                )
3918                for x in reqGroups[::2]
3919            ]
3920            if not all(isinstance(x, base.Requirement) for x in operands):
3921                raise ParseError(
3922                    f"Item not reducible to Requirement in AND group:"
3923                    f"\n{reqGroups}"
3924                )
3925            reqSequence = cast(Sequence[base.Requirement], operands)
3926            if op == Lexeme.ampersand:
3927                return base.ReqAll(reqSequence).flatten()
3928            else:
3929                assert op == Lexeme.orBar
3930                return base.ReqAny(reqSequence).flatten()
3931
3932    def parseRequirementFromGroupedTokens(
3933        self,
3934        tokenGroups: GroupedTokens
3935    ) -> base.Requirement:
3936        """
3937        Parses a `base.Requirement` from a pre-grouped tokens list (see
3938        `groupReqTokens`). Uses the 'orBar', 'ampersand', 'notMarker',
3939        'tokenCount', and 'mechanismSeparator' `Lexeme`s to provide
3940        'or', 'and', and 'not' operators along with distinguishing
3941        between capabilities, tokens, and mechanisms.
3942
3943        Precedence ordering is not, then and, then or, but you are
3944        encouraged to use parentheses for explicit grouping (the
3945        'openParen' and 'closeParen' `Lexeme`s, although these must be
3946        handled by `groupReqTokens` so this function won't see them
3947        directly).
3948
3949        You can also use 'X' (without quotes) for a never-satisfied
3950        requirement, and 'O' (without quotes) for an always-satisfied
3951        requirement.
3952
3953        Note that when '!' is applied to a token requirement it flips
3954        the sense of the integer from 'must have at least this many' to
3955        'must have strictly less than this many'.
3956
3957        Raises a `ParseError` if the grouped tokens it is given cannot
3958        be parsed as a `Requirement`.
3959
3960        Examples:
3961
3962        >>> pf = ParseFormat()
3963        >>> pf.parseRequirementFromGroupedTokens(['capability'])
3964        ReqCapability('capability')
3965        >>> pf.parseRequirementFromGroupedTokens(
3966        ...     ['token', Lexeme.tokenCount, '3']
3967        ... )
3968        ReqTokens('token', 3)
3969        >>> pf.parseRequirementFromGroupedTokens(
3970        ...     ['mechanism', Lexeme.mechanismSeparator, 'state']
3971        ... )
3972        ReqMechanism('mechanism', 'state')
3973        >>> pf.parseRequirementFromGroupedTokens(
3974        ...     ['capability', Lexeme.orBar, 'token',
3975        ...      Lexeme.tokenCount, '3']
3976        ... )
3977        ReqAny([ReqCapability('capability'), ReqTokens('token', 3)])
3978        >>> pf.parseRequirementFromGroupedTokens(
3979        ...     ['one', Lexeme.ampersand, 'two', Lexeme.orBar, 'three']
3980        ... )
3981        ReqAny([ReqAll([ReqCapability('one'), ReqCapability('two')]),\
3982 ReqCapability('three')])
3983        >>> pf.parseRequirementFromGroupedTokens(
3984        ...     [
3985        ...         'one',
3986        ...         Lexeme.ampersand,
3987        ...         [
3988        ...              'two',
3989        ...              Lexeme.orBar,
3990        ...              'three'
3991        ...         ]
3992        ...     ]
3993        ... )
3994        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
3995 ReqCapability('three')])])
3996        >>> pf.parseRequirementFromTokens(['X'])
3997        ReqImpossible()
3998        >>> pf.parseRequirementFromTokens(['O'])
3999        ReqNothing()
4000        >>> pf.parseRequirementFromTokens(
4001        ...     [Lexeme.openParen, 'O', Lexeme.closeParen]
4002        ... )
4003        ReqNothing()
4004        """
4005        if len(tokenGroups) == 0:
4006            raise ParseError("Ran out of tokens.")
4007
4008        reGrouped = self.groupReqTokensByPrecedence(tokenGroups)
4009
4010        return self.parseRequirementFromRegroupedTokens(reGrouped)
4011
4012    def parseRequirementFromTokens(
4013        self,
4014        tokens: LexedTokens,
4015        start: int = 0,
4016        end: int = -1
4017    ) -> base.Requirement:
4018        """
4019        Parses a requirement from `LexedTokens` by grouping them first
4020        and then using `parseRequirementFromGroupedTokens`.
4021
4022        For example:
4023
4024        >>> pf = ParseFormat()
4025        >>> pf.parseRequirementFromTokens(
4026        ...     [
4027        ...         'one',
4028        ...         Lexeme.ampersand,
4029        ...         Lexeme.openParen,
4030        ...         'two',
4031        ...         Lexeme.orBar,
4032        ...         'three',
4033        ...         Lexeme.closeParen
4034        ...     ]
4035        ... )
4036        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
4037 ReqCapability('three')])])
4038        """
4039        grouped = self.groupReqTokens(tokens, start, end)
4040        return self.parseRequirementFromGroupedTokens(grouped)
4041
4042    def parseRequirement(self, encoded: str) -> base.Requirement:
4043        """
4044        Parses a `base.Requirement` from a string by calling `lex` and
4045        then feeding it into `ParseFormat.parseRequirementFromTokens`.
4046        As stated in `parseRequirementFromGroupedTokens`, the precedence
4047        binding order is NOT, then AND, then OR.
4048
4049        For example:
4050
4051        >>> pf = ParseFormat()
4052        >>> pf.parseRequirement('! coin * 3')
4053        ReqNot(ReqTokens('coin', 3))
4054        >>> pf.parseRequirement(
4055        ...     '  oneWord | "two words"|"three  words words" '
4056        ... )
4057        ReqAny([ReqCapability('oneWord'), ReqCapability('"two words"'),\
4058 ReqCapability('"three  words words"')])
4059        >>> pf.parseRequirement('words-with-dashes')
4060        ReqCapability('words-with-dashes')
4061        >>> r = pf.parseRequirement('capability&roomB::switch:on')
4062        >>> r
4063        ReqAll([ReqCapability('capability'),\
4064 ReqMechanism(MechanismSpecifier(domain=None, zone=None, decision='roomB',\
4065 name='switch'), 'on')])
4066        >>> r.unparse()
4067        '(capability&roomB::switch:on)'
4068        >>> pf.parseRequirement('!!!one')
4069        ReqNot(ReqNot(ReqNot(ReqCapability('one'))))
4070        >>> pf.parseRequirement('domain//zone::where::mechanism:state')
4071        ReqMechanism(MechanismSpecifier(domain='domain', zone='zone',\
4072 decision='where', name='mechanism'), 'state')
4073        >>> pf.parseRequirement('domain//mechanism:state')
4074        ReqMechanism(MechanismSpecifier(domain='domain', zone=None,\
4075 decision=None, name='mechanism'), 'state')
4076        >>> pf.parseRequirement('where::mechanism:state')
4077        ReqMechanism(MechanismSpecifier(domain=None, zone=None,\
4078 decision='where', name='mechanism'), 'state')
4079        >>> pf.parseRequirement('zone::where::mechanism:state')
4080        ReqMechanism(MechanismSpecifier(domain=None, zone='zone',\
4081 decision='where', name='mechanism'), 'state')
4082        >>> pf.parseRequirement('tag~')
4083        ReqTag('tag', 1)
4084        >>> pf.parseRequirement('tag~&tag2~')
4085        ReqAll([ReqTag('tag', 1), ReqTag('tag2', 1)])
4086        >>> pf.parseRequirement('tag~value|tag~3|tag~3.5|skill^3')
4087        ReqAny([ReqTag('tag', 'value'), ReqTag('tag', 3),\
4088 ReqTag('tag', 3.5), ReqLevel('skill', 3)])
4089        >>> pf.parseRequirement('tag~True|tag~False|tag~None')
4090        ReqAny([ReqTag('tag', True), ReqTag('tag', False), ReqTag('tag', None)])
4091
4092        Precedence examples:
4093
4094        >>> pf.parseRequirement('A|B&C')
4095        ReqAny([ReqCapability('A'), ReqAll([ReqCapability('B'),\
4096 ReqCapability('C')])])
4097        >>> pf.parseRequirement('A&B|C')
4098        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4099 ReqCapability('C')])
4100        >>> pf.parseRequirement('(A&B)|C')
4101        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4102 ReqCapability('C')])
4103        >>> pf.parseRequirement('(A&B|C)&D')
4104        ReqAll([ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4105 ReqCapability('C')]), ReqCapability('D')])
4106
4107        Error examples:
4108
4109        >>> pf.parseRequirement('one ! Word')
4110        Traceback (most recent call last):
4111        ...
4112        exploration.parsing.ParseError...
4113        >>> pf.parseRequirement('a|')
4114        Traceback (most recent call last):
4115        ...
4116        exploration.parsing.ParseError...
4117        >>> pf.parseRequirement('b!')
4118        Traceback (most recent call last):
4119        ...
4120        exploration.parsing.ParseError...
4121        >>> pf.parseRequirement('*emph*')
4122        Traceback (most recent call last):
4123        ...
4124        exploration.parsing.ParseError...
4125        >>> pf.parseRequirement('one&&two')
4126        Traceback (most recent call last):
4127        ...
4128        exploration.parsing.ParseError...
4129        >>> pf.parseRequirement('one!|two')
4130        Traceback (most recent call last):
4131        ...
4132        exploration.parsing.ParseError...
4133        >>> pf.parseRequirement('one*two')
4134        Traceback (most recent call last):
4135        ...
4136        exploration.parsing.ParseError...
4137        >>> pf.parseRequirement('one*')
4138        Traceback (most recent call last):
4139        ...
4140        exploration.parsing.ParseError...
4141        >>> pf.parseRequirement('()')
4142        Traceback (most recent call last):
4143        ...
4144        exploration.parsing.ParseError...
4145        >>> pf.parseRequirement('(one)*3')
4146        Traceback (most recent call last):
4147        ...
4148        exploration.parsing.ParseError...
4149        >>> pf.parseRequirement('a:')
4150        Traceback (most recent call last):
4151        ...
4152        exploration.parsing.ParseError...
4153        >>> pf.parseRequirement('a:b:c')
4154        Traceback (most recent call last):
4155        ...
4156        exploration.parsing.ParseError...
4157        >>> pf.parseRequirement('where::capability')
4158        Traceback (most recent call last):
4159        ...
4160        exploration.parsing.ParseError...
4161        """
4162        return self.parseRequirementFromTokens(
4163            lex(encoded, self.reverseFormat)
4164        )
4165
4166    def parseSkillCombinationFromTokens(
4167        self,
4168        tokens: LexedTokens,
4169        start: int = 0,
4170        end: int = -1
4171    ) -> Union[base.Skill, base.SkillCombination]:
4172        """
4173        Parses a skill combination from the specified range within the
4174        given tokens list. If just a single string token is selected, it
4175        will be returned as a `base.BestSkill` with just that skill
4176        inside.
4177
4178        For example:
4179
4180        >>> pf = ParseFormat()
4181        >>> pf.parseSkillCombinationFromTokens(['climbing'])
4182        BestSkill('climbing')
4183        >>> tokens = [
4184        ...     'best',
4185        ...     Lexeme.openParen,
4186        ...     'brains',
4187        ...     Lexeme.sepOrDelay,
4188        ...     'brawn',
4189        ...     Lexeme.closeParen,
4190        ... ]
4191        >>> pf.parseSkillCombinationFromTokens(tokens)
4192        BestSkill('brains', 'brawn')
4193        >>> tokens[2] = '3'  # not a lexeme so it's a string
4194        >>> pf.parseSkillCombinationFromTokens(tokens)
4195        BestSkill(3, 'brawn')
4196        >>> tokens = [
4197        ...     Lexeme.wigglyLine,
4198        ...     Lexeme.wigglyLine,
4199        ...     'yes',
4200        ... ]
4201        >>> pf.parseSkillCombinationFromTokens(tokens)
4202        InverseSkill(InverseSkill('yes'))
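
        A conditional combination (a sketch; the equality relies on
        `base.SkillCombination` equality as exercised in the
        `unparseChallenge` doctests):

        >>> tokens = [
        ...     'if',
        ...     Lexeme.openParen,
        ...     'tough',
        ...     Lexeme.sepOrDelay,
        ...     '1',
        ...     Lexeme.sepOrDelay,
        ...     '-1',
        ...     Lexeme.closeParen,
        ... ]
        >>> sc = pf.parseSkillCombinationFromTokens(tokens)
        >>> sc == base.ConditionalSkill(
        ...     base.ReqCapability('tough'),
        ...     base.BestSkill(1),
        ...     base.BestSkill(-1)
        ... )
        True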
4203        """
4204        start, end, nTokens = normalizeEnds(tokens, start, end)
4205
4206        first = tokens[start]
4207        if nTokens == 1:
4208            if isinstance(first, base.Skill):
4209                try:
4210                    level = int(first)
4211                    return base.BestSkill(level)
4212                except ValueError:
4213                    return base.BestSkill(first)
4214            else:
4215                raise ParseError(
4216                    "Invalid SkillCombination:\n{tokens[start:end + 1]"
4217                )
4218
4219        if first == Lexeme.wigglyLine:
4220            inv = self.parseSkillCombinationFromTokens(
4221                tokens,
4222                start + 1,
4223                end
4224            )
4225            if isinstance(inv, base.BestSkill) and len(inv.skills) == 1:
4226                return base.InverseSkill(inv.skills[0])
4227            else:
4228                return base.InverseSkill(inv)
4229
4230        second = tokens[start + 1]
4231        if second != Lexeme.openParen:
4232            raise ParseError(
4233                f"Invalid SkillCombination (missing paren):"
4234                f"\n{tokens[start:end + 1]}"
4235            )
4236
4237        parenEnd = self.matchingBrace(
4238            tokens,
4239            start + 1,
4240            Lexeme.openParen,
4241            Lexeme.closeParen
4242        )
4243        if parenEnd != end:
4244            raise ParseError(
4245                f"Extra junk after SkillCombination:"
4246                f"\n{tokens[parenEnd + 1:end + 1]}"
4247            )
4248
4249        if first == 'if':
4250            parts = list(
4251                findSeparatedParts(
4252                    tokens,
4253                    Lexeme.sepOrDelay,
4254                    start + 2,
4255                    end - 1,
4256                    Lexeme.openParen,
4257                    Lexeme.closeParen
4258                )
4259            )
4260            if len(parts) != 3:
4261                raise ParseError(
4262                    f"Wrong number of parts for ConditionalSkill (needs"
4263                    f" 3, got {len(parts)}:"
4264                    f"\n{tokens[start + 2:end]}"
4265                )
4266            reqStart, reqEnd = parts[0]
4267            ifStart, ifEnd = parts[1]
4268            elseStart, elseEnd = parts[2]
4269            return base.ConditionalSkill(
4270                self.parseRequirementFromTokens(tokens, reqStart, reqEnd),
4271                self.parseSkillCombinationFromTokens(tokens, ifStart, ifEnd),
4272                self.parseSkillCombinationFromTokens(
4273                    tokens,
4274                    elseStart,
4275                    elseEnd
4276                ),
4277            )
4278        elif first in ('sum', 'best', 'worst'):
4279            make: type[base.SkillCombination]
4280            if first == 'sum':
4281                make = base.CombinedSkill
4282            elif first == 'best':
4283                make = base.BestSkill
4284            else:
4285                make = base.WorstSkill
4286
4287            subs = []
4288            for partStart, partEnd in findSeparatedParts(
4289                tokens,
4290                Lexeme.sepOrDelay,
4291                start + 2,
4292                end - 1,
4293                Lexeme.openParen,
4294                Lexeme.closeParen
4295            ):
4296                sub = self.parseSkillCombinationFromTokens(
4297                    tokens,
4298                    partStart,
4299                    partEnd
4300                )
4301                if (
4302                    isinstance(sub, base.BestSkill)
4303                and len(sub.skills) == 1
4304                ):
4305                    subs.append(sub.skills[0])
4306                else:
4307                    subs.append(sub)
4308
4309            return make(*subs)
4310        else:
4311            raise ParseError(
4312                "Invalid SkillCombination:\n{tokens[start:end + 1]"
4313            )
4314
4315    def parseSkillCombination(
4316        self,
4317        encoded: str
4318    ) -> base.SkillCombination:
4319        """
4320        Parses a `SkillCombination` from a string. Calls `lex` and then
4321        `parseSkillCombinationFromTokens`.
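
            For example, using the default format (the `best(...)`
            syntax here mirrors the token-based examples for
            `parseChallengeFromTokens`):

            >>> pf = ParseFormat()
            >>> pf.parseSkillCombination(
            ...     'best(chess, checkers)'
            ... ) == base.BestSkill('chess', 'checkers')
            True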
4322        """
4323        result = self.parseSkillCombinationFromTokens(
4324            lex(encoded, self.reverseFormat)
4325        )
4326        if not isinstance(result, base.SkillCombination):
4327            return base.BestSkill(result)
4328        else:
4329            return result
4330
4331    def parseConditionFromTokens(
4332        self,
4333        tokens: LexedTokens,
4334        start: int = 0,
4335        end: int = -1
4336    ) -> base.Condition:
4337        """
4338        Parses a `base.Condition` from a lexed tokens list. For example:
4339
4340        >>> pf = ParseFormat()
4341        >>> tokens = [
4342        ...     Lexeme.doubleQuestionmark,
4343        ...     Lexeme.openParen,
4344        ...     "fire",
4345        ...     Lexeme.ampersand,
4346        ...     "water",
4347        ...     Lexeme.closeParen,
4348        ...     Lexeme.openCurly,
4349        ...     "gain",
4350        ...     "wind",
4351        ...     Lexeme.closeCurly,
4352        ...     Lexeme.openCurly,
4353        ...     Lexeme.closeCurly,
4354        ... ]
4355        >>> pf.parseConditionFromTokens(tokens) == base.condition(
4356        ...     condition=base.ReqAll([
4357        ...         base.ReqCapability('fire'),
4358        ...         base.ReqCapability('water')
4359        ...     ]),
4360        ...     consequence=[base.effect(gain='wind')]
4361        ... )
4362        True
4363        """
4364        start, end, nTokens = normalizeEnds(tokens, start, end)
4365        if nTokens < 8:
4366            raise ParseError(
4367                f"A Condition requires at least 8 tokens (got {nTokens})."
4368            )
4369        if tokens[start] != Lexeme.doubleQuestionmark:
4370            raise ParseError(
4371                f"A Condition must start with"
4372                f" {repr(self.formatDict[Lexeme.doubleQuestionmark])}"
4373            )
4374        try:
4375            consequenceStart = tokens.index(Lexeme.openCurly, start)
4376        except ValueError:
4377            raise ParseError("A condition must include a consequence block.")
4378        consequenceEnd = self.matchingBrace(tokens, consequenceStart)
4379        altStart = consequenceEnd + 1
4380        altEnd = self.matchingBrace(tokens, altStart)
4381
4382        if altEnd != end:
4383            raise ParseError(
4384                f"Junk after condition:\n{tokens[altEnd + 1: end + 1]}"
4385            )
4386
4387        return base.condition(
4388            condition=self.parseRequirementFromTokens(
4389                tokens,
4390                start + 1,
4391                consequenceStart - 1
4392            ),
4393            consequence=self.parseConsequenceFromTokens(
4394                tokens,
4395                consequenceStart,
4396                consequenceEnd
4397            ),
4398            alternative=self.parseConsequenceFromTokens(
4399                tokens,
4400                altStart,
4401                altEnd
4402            )
4403        )
4404
4405    def parseCondition(
4406        self,
4407        encoded: str
4408    ) -> base.Condition:
4409        """
4410        Lexes the given string and then calls `parseConditionFromTokens`
4411        to return a `base.Condition`.
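
            For example, using the default format (this mirrors the
            token-based example in `parseConditionFromTokens`):

            >>> pf = ParseFormat()
            >>> pf.parseCondition(
            ...     '??(fire&water){gain wind}{}'
            ... ) == base.condition(
            ...     condition=base.ReqAll([
            ...         base.ReqCapability('fire'),
            ...         base.ReqCapability('water')
            ...     ]),
            ...     consequence=[base.effect(gain='wind')]
            ... )
            True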
4412        """
4413        return self.parseConditionFromTokens(
4414            lex(encoded, self.reverseFormat)
4415        )
4416
4417    def parseChallengeFromTokens(
4418        self,
4419        tokens: LexedTokens,
4420        start: int = 0,
4421        end: int = -1
4422    ) -> base.Challenge:
4423        """
4424        Parses a `base.Challenge` from a lexed tokens list.
4425
4426        For example:
4427
4428        >>> pf = ParseFormat()
4429        >>> tokens = [
4430        ...     Lexeme.angleLeft,
4431        ...     '2',
4432        ...     Lexeme.angleRight,
4433        ...     'best',
4434        ...     Lexeme.openParen,
4435        ...     "chess",
4436        ...     Lexeme.sepOrDelay,
4437        ...     "checkers",
4438        ...     Lexeme.closeParen,
4439        ...     Lexeme.openCurly,
4440        ...     "gain",
4441        ...     "coin",
4442        ...     Lexeme.tokenCount,
4443        ...     "5",
4444        ...     Lexeme.closeCurly,
4445        ...     Lexeme.angleRight,
4446        ...     Lexeme.openCurly,
4447        ...     "lose",
4448        ...     "coin",
4449        ...     Lexeme.tokenCount,
4450        ...     "5",
4451        ...     Lexeme.closeCurly,
4452        ... ]
4453        >>> c = pf.parseChallengeFromTokens(tokens)
4454        >>> c['skills'] == base.BestSkill('chess', 'checkers')
4455        True
4456        >>> c['level']
4457        2
4458        >>> c['success'] == [base.effect(gain=('coin', 5))]
4459        True
4460        >>> c['failure'] == [base.effect(lose=('coin', 5))]
4461        True
4462        >>> c['outcome']
4463        False
4464        >>> c == base.challenge(
4465        ...     skills=base.BestSkill('chess', 'checkers'),
4466        ...     level=2,
4467        ...     success=[base.effect(gain=('coin', 5))],
4468        ...     failure=[base.effect(lose=('coin', 5))],
4469        ...     outcome=False
4470        ... )
4471        True
4472        >>> t2 = ['hi'] + tokens + ['bye']  # parsing only part of the list
4473        >>> c == pf.parseChallengeFromTokens(t2, 1, -2)
4474        True
4475        """
4476        start, end, nTokens = normalizeEnds(tokens, start, end)
4477        if nTokens < 8:
4478            raise ParseError(
4479                f"Not enough tokens for a challenge: {nTokens}"
4480            )
4481        if tokens[start] != Lexeme.angleLeft:
4482            raise ParseError(
4483                f"Challenge must start with"
4484                f" {repr(self.formatDict[Lexeme.angleLeft])}"
4485            )
4486        levelStr = tokens[start + 1]
4487        if isinstance(levelStr, Lexeme):
4488            raise ParseError(
4489                f"Challenge must start with a level in angle brackets"
4490                f" (got {repr(self.formatDict[levelStr])})."
4491            )
4492        if tokens[start + 2] != Lexeme.angleRight:
4493            raise ParseError(
4494                f"Challenge must include"
4495                f" {repr(self.formatDict[Lexeme.angleRight])} after"
4496                f" the level."
4497            )
4498        try:
4499            level = int(levelStr)
4500        except ValueError:
4501            raise ParseError(
4502                f"Challenge level must be an integer (got"
4503                f" {repr(tokens[start + 1])})."
4504            )
4505        try:
4506            successStart = tokens.index(Lexeme.openCurly, start)
4507            skillsEnd = successStart - 1
4508        except ValueError:
4509            raise ParseError("A challenge must include a consequence block.")
4510
4511        outcome: Optional[bool] = None
4512        if tokens[skillsEnd] == Lexeme.angleRight:
4513            skillsEnd -= 1
4514            outcome = True
4515        successEnd = self.matchingBrace(tokens, successStart)
4516        failStart = successEnd + 1
4517        if tokens[failStart] == Lexeme.angleRight:
4518            failStart += 1
4519            if outcome is not None:
4520                raise ParseError(
4521                    "Cannot indicate both success and failure as"
4522                    " outcomes in a challenge."
4523                )
4524            outcome = False
4525        failEnd = self.matchingBrace(tokens, failStart)
4526
4527        if failEnd != end:
4528            raise ParseError(
4529                f"Junk after challenge:\n{tokens[failEnd + 1:end + 1]}"
4530            )
4531
4532        skills = self.parseSkillCombinationFromTokens(
4533            tokens,
4534            start + 3,
4535            skillsEnd
4536        )
4537        if isinstance(skills, base.Skill):
4538            skills = base.BestSkill(skills)
4539
4540        return base.challenge(
4541            level=level,
4542            outcome=outcome,
4543            skills=skills,
4544            success=self.parseConsequenceFromTokens(
4545                tokens[successStart:successEnd + 1]
4546            ),
4547            failure=self.parseConsequenceFromTokens(
4548                tokens[failStart:failEnd + 1]
4549            )
4550        )
4551
4552    def parseChallenge(
4553        self,
4554        encoded: str
4555    ) -> base.Challenge:
4556        """
4557        Lexes the given string and then calls `parseChallengeFromTokens`
4558        to return a `base.Challenge`.
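
            For example, using the default format (this mirrors the
            token-based example in `parseChallengeFromTokens`):

            >>> pf = ParseFormat()
            >>> pf.parseChallenge(
            ...     '<2>best(chess, checkers){gain coin*5}>{lose coin*5}'
            ... ) == base.challenge(
            ...     skills=base.BestSkill('chess', 'checkers'),
            ...     level=2,
            ...     success=[base.effect(gain=('coin', 5))],
            ...     failure=[base.effect(lose=('coin', 5))],
            ...     outcome=False
            ... )
            True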
4559        """
4560        return self.parseChallengeFromTokens(
4561            lex(encoded, self.reverseFormat)
4562        )
4563
4564    def parseConsequenceFromTokens(
4565        self,
4566        tokens: LexedTokens,
4567        start: int = 0,
4568        end: int = -1
4569    ) -> base.Consequence:
4570        """
4571        Parses a consequence from a lexed token list. If start and/or end
4572        are specified, only processes the part of the list between those
4573        two indices (inclusive). Use `lex` to turn a string into a
4574        `LexedTokens` list (or use `ParseFormat.parseConsequence` which
4575        does that for you).
4576
4577        An example:
4578
4579        >>> pf = ParseFormat()
4580        >>> tokens = [
4581        ...     Lexeme.openCurly,
4582        ...     'gain',
4583        ...     'power',
4584        ...     Lexeme.closeCurly
4585        ... ]
4586        >>> c = pf.parseConsequenceFromTokens(tokens)
4587        >>> c == [base.effect(gain='power')]
4588        True
4589        >>> tokens.append('hi')
4590        >>> c == pf.parseConsequenceFromTokens(tokens, end=-2)
4591        True
4592        >>> c == pf.parseConsequenceFromTokens(tokens, end=3)
4593        True
4594        """
4595        start, end, nTokens = normalizeEnds(tokens, start, end)
4596
4597        if nTokens < 2:
4598            raise ParseError("Consequence must have at least two tokens.")
4599
4600        if tokens[start] != Lexeme.openCurly:
4601            raise ParseError(
4602                f"Consequence must start with an open curly brace:"
4603                f" {repr(self.formatDict[Lexeme.openCurly])}."
4604            )
4605
4606        if tokens[end] != Lexeme.closeCurly:
4607            raise ParseError(
4608                f"Consequence must end with a closing curly brace:"
4609                f" {repr(self.formatDict[Lexeme.closeCurly])}."
4610            )
4611
4612        if nTokens == 2:
4613            return []
4614
4615        result: base.Consequence = []
4616        for partStart, partEnd in findSeparatedParts(
4617            tokens,
4618            Lexeme.consequenceSeparator,
4619            start + 1,
4620            end - 1,
4621            Lexeme.openCurly,
4622            Lexeme.closeCurly
4623        ):
4624            if partEnd - partStart < 0:
4625                raise ParseError("Empty consequence part.")
4626            if tokens[partStart] == Lexeme.angleLeft:  # a challenge
4627                result.append(
4628                    self.parseChallengeFromTokens(
4629                        tokens,
4630                        partStart,
4631                        partEnd
4632                    )
4633                )
4634            elif tokens[partStart] == Lexeme.doubleQuestionmark:  # condition
4635                result.append(
4636                    self.parseConditionFromTokens(
4637                        tokens,
4638                        partStart,
4639                        partEnd
4640                    )
4641                )
4642            else:  # Must be an effect
4643                result.append(
4644                    self.parseEffectFromTokens(
4645                        tokens,
4646                        partStart,
4647                        partEnd
4648                    )
4649                )
4650
4651        return result
4652
4653    def parseConsequence(self, encoded: str) -> base.Consequence:
4654        """
4655        Parses a consequence from a string. Uses `lex` and
4656        `ParseFormat.parseConsequenceFromTokens`. For example:
4657
4658        >>> pf = ParseFormat()
4659        >>> c = pf.parseConsequence(
4660        ...   '{gain power}'
4661        ... )
4662        >>> c == [base.effect(gain='power')]
4663        True
4664        >>> pf.unparseConsequence(c)
4665        '{gain power}'
4666        >>> c = pf.parseConsequence(
4667        ...     '{\\n'
4668        ...     '    ??(brawny|!weights*3){\\n'
4669        ...     '        <3>sum(brains, brawn){goto home}>{bounce}\\n'
4670        ...     '    }{};\\n'
4671        ...     '    lose coin*1\\n'
4672        ...     '}'
4673        ... )
4674        >>> len(c)
4675        2
4676        >>> c[0]['condition'] == base.ReqAny([
4677        ...     base.ReqCapability('brawny'),
4678        ...     base.ReqNot(base.ReqTokens('weights', 3))
4679        ... ])
4680        True
4681        >>> len(c[0]['consequence'])
4682        1
4683        >>> len(c[0]['alternative'])
4684        0
4685        >>> cons = c[0]['consequence'][0]
4686        >>> cons['skills'] == base.CombinedSkill('brains', 'brawn')
4687        True
4688        >>> cons['level']
4689        3
4690        >>> len(cons['success'])
4691        1
4692        >>> len(cons['failure'])
4693        1
4694        >>> cons['success'][0] == base.effect(goto='home')
4695        True
4696        >>> cons['failure'][0] == base.effect(bounce=True)
4697        True
4698        >>> cons['outcome'] = False
4699        >>> c[0] == base.condition(
4700        ...     condition=base.ReqAny([
4701        ...         base.ReqCapability('brawny'),
4702        ...         base.ReqNot(base.ReqTokens('weights', 3))
4703        ...     ]),
4704        ...     consequence=[
4705        ...         base.challenge(
4706        ...             skills=base.CombinedSkill('brains', 'brawn'),
4707        ...             level=3,
4708        ...             success=[base.effect(goto='home')],
4709        ...             failure=[base.effect(bounce=True)],
4710        ...             outcome=False
4711        ...         )
4712        ...     ]
4713        ... )
4714        True
4715        >>> c[1] == base.effect(lose=('coin', 1))
4716        True
4717        """
4718        return self.parseConsequenceFromTokens(
4719            lex(encoded, self.reverseFormat)
4720        )
4721
4722
4723#---------------------#
4724# Graphviz dot format #
4725#---------------------#
4726
4727class ParsedDotGraph(TypedDict):
4728    """
4729    Represents a parsed `graphviz` dot-format graph consisting of nodes,
4730    edges, and subgraphs, with attributes attached to nodes and/or
4731    edges. An intermediate format during conversion to a full
4732    `DecisionGraph`. Includes the following slots:
4733
4734    - `'nodes'`: A list of tuples each holding a node ID followed by a
4735        list of name/value attribute pairs.
4736    - `'edges'`: A list of tuples each holding a from-ID, a to-ID,
4737        and then a list of name/value attribute pairs.
4738    - `'attrs'`: A list of tuples each holding a name/value attribute
4739        pair for graph-level attributes.
4740    - `'subgraphs'`: A list of subgraphs (each a tuple with a subgraph
4741        name and then another dictionary in the same format as this
4742        one).
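
        For example, a graph with a single attribute-bearing node and
        no edges, graph attributes, or subgraphs would be represented
        as (a hypothetical value for illustration):

            {
                'nodes': [(1, [('name', 'A')])],
                'edges': [],
                'attrs': [],
                'subgraphs': []
            }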
4743    """
4744    nodes: List[Tuple[int, List[Tuple[str, str]]]]
4745    edges: List[Tuple[int, int, List[Tuple[str, str]]]]
4746    attrs: List[Tuple[str, str]]
4747    subgraphs: List[Tuple[str, 'ParsedDotGraph']]
4748
4749
4750def parseSimpleDotAttrs(fragment: str) -> List[Tuple[str, str]]:
4751    """
4752    Given a string fragment that starts with '[' and ends with ']',
4753    parses a simple attribute list in `graphviz` dot format from that
4754    fragment, returning a list of name/value attribute tuples. Raises a
4755    `DotParseError` if the fragment doesn't have the right format.
4756
4757    Examples:
4758
4759    >>> parseSimpleDotAttrs('[ name=value ]')
4760    [('name', 'value')]
4761    >>> parseSimpleDotAttrs('[ a=b c=d e=f ]')
4762    [('a', 'b'), ('c', 'd'), ('e', 'f')]
4763    >>> parseSimpleDotAttrs('[ a=b "c d"="e f" ]')
4764    [('a', 'b'), ('c d', 'e f')]
4765    >>> parseSimpleDotAttrs('[a=b "c d"="e f"]')
4766    [('a', 'b'), ('c d', 'e f')]
4767    >>> parseSimpleDotAttrs('[ a=b "c d"="e f"')
4768    Traceback (most recent call last):
4769    ...
4770    exploration.parsing.DotParseError...
4771    >>> parseSimpleDotAttrs('a=b "c d"="e f" ]')
4772    Traceback (most recent call last):
4773    ...
4774    exploration.parsing.DotParseError...
4775    >>> parseSimpleDotAttrs('[ a b=c ]')
4776    Traceback (most recent call last):
4777    ...
4778    exploration.parsing.DotParseError...
4779    >>> parseSimpleDotAttrs('[ a=b c ]')
4780    Traceback (most recent call last):
4781    ...
4782    exploration.parsing.DotParseError...
4783    >>> parseSimpleDotAttrs('[ name="value" ]')
4784    [('name', 'value')]
4785    >>> parseSimpleDotAttrs('[ name="\\\\"value\\\\"" ]')
4786    [('name', '"value"')]
4787    """
4788    if not fragment.startswith('[') or not fragment.endswith(']'):
4789        raise DotParseError(
4790            f"Simple attrs fragment missing delimiters:"
4791            f"\n  {repr(fragment)}"
4792        )
4793    result = []
4794    rest = fragment[1:-1].strip()
4795    while rest:
4796        # Get possibly-quoted attribute name:
4797        if rest.startswith('"'):
4798            try:
4799                aName, rest = utils.unquoted(rest)
4800            except ValueError:
4801                raise DotParseError(
4802                    f"Malformed quoted attribute name in"
4803                    f" fragment:\n  {repr(fragment)}"
4804                )
4805            rest = rest.lstrip()
4806            if not rest.startswith('='):
4807                raise DotParseError(
4808                    f"Missing '=' in attribute block in"
4809                    f" fragment:\n  {repr(fragment)}"
4810                )
4811            rest = rest[1:].lstrip()
4812        else:
4813            try:
4814                eqInd = rest.index('=')
4815            except ValueError:
4816                raise DotParseError(
4817                    f"Missing '=' in attribute block in"
4818                    f" fragment:\n  {repr(fragment)}"
4819                )
4820            aName = rest[:eqInd]
4821            if ' ' in aName:
4822                raise DotParseError(
4823                    f"Malformed unquoted attribute name"
4824                    f" {repr(aName)} in fragment:"
4825                    f"\n  {repr(fragment)}"
4826                )
4827            rest = rest[eqInd + 1:].lstrip()
4828
4829        # Get possibly-quoted attribute value:
4830        if rest.startswith('"'):
4831            try:
4832                aVal, rest = utils.unquoted(rest)
4833            except ValueError:
4834                raise DotParseError(
4835                    f"Malformed quoted attribute value in"
4836                    f" fragment:\n  {repr(fragment)}"
4837                )
4838            rest = rest.lstrip()
4839        else:
4840            try:
4841                spInd = rest.index(' ')
4842            except ValueError:
4843                spInd = len(rest)
4844            aVal = rest[:spInd]
4845            rest = rest[spInd:].lstrip()
4846
4847        # Append this attribute pair and continue parsing
4848        result.append((aName, aVal))
4849
4850    return result
4851
4852
4853def parseDotNode(
4854    nodeLine: str
4855) -> Tuple[int, Union[bool, List[Tuple[str, str]]]]:
4856    """
4857    Given a line of text from a `graphviz` dot-format graph
4858    (possibly ending in an '[' to indicate attributes to follow, or
4859    possibly including a '[ ... ]' block with attributes in-line),
4860    parses it as a node declaration, returning the ID of the node,
4861    along with a boolean indicating whether attributes follow or
4862    not. If an inline attribute block is present, the second member
4863    of the tuple will be a list of attribute name/value pairs. In
4864    that case, all attribute names and values must either be quoted
4865    or not include spaces.
4866    Examples:
4867
4868    >>> parseDotNode('1')
4869    (1, False)
4870    >>> parseDotNode(' 1 [ ')
4871    (1, True)
4872    >>> parseDotNode(' 1 [ a=b "c d"="e f" ] ')
4873    (1, [('a', 'b'), ('c d', 'e f')])
4874    >>> parseDotNode(' 3 [ name="A = \\\\"grate:open\\\\"" ]')
4875    (3, [('name', 'A = "grate:open"')])
4876    >>> parseDotNode('  "1"[')
4877    (1, True)
4878    >>> parseDotNode('  100[')
4879    (100, True)
4880    >>> parseDotNode('  1 2')
4881    Traceback (most recent call last):
4882    ...
4883    exploration.parsing.DotParseError...
4884    >>> parseDotNode('  1 [ 2')
4885    Traceback (most recent call last):
4886    ...
4887    exploration.parsing.DotParseError...
4892    >>> parseDotNode('  1 [ junk not=attrs ]')
4893    Traceback (most recent call last):
4894    ...
4895    exploration.parsing.DotParseError...
4896    >>> parseDotNode('  \\n')
4897    Traceback (most recent call last):
4898    ...
4899    exploration.parsing.DotParseError...
4900    """
4901    stripped = nodeLine.strip()
4902    if len(stripped) == 0:
4903        raise DotParseError(
4904            f"Empty node in dot graph on line:\n  {repr(nodeLine)}"
4905        )
4906    hasAttrs: Union[bool, List[Tuple[str, str]]] = False
4907    if stripped.startswith('"'):
4908        nodeName, rest = utils.unquoted(stripped)
4909        rest = rest.strip()
4910        if rest == '[':
4911            hasAttrs = True
4912        elif rest.startswith('[') and rest.endswith(']'):
4913            hasAttrs = parseSimpleDotAttrs(rest)
4914        elif rest:
4915            raise DotParseError(
4916                f"Extra junk {repr(rest)} after node on line:"
4917                f"\n {repr(nodeLine)}"
4918            )
4919
4920    else:
4921        if stripped.endswith('['):
4922            hasAttrs = True
4923            stripped = stripped[:-1].rstrip()
4924        elif stripped.endswith(']'):
4925            try:
4926                # TODO: Why did this used to be rindex? Was that
4927                # important in some case? (That doesn't work since the
4928                # value may contain a quoted open bracket).
4929                attrStart = stripped.index('[')
4930            except ValueError:
4931                raise DotParseError(
4932                    f"Unmatched ']' on line:\n  {repr(nodeLine)}"
4933                )
4934            hasAttrs = parseSimpleDotAttrs(
4935                stripped[attrStart:]
4936            )
4937            stripped = stripped[:attrStart].rstrip()
4938
4939        if ' ' in stripped:
4940            raise DotParseError(
4941                f"Unquoted multi-word node on line:\n  {repr(nodeLine)}"
4942            )
4943        else:
4944            nodeName = stripped
4945
4946    try:
4947        nodeID = int(nodeName)
4948    except ValueError:
4949        raise DotParseError(
4950            f"Node name {repr(nodeName)} is not an integer on"
4951            f" line:\n {repr(nodeLine)}"
4952        )
4953
4954    return (nodeID, hasAttrs)
4955
4956
4957def parseDotAttr(attrLine: str) -> Tuple[str, str]:
4958    """
4959    Given a line of text from a `graphviz` dot-format graph, parses
4960    it as an attribute (maybe-quoted-attr-name =
4961    maybe-quoted-attr-value). Returns the (maybe-unquoted) attr-name
4962    and the (maybe-unquoted) attr-value as a pair of strings. Raises
4963    a `DotParseError` if the line cannot be parsed as an attribute.
4964    Examples:
4965
4966    >>> parseDotAttr("a=b")
4967    ('a', 'b')
4968    >>> parseDotAttr("  a = b ")
4969    ('a', 'b')
4970    >>> parseDotAttr('"a" = "b"')
4971    ('a', 'b')
4972    >>> parseDotAttr('"a" -> "b"')
4973    Traceback (most recent call last):
4974    ...
4975    exploration.parsing.DotParseError...
4976    >>> parseDotAttr('"a" = "b" c')
4977    Traceback (most recent call last):
4978    ...
4979    exploration.parsing.DotParseError...
4980    >>> parseDotAttr('a')
4981    Traceback (most recent call last):
4982    ...
4983    exploration.parsing.DotParseError...
4984    >>> parseDotAttr('')
4985    Traceback (most recent call last):
4986    ...
4987    exploration.parsing.DotParseError...
4988    >>> parseDotAttr('0 [ name="A" ]')
4989    Traceback (most recent call last):
4990    ...
4991    exploration.parsing.DotParseError...
4992    """
4993    stripped = attrLine.lstrip()
4994    if len(stripped) == 0:
4995        raise DotParseError(
4996            f"Empty attribute in dot graph on line:\n  {repr(attrLine)}"
4997        )
4998    if stripped.endswith(']') or stripped.endswith('['):
4999        raise DotParseError(
5000            f"Node attribute ends in '[' or ']' on line:"
5001            f"\n  {repr(attrLine)}"
5002        )
5003    if stripped.startswith('"'):
5004        try:
5005            attrName, rest = utils.unquoted(stripped)
5006        except ValueError:
5007            raise DotParseError(
5008                f"Unmatched quotes in line:\n  {repr(attrLine)}"
5009            )
5010        rest = rest.lstrip()
5011        if len(rest) == 0 or rest[0] != '=':
5012            raise DotParseError(
5013                f"No equals sign following attribute name on"
5014                f" line:\n  {repr(attrLine)}"
5015            )
5016        rest = rest[1:].lstrip()
5017    else:
5018        try:
5019            eqInd = stripped.index('=')
5020        except ValueError:
5021            raise DotParseError(
5022                f"No equals sign in attribute line:"
5023                f"\n  {repr(attrLine)}"
5024            )
5025        attrName = stripped[:eqInd].rstrip()
5026        rest = stripped[eqInd + 1:].lstrip()
5027
5028    if rest.startswith('"'):
5029        try:
5030            attrVal, rest = utils.unquoted(rest)
5031        except ValueError:
5032            raise DotParseError(
5033                f"Unmatched quotes in line:\n  {repr(attrLine)}"
5034            )
5035        if rest.strip():
5036            raise DotParseError(
5037                f"Junk after attribute on line:"
5038                f"\n  {repr(attrLine)}"
5039            )
5040    else:
5041        attrVal = rest.rstrip()
5042
5043    return attrName, attrVal
5044
5045
5046def parseDotEdge(edgeLine: str) -> Tuple[int, int, bool]:
5047    """
5048    Given a line of text from a `graphviz` dot-format graph, parses
5049    it as an edge (fromID -> toID). Returns a tuple containing the
5050    from ID, the to ID, and a boolean indicating whether attributes
5051    follow the edge on subsequent lines (true if the line ends with
5052    '['). Raises a `DotParseError` if the line cannot be parsed as
5053    an edge pair. Examples:
5054
5055    >>> parseDotEdge("1 -> 2")
5056    (1, 2, False)
5057    >>> parseDotEdge("  1 -> 2 ")
5058    (1, 2, False)
5059    >>> parseDotEdge('"1" -> "2"')
5060    (1, 2, False)
5061    >>> parseDotEdge('"1" -> "2" [')
5062    (1, 2, True)
5063    >>> parseDotEdge("a -> b")
5064    Traceback (most recent call last):
5065    ...
5066    exploration.parsing.DotParseError...
5067    >>> parseDotEdge('"1" = "1"')
5068    Traceback (most recent call last):
5069    ...
5070    exploration.parsing.DotParseError...
5071    >>> parseDotEdge('"1" -> "2" c')
5072    Traceback (most recent call last):
5073    ...
5074    exploration.parsing.DotParseError...
5075    >>> parseDotEdge('1')
5076    Traceback (most recent call last):
5077    ...
5078    exploration.parsing.DotParseError...
5079    >>> parseDotEdge('')
5080    Traceback (most recent call last):
5081    ...
5082    exploration.parsing.DotParseError...
5083    """
5084    stripped = edgeLine.lstrip()
5085    if len(stripped) == 0:
5086        raise DotParseError(
5087            f"Empty edge in dot graph on line:\n  {repr(edgeLine)}"
5088        )
5089    if stripped.startswith('"'):
5090        try:
5091            fromStr, rest = utils.unquoted(stripped)
5092        except ValueError:
5093            raise DotParseError(
5094                f"Unmatched quotes in line:\n  {repr(edgeLine)}"
5095            )
5096        rest = rest.lstrip()
5097        if rest[:2] != '->':
5098            raise DotParseError(
5099                f"No arrow sign following source name on"
5100                f" line:\n  {repr(edgeLine)}"
5101            )
5102        rest = rest[2:].lstrip()
5103    else:
5104        try:
5105            arrowInd = stripped.index('->')
5106        except ValueError:
5107            raise DotParseError(
5108                f"No arrow in edge line:"
5109                f"\n  {repr(edgeLine)}"
5110            )
5111        fromStr = stripped[:arrowInd].rstrip()
5112        rest = stripped[arrowInd + 2:].lstrip()
5113        if ' ' in fromStr:
5114            raise DotParseError(
5115                f"Unquoted multi-word edge source on line:"
5116                f"\n  {repr(edgeLine)}"
5117            )
5118
5119    hasAttrs = False
5120    if rest.startswith('"'):
5121        try:
5122            toStr, rest = utils.unquoted(rest)
5123        except ValueError:
5124            raise DotParseError(
5125                f"Unmatched quotes in line:\n  {repr(edgeLine)}"
5126            )
5127        stripped = rest.strip()
5128        if stripped == '[':
5129            hasAttrs = True
5130        elif stripped:
5131            raise DotParseError(
5132                f"Junk after edge on line:"
5133                f"\n  {repr(edgeLine)}"
5134            )
5135    else:
5136        toStr = rest.rstrip()
5137        if toStr.endswith('['):
5138            toStr = toStr[:-1].rstrip()
5139            hasAttrs = True
5140        if ' ' in toStr:
5141            raise DotParseError(
5142                f"Unquoted multi-word edge destination on line:"
5143                f"\n  {repr(edgeLine)}"
5144            )
5145
5146    try:
5147        fromID = int(fromStr)
5148    except ValueError:
5149        raise DotParseError(
5150            f"Invalid 'from' ID: {repr(fromStr)} on line:"
5151            f"\n  {repr(edgeLine)}"
5152        )
5153
5154    try:
5155        toID = int(toStr)
5156    except ValueError:
5157        raise DotParseError(
5158            f"Invalid 'to' ID: {repr(toStr)} on line:"
5159            f"\n  {repr(edgeLine)}"
5160        )
5161
5162    return (fromID, toID, hasAttrs)
5163
5164
5165def parseDotAttrList(
5166    lines: List[str]
5167) -> Tuple[List[Tuple[str, str]], List[str]]:
5168    """
5169    Given a list of lines of text from a `graphviz` dot-format
5170    graph which starts with an attribute line, parses multiple
5171    attribute lines until a line containing just ']' is found.
5172    Returns a list of the parsed name/value attribute pair tuples,
5173    along with a list of remaining unparsed strings (not counting
5174    the closing ']' line). Raises a `DotParseError` if it finds a
5175    non-attribute line or if it fails to find a closing ']' line.
5176    Examples:
5177
5178    >>> parseDotAttrList([
5179    ...     'a=b\\n',
5180    ...     'c=d\\n',
5181    ...     ']\\n',
5182    ... ])
5183    ([('a', 'b'), ('c', 'd')], [])
5184    >>> parseDotAttrList([
5185    ...     'a=b',
5186    ...     'c=d',
5187    ...     '  ]',
5188    ...     'more',
5189    ...     'lines',
5190    ... ])
5191    ([('a', 'b'), ('c', 'd')], ['more', 'lines'])
5192    >>> parseDotAttrList([
5193    ...     'a=b',
5194    ...     'c=d',
5195    ... ])
5196    Traceback (most recent call last):
5197    ...
5198    exploration.parsing.DotParseError...
5199    """
5200    index = 0
5201    found = []
5202    while index < len(lines):
5203        thisLine = lines[index]
5204        try:
5205            found.append(parseDotAttr(thisLine))
5206        except DotParseError:
5207            if thisLine.strip() == ']':
5208                return (found, lines[index + 1:])
5209            else:
5210                raise DotParseError(
5211                    f"Could not parse attribute from line:"
5212                    f"\n  {repr(thisLine)}"
5213                    f"\nAttributes block starts on line:"
5214                    f"\n  {repr(lines[0])}"
5215                )
5216        index += 1
5217
5218    raise DotParseError(
5219        f"No list terminator (']') for attributes starting on line:"
5220        f"\n  {repr(lines[0])}"
5221    )
5222
5223
5224def parseDotSubgraphStart(line: str) -> str:
5225    """
5226    Parses the start of a subgraph from a line of a graph file. The
5227    line must start with the word 'subgraph' and then have a name,
5228    followed by a '{' at the end of the line. Raises a
5229    `DotParseError` if this format doesn't match. Examples:
5230
5231    >>> parseDotSubgraphStart('subgraph A {')
5232    'A'
5233    >>> parseDotSubgraphStart('subgraph A B {')
5234    Traceback (most recent call last):
5235    ...
5236    exploration.parsing.DotParseError...
5237    >>> parseDotSubgraphStart('subgraph "A B" {')
5238    'A B'
5239    >>> parseDotSubgraphStart('subgraph A')
5240    Traceback (most recent call last):
5241    ...
5242    exploration.parsing.DotParseError...
5243    """
5244    stripped = line.strip()
5245    if len(stripped) == 0:
5246        raise DotParseError(
5247            f"Empty line where subgraph was expected:"
5248            f"\n  {repr(line)}"
5249        )
5250
5251    if not stripped.startswith('subgraph '):
5252        raise DotParseError(
5253            f"Subgraph doesn't start with 'subgraph' on line:"
5254            f"\n  {repr(line)}"
5255        )
5256
5257    stripped = stripped[9:]
5258    if stripped.startswith('"'):
5259        try:
5260            name, rest = utils.unquoted(stripped)
5261        except ValueError:
5262            raise DotParseError(
5263                f"Malformed quotes on subgraph line:\n {repr(line)}"
5264            )
5265        if rest.strip() != '{':
5266            raise DotParseError(
5267                f"Junk or missing '{{' on subgraph line:\n {repr(line)}"
5268            )
5269    else:
5270        parts = stripped.split()
5271        if len(parts) != 2 or parts[1] != '{':
5272            raise DotParseError(
5273                f"Junk or missing '{{' on subgraph line:\n {repr(line)}"
5274            )
5275        name, _ = parts
5276
5277    return name
5278
5279
5280def parseDotGraphContents(
5281    lines: List[str]
5282) -> Tuple[ParsedDotGraph, List[str]]:
5283    """
5284    Given a list of lines from a `graphviz` dot-format string,
5285    parses the list as the contents of a graph (or subgraph),
5286    stopping when it reaches a line that just contains '}'. Raises a
5287    `DotParseError` if it cannot do so or if the terminator is
5288    missing. Returns a tuple containing the parsed graph data (see
5289    `ParsedDotGraph`) and the list of remaining lines after the
5290    terminator. Recursively parses subgraphs. Example:
5291
5292    >>> bits = parseDotGraphContents([
5293    ...     '"graph attr"=1',
5294    ...     '1 [',
5295    ...     '  attr=value',
5296    ...     ']',
5297    ...     '1 -> 2 [',
5298    ...     '  fullLabel="to_B"',
5299    ...     '  quality=number',
5300    ...     ']',
5301    ...     'subgraph name {',
5302    ...     '  300',
5303    ...     '  400',
5304    ...     '  300 -> 400 [',
5305    ...     '    fullLabel=forward',
5306    ...     '  ]',
5307    ...     '}',
5308    ...     '}',
5309    ... ])
5310    >>> len(bits)
5311    2
5312    >>> g = bits[0]
5313    >>> bits[1]
5314    []
5315    >>> sorted(g.keys())
5316    ['attrs', 'edges', 'nodes', 'subgraphs']
5317    >>> g['nodes']
5318    [(1, [('attr', 'value')])]
5319    >>> g['edges']
5320    [(1, 2, [('fullLabel', 'to_B'), ('quality', 'number')])]
5321    >>> g['attrs']
5322    [('graph attr', '1')]
5323    >>> sgs = g['subgraphs']
5324    >>> len(sgs)
5325    1
5326    >>> len(sgs[0])
5327    2
5328    >>> sgs[0][0]
5329    'name'
5330    >>> sg = sgs[0][1]
5331    >>> sorted(sg.keys())
5332    ['attrs', 'edges', 'nodes', 'subgraphs']
5333    >>> sg["nodes"]
5334    [(300, []), (400, [])]
5335    >>> sg["edges"]
5336    [(300, 400, [('fullLabel', 'forward')])]
5337    >>> sg["attrs"]
5338    []
5339    >>> sg["subgraphs"]
5340    []
5341    """
5342    result: ParsedDotGraph = {
5343        'nodes': [],
5344        'edges': [],
5345        'attrs': [],
5346        'subgraphs': [],
5347    }
5348    index = 0
5349    remainder = None
5350    # Consider each line:
5351    while index < len(lines):
5352        # Grab line and pre-increment index
5353        thisLine = lines[index]
5354        index += 1
5355
5356        # Check for } first because it could be parsed as a node
5357        stripped = thisLine.strip()
5358        if stripped == '}':
5359            remainder = lines[index:]
5360            break
5361        elif stripped == '':  # ignore blank lines
5362            continue
5363
5364        # Cascading parsing attempts, since the possibilities are
5365        # mostly mutually exclusive.
5366        # TODO: Node/attr confusion with = in a node name?
5367        try:
5368            attrName, attrVal = parseDotAttr(thisLine)
5369            result['attrs'].append((attrName, attrVal))
5370        except DotParseError:
5371            try:
5372                fromNode, toNode, hasEAttrs = parseDotEdge(
5373                    thisLine
5374                )
5375                if hasEAttrs:
5376                    attrs, rest = parseDotAttrList(
5377                        lines[index:]
5378                    )
5379                    # Restart to process rest
5380                    lines = rest
5381                    index = 0
5382                else:
5383                    attrs = []
5384                result['edges'].append((fromNode, toNode, attrs))
5385            except DotParseError:
5386                try:
5387                    nodeName, hasNAttrs = parseDotNode(
5388                        thisLine
5389                    )
5390                    if hasNAttrs is True:
5391                        attrs, rest = parseDotAttrList(
5392                            lines[index:]
5393                        )
5394                        # Restart to process rest
5395                        lines = rest
5396                        index = 0
5397                    elif hasNAttrs:
5398                        attrs = hasNAttrs
5399                    else:
5400                        attrs = []
5401                    result['nodes'].append((nodeName, attrs))
5402                except DotParseError:
5403                    try:
5404                        subName = parseDotSubgraphStart(
5405                            thisLine
5406                        )
5407                        subStuff, rest = \
5408                            parseDotGraphContents(
5409                                lines[index:]
5410                            )
5411                        result['subgraphs'].append((subName, subStuff))
5412                        # Restart to process rest
5413                        lines = rest
5414                        index = 0
5415                    except DotParseError:
5416                        raise DotParseError(
5417                            f"Unrecognizable graph line (possibly"
5418                            f" beginning of unfinished structure):"
5419                            f"\n  {repr(thisLine)}"
5420                        )
5421    if remainder is None:
5422        raise DotParseError(
5423            f"Graph (or subgraph) is missing closing '}}'. Starts"
5424            f" on line:\n  {repr(lines[0])}"
5425        )
5426    else:
5427        return (result, remainder)
5428
5429
5430def parseDot(
5431    dotStr: str,
5432    parseFormat: ParseFormat = ParseFormat()
5433) -> core.DecisionGraph:
5434    """
5435    Converts a `graphviz` dot-format string into a `core.DecisionGraph`.
5436    A custom `ParseFormat` may be specified if desired; the default
5437    `ParseFormat` is used if not. Note that this relies on specific
5438    indentation schemes used by `toDot` so a hand-edited dot-format
5439    graph will probably not work. A `DotParseError` is raised if the
5440    provided string can't be parsed. Examples:
5441
5442    >>> parseDotNode(' 3 [ label="A = \\\\"grate:open\\\\"" ]')
5443    (3, [('label', 'A = "grate:open"')])
5444    >>> sg = '''\
5445    ... subgraph __requirements__ {
5446    ...   3 [ label="A = \\\\"grate:open\\\\"" ]
5447    ...   4 [ label="B = \\\\"!(helmet)\\\\"" ]
5448    ...   5 [ label="C = \\\\"helmet\\\\"" ]
5449    ... }'''
5450    >>> parseDotGraphContents(sg.splitlines()[1:])
5451    ({'nodes': [(3, [('label', 'A = "grate:open"')]),\
5452 (4, [('label', 'B = "!(helmet)"')]), (5, [('label', 'C = "helmet"')])],\
5453 'edges': [], 'attrs': [], 'subgraphs': []}, [])
5454    >>> from . import core
5455    >>> dg = core.DecisionGraph.example('simple')
5456    >>> encoded = toDot(dg)
5457    >>> reconstructed = parseDot(encoded)
5458    >>> for diff in dg.listDifferences(reconstructed):
5459    ...     print(diff)
5460    >>> reconstructed == dg
5461    True
5462    >>> dg = core.DecisionGraph.example('abc')
5463    >>> encoded = toDot(dg)
5464    >>> reconstructed = parseDot(encoded)
5465    >>> for diff in dg.listDifferences(reconstructed):
5466    ...     print(diff)
5467    >>> reconstructed == dg
5468    True
5469    >>> tg = core.DecisionGraph()
5470    >>> tg.addDecision('A')
5471    0
5472    >>> tg.addDecision('B')
5473    1
5474    >>> tg.addTransition('A', 'up', 'B', 'down')
5475    >>> same = parseDot('''
5476    ... digraph {
5477    ...     0 [ name=A label=A ]
5478    ...       0 -> 1 [
5479    ...         label=up
5480    ...         fullLabel=up
5481    ...         reciprocal=down
5482    ...       ]
5483    ...     1 [ name=B label=B ]
5484    ...       1 -> 0 [
5485    ...         label=down
5486    ...         fullLabel=down
5487    ...         reciprocal=up
5488    ...       ]
5489    ... }''')
5490    >>> for diff in tg.listDifferences(same):
5491    ...     print(diff)
5492    >>> same == tg
5493    True
5494    >>> pf = ParseFormat()
5495    >>> tg.setTransitionRequirement('A', 'up', pf.parseRequirement('one|two'))
5496    >>> tg.setConsequence(
5497    ...     'B',
5498    ...     'down',
5499    ...     [base.effect(gain="one")]
5500    ... )
5501    >>> test = parseDot('''
5502    ...   digraph {
5503    ...     0 [ name="A = \\\\"one|two\\\\"" label="A = \\\\"one|two\\\\"" ]
5504    ...   }
5505    ... ''')
5506    >>> list(test.nodes)
5507    [0]
5508    >>> test.nodes[0]['name']
5509    'A = "one|two"'
5510    >>> eff = (
5511    ...   r'"A = \\"[{\\\\\\"type\\\\\\": \\\\\\"gain\\\\\\",'
5512    ...   r' \\\\\\"applyTo\\\\\\": \\\\\\"active\\\\\\",'
5513    ...   r' \\\\\\"value\\\\\\": \\\\\\"one\\\\\\",'
5514    ...   r' \\\\\\"charges\\\\\\": null, \\\\\\"hidden\\\\\\": false,'
5515    ...   r' \\\\\\"delay\\\\\\": null}]\\""'
5516    ... )
5517    >>> utils.unquoted(eff)[1]
5518    ''
5519    >>> test2 = parseDot(
5520    ...     'digraph {\\n 0 [ name=' + eff + ' label=' + eff + ' ]\\n}'
5521    ... )
5522    >>> s = test2.nodes[0]['name']
5523    >>> s[:25]
5524    'A = "[{\\\\"type\\\\": \\\\"gain\\\\"'
5525    >>> s[25:50]
5526    ', \\\\"applyTo\\\\": \\\\"active\\\\"'
5527    >>> s[50:70]
5528    ', \\\\"value\\\\": \\\\"one\\\\"'
5529    >>> s[70:89]
5530    ', \\\\"charges\\\\": null'
5531    >>> s[89:108]
5532    ', \\\\"hidden\\\\": false'
5533    >>> s[108:]
5534    ', \\\\"delay\\\\": null}]"'
5535    >>> ae = s[s.index('=') + 1:].strip()
5536    >>> uq, after = utils.unquoted(ae)
5537    >>> after
5538    ''
5539    >>> fromJSON(uq) == [base.effect(gain="one")]
5540    True
5541    >>> same = parseDot('''
5542    ... digraph {
5543    ...   0 [ name=A label=A ]
5544    ...     0 -> 1 [
5545    ...       label=up
5546    ...       fullLabel=up
5547    ...       reciprocal=down
5548    ...       req=A
5549    ...     ]
5550    ...   1 [ name=B label=B ]
5551    ...     1 -> 0 [
5552    ...       label=down
5553    ...       fullLabel=down
5554    ...       reciprocal=up
5555    ...       consequence=A
5556    ...     ]
5557    ...   subgraph __requirements__ {
5558    ...     2 [ label="A = \\\\"one|two\\\\"" ]
5559    ...   }
5560    ...   subgraph __consequences__ {
5561    ...     3 [ label=''' + eff + ''' ]
5562    ...   }
5563    ... }''')
5564    >>> c = {'tags': {}, 'annotations': [], 'reciprocal': 'up', 'consequence': [{'type': 'gain', 'applyTo': 'active', 'value': 'one', 'delay': None, 'charges': None}]}['consequence']  # noqa
5565
5566    >>> for diff in tg.listDifferences(same):
5567    ...     print(diff)
5568    >>> same == tg
5569    True
5570    """
5571    lines = dotStr.splitlines()
5572    while lines[0].strip() == '':
5573        lines.pop(0)
5574    if lines.pop(0).strip() != "digraph {":
5575        raise DotParseError("Input doesn't begin with 'digraph {'.")
5576
5577    # Create our result
5578    result = core.DecisionGraph()
5579
5580    # Parse to intermediate graph data structure
5581    graphStuff, remaining = parseDotGraphContents(lines)
5582    if remaining:
5583        if len(remaining) <= 4:
5584            junk = '\n  '.join(repr(line) for line in remaining)
5585        else:
5586            junk = '\n  '.join(repr(line) for line in remaining[:4])
5587            junk += '\n  ...'
5588        raise DotParseError(f"Extra junk after graph:\n  {junk}")
5589
5590    # Sort out subgraphs to find legends
5591    zoneSubs = []
5592    reqLegend = None
5593    consequenceLegend = None
5594    mechanismLegend = None
5595    for sub in graphStuff['subgraphs']:
5596        if sub[0] == '__requirements__':
5597            reqLegend = sub[1]
5598        elif sub[0] == '__consequences__':
5599            consequenceLegend = sub[1]
5600        elif sub[0] == '__mechanisms__':
5601            mechanismLegend = sub[1]
5602        else:
5603            zoneSubs.append(sub)
5604
5605    # Build out our mapping from requirement abbreviations to actual
5606    # requirement objects
5607    reqMap: Dict[str, base.Requirement] = {}
5608    if reqLegend is not None:
5609        if reqLegend['edges']:
5610            raise DotParseError(
5611                f"Requirements legend subgraph has edges:"
5612                f"\n  {repr(reqLegend['edges'])}"
5613                f"\n(It should only have nodes.)"
5614            )
5615        if reqLegend['attrs']:
5616            raise DotParseError(
5617                f"Requirements legend subgraph has attributes:"
5618                f"\n  {repr(reqLegend['attrs'])}"
5619                f"\n(It should only have nodes.)"
5620            )
5621        if reqLegend['subgraphs']:
5622            raise DotParseError(
5623                f"Requirements legend subgraph has subgraphs:"
5624                f"\n  {repr(reqLegend['subgraphs'])}"
5625                f"\n(It should only have nodes.)"
5626            )
5627        for node, attrs in reqLegend['nodes']:
5628            if not attrs:
5629                raise DotParseError(
5630                    f"Node in requirements legend missing attributes:"
5631                    f"\n  {repr(attrs)}"
5632                )
5633            if len(attrs) != 1:
5634                raise DotParseError(
5635                    f"Node in requirements legend has multiple"
5636                    f" attributes:\n  {repr(attrs)}"
5637                )
5638            reqStr = attrs[0][1]
5639            try:
5640                eqInd = reqStr.index('=')
5641            except ValueError:
5642                raise DotParseError(
5643                    f"Missing '=' in requirement specifier:"
5644                    f"\n  {repr(reqStr)}"
5645                )
5646            ab = reqStr[:eqInd].rstrip()
5647            encoded = reqStr[eqInd + 1:].lstrip()
5648            try:
5649                encVal, empty = utils.unquoted(encoded)
5650            except ValueError:
5651                raise DotParseError(
5652                    f"Invalid quoted requirement value:"
5653                    f"\n  {repr(encoded)}"
5654                )
5655            if empty.strip():
5656                raise DotParseError(
5657                    f"Extra junk after requirement value:"
5658                    f"\n  {repr(empty)}"
5659                )
5660            try:
5661                req = parseFormat.parseRequirement(encVal)
5662            except ValueError:
5663                raise DotParseError(
5664                    f"Invalid encoded requirement in requirements"
5665                    f" legend:\n  {repr(encVal)}"
5666                )
5667            if ab in reqMap:
5668                raise DotParseError(
5669                    f"Abbreviation '{ab}' was defined multiple"
5670                    f" times in requirements legend."
5671                )
5672            reqMap[ab] = req
5673
5674    # Build out our mapping from consequence abbreviations to actual
5675    # consequence lists
5676    consequenceMap: Dict[str, base.Consequence] = {}
5677    if consequenceLegend is not None:
5678        if consequenceLegend['edges']:
5679            raise DotParseError(
5680                f"Consequences legend subgraph has edges:"
5681                f"\n  {repr(consequenceLegend['edges'])}"
5682                f"\n(It should only have nodes.)"
5683            )
5684        if consequenceLegend['attrs']:
5685            raise DotParseError(
5686                f"Consequences legend subgraph has attributes:"
5687                f"\n  {repr(consequenceLegend['attrs'])}"
5688                f"\n(It should only have nodes.)"
5689            )
5690        if consequenceLegend['subgraphs']:
5691            raise DotParseError(
5692                f"Consequences legend subgraph has subgraphs:"
5693                f"\n  {repr(consequenceLegend['subgraphs'])}"
5694                f"\n(It should only have nodes.)"
5695            )
5696        for node, attrs in consequenceLegend['nodes']:
5697            if not attrs:
5698                raise DotParseError(
5699                    f"Node in consequence legend missing attributes:"
5700                    f"\n  {repr(attrs)}"
5701                )
5702            if len(attrs) != 1:
5703                raise DotParseError(
5704                    f"Node in consequences legend has multiple"
5705                    f" attributes:\n  {repr(attrs)}"
5706                )
5707            consStr = attrs[0][1]
5708            try:
5709                eqInd = consStr.index('=')
5710            except ValueError:
5711                raise DotParseError(
5712                    f"Missing '=' in consequence string:"
5713                    f"\n  {repr(consStr)}"
5714                )
5715            ab = consStr[:eqInd].rstrip()
5716            encoded = consStr[eqInd + 1:].lstrip()
5717            try:
5718                encVal, empty = utils.unquoted(encoded)
5719            except ValueError:
5720                raise DotParseError(
5721                    f"Invalid quoted consequence value:"
5722                    f"\n  {repr(encoded)}"
5723                )
5724            if empty.strip():
5725                raise DotParseError(
5726                    f"Extra junk after consequence value:"
5727                    f"\n  {repr(empty)}"
5728                )
5729            try:
5730                consequences = fromJSON(encVal)
5731            except json.decoder.JSONDecodeError:
5732                raise DotParseError(
5733                    f"Invalid encoded consequence in consequences"
5734                    f" legend:\n  {repr(encVal)}"
5735                )
5736            if ab in consequenceMap:
5737                raise DotParseError(
5738                    f"Abbreviation '{ab}' was defined multiple"
5739                    f" times in consequences legend."
5740                )
5741            consequenceMap[ab] = consequences
5742
5743    # Reconstruct mechanisms
5744    if mechanismLegend is not None:
5745        if mechanismLegend['edges']:
5746            raise DotParseError(
5747                f"Mechanisms legend subgraph has edges:"
5748                f"\n  {repr(mechanismLegend['edges'])}"
5749                f"\n(It should only have nodes.)"
5750            )
5751        if mechanismLegend['attrs']:
5752            raise DotParseError(
5753                f"Mechanisms legend subgraph has attributes:"
5754                f"\n  {repr(mechanismLegend['attrs'])}"
5755                f"\n(It should only have nodes.)"
5756            )
5757        if mechanismLegend['subgraphs']:
5758            raise DotParseError(
5759                f"Mechanisms legend subgraph has subgraphs:"
5760                f"\n  {repr(mechanismLegend['subgraphs'])}"
5761                f"\n(It should only have nodes.)"
5762            )
5763        for node, attrs in mechanismLegend['nodes']:
5764            if not attrs:
5765                raise DotParseError(
5766                    f"Node in mechanisms legend missing attributes:"
5767                    f"\n  {repr(attrs)}"
5768                )
5769            if len(attrs) != 1:
5770                raise DotParseError(
5771                    f"Node in mechanisms legend has multiple"
5772                    f" attributes:\n  {repr(attrs)}"
5773                )
5774            mechStr = attrs[0][1]
5775            try:
5776                atInd = mechStr.index('@')
5777                colonInd = mechStr.index(':')
5778            except ValueError:
5779                raise DotParseError(
5780                    f"Missing '@' or ':' in mechanism string:"
5781                    f"\n  {repr(mechStr)}"
5782                )
5783            if atInd > colonInd:
5784                raise DotParseError(
5785                    f"':' after '@' in mechanism string:"
5786                    f"\n  {repr(mechStr)}"
5787                )
5788            mID: base.MechanismID
5789            where: Optional[base.DecisionID]
5790            mName: base.MechanismName
5791            try:
5792                mID = int(mechStr[:atInd].rstrip())
5793            except ValueError:
5794                raise DotParseError(
5795                    f"Invalid mechanism ID in mechanism string:"
5796                    f"\n  {repr(mechStr)}"
5797                )
5798            try:
5799                whereStr = mechStr[atInd + 1:colonInd].strip()
5800                if whereStr == "None":
5801                    where = None
5802                else:
5803                    where = int(whereStr)
5804            except ValueError:
5805                raise DotParseError(
5806                    f"Invalid mechanism location in mechanism string:"
5807                    f"\n  {repr(mechStr)}"
5808                )
5809            mName, rest = utils.unquoted(mechStr[colonInd + 1:].lstrip())
5810            if rest.strip():
5811                raise DotParseError(
5812                    f"Junk after mechanism name in mechanism string:"
5813                    f"\n  {repr(mechStr)}"
5814                )
5815            result.mechanisms[mID] = (where, mName)
5816            if where is None:
5817                result.globalMechanisms[mName] = mID
5818
5819    # Add zones to the graph based on parent info
5820    # Map from zones to children we should add to them once all
5821    # zones are created:
5822    zoneChildMap: Dict[str, List[str]] = {}
5823    for prefixedName, graphData in zoneSubs:
5824        # Chop off cluster_ or _ prefix:
5825        zoneName = prefixedName[prefixedName.index('_') + 1:]
5826        if graphData['edges']:
5827            raise DotParseError(
5828                f"Zone subgraph for zone {repr(zoneName)} has edges:"
5829                f"\n  {repr(graphData['edges'])}"
5830                f"\n(It should only have nodes and attributes.)"
5831            )
5832        if graphData['subgraphs']:
5833            raise DotParseError(
5834                f"Zone subgraph for zone {repr(zoneName)} has"
5835                f" subgraphs:"
5836                f"\n  {repr(graphData['subgraphs'])}"
5837                f"\n(It should only have nodes and attributes.)"
5838            )
5839        # Note: we ignore nodes as that info is used for
5840        # visualization but is redundant with the zone parent info
5841        # stored in nodes, and it would be tricky to tease apart
5842        # direct vs. indirect relationships from merged info.
5843        parents = None
5844        level = None
5845        for attr, aVal in graphData['attrs']:
5846            if attr == 'parents':
5847                try:
5848                    parents = set(fromJSON(aVal))
5849                except json.decoder.JSONDecodeError:
5850                    raise DotParseError(
5851                        f"Invalid parents JSON in zone subgraph for"
5852                        f" zone '{zoneName}':\n  {repr(aVal)}"
5853                    )
5854            elif attr == 'level':
5855                try:
5856                    level = int(aVal)
5857                except ValueError:
5858                    raise DotParseError(
5859                        f"Invalid level in zone subgraph for"
5860                        f" zone '{zoneName}':\n  {repr(aVal)}"
5861                    )
5862            elif attr == 'label':
5863                pass  # name already extracted from the subgraph name
5864
5865            else:
5866                raise DotParseError(
5867                    f"Unexpected attribute '{attr}' in zone"
5868                    f" subgraph for zone '{zoneName}'"
5869                )
5870        if parents is None:
5871            raise DotParseError(
5872                f"No parents attribute for zone '{zoneName}'."
5873                f" Graph is:\n  {repr(graphData)}"
5874            )
5875        if level is None:
5876            raise DotParseError(
5877                f"No level attribute for zone '{zoneName}'."
5878                f" Graph is:\n  {repr(graphData)}"
5879            )
5880
5881        # Add ourself to our parents in the child map
5882        for parent in parents:
5883            zoneChildMap.setdefault(parent, []).append(zoneName)
5884
5885        # Create this zone
5886        result.createZone(zoneName, level)
5887
5888    # Add zone parent/child relationships
5889    for parent, children in zoneChildMap.items():
5890        for child in children:
5891            result.addZoneToZone(child, parent)
5892
5893    # Add nodes to the graph
5894    for (node, attrs) in graphStuff['nodes']:
5895        name: Optional[str] = None
5896        annotations = []
5897        tags: Dict[base.Tag, base.TagValue] = {}
5898        zones = []
5899        for attr, aVal in attrs:
5900            if attr == 'name':  # it's the name
5901                name = aVal
5902            elif attr == 'label':  # zone + name; redundant
5903                pass
5904            elif attr.startswith('t_'):  # it's a tag
5905                tagName = attr[2:]
5906                try:
5907                    tagAny = fromJSON(aVal)
5908                except json.decoder.JSONDecodeError:
5909                    raise DotParseError(
5910                        f"Error in JSON for tag attr '{attr}' of node"
5911                        f" '{node}'"
5912                    )
5913                if isinstance(tagAny, base.TagValueTypes):
5914                    tagVal: base.TagValue = cast(base.TagValue, tagAny)
5915                else:
5916                    raise DotParseError(
5917                        f"JSON for tag value encodes disallowed tag"
5918                        f" value of type {type(tagAny)}. Value is:"
5919                        f"\n  {repr(tagAny)}"
5920                    )
5921                tags[tagName] = tagVal
5922            elif attr.startswith('z_'):  # it's a zone
5923                zones.append(attr[2:])
5924            elif attr == 'annotations':  # It's the annotations
5925                try:
5926                    annotations = fromJSON(aVal)
5927                except json.decoder.JSONDecodeError:
5928                    raise DotParseError(
5929                        f"Bad JSON in attribute '{attr}' of node"
5930                        f" '{node}'"
5931                    )
5932            else:
5933                raise DotParseError(
5934                    f"Unrecognized node attribute '{attr}' for node"
5935                    f" '{node}'"
5936                )
5937
5938        # TODO: Domains here?
5939        if name is None:
5940            raise DotParseError(f"Node '{node}' does not have a name.")
5941
5942        result.addIdentifiedDecision(
5943            node,
5944            name,
5945            tags=tags,
5946            annotations=annotations
5947        )
5948        for zone in zones:
5949            try:
5950                result.addDecisionToZone(node, zone)
5951            except core.MissingZoneError:
5952                raise DotParseError(
5953                    f"Zone '{zone}' for node {node} does not"
5954                    f" exist."
5955                )
5956
5957    # Add mechanisms to each node:
5958    for (mID, (where, mName)) in result.mechanisms.items():
        if where is None:
            continue  # global mechanisms were already registered above
5959        mPool = result.nodes[where].setdefault('mechanisms', {})
5960        if mName in mPool:
5961            raise DotParseError(
5962                f"Multiple mechanisms named {mName!r} at"
5963                f" decision {where}."
5964            )
5965        mPool[mName] = mID
5966
5967    # Reciprocals to double-check once all edges are added
5968    recipChecks: Dict[
5969        Tuple[base.DecisionID, base.Transition],
5970        base.Transition
5971    ] = {}
5972
5973    # Add each edge
5974    for (source, dest, attrs) in graphStuff['edges']:
5975        annotations = []
5976        tags = {}
5977        label = None
5978        requirements = None
5979        consequence = None
5980        reciprocal = None
5981        for attr, aVal in attrs:
5982            if attr.startswith('t_'):
5983                try:
5984                    tags[attr[2:]] = fromJSON(aVal)
5985                except json.decoder.JSONDecodeError:
5986                    raise DotParseError(
5987                        f"Invalid JSON in edge tag '{attr}' for edge"
5988                        f" from '{source}' to '{dest}':"
5989                        f"\n  {repr(aVal)}"
5990                    )
5991            elif attr == "label":  # We ignore the short-label
5992                pass
5993            elif attr == "fullLabel":  # This is our transition name
5994                label = aVal
5995            elif attr == "reciprocal":
5996                reciprocal = aVal
5997            elif attr == "req":
5998                reqAbbr = aVal
5999                if reqAbbr not in reqMap:
6000                    raise DotParseError(
6001                        f"Edge from '{source}' to '{dest}' has"
6002                        f" requirement abbreviation '{reqAbbr}'"
6003                        f" but that abbreviation was not listed"
6004                        f" in the '__requirements__' subgraph."
6005                    )
6006                requirements = reqMap[reqAbbr]
6007            elif attr == "consequence":
6008                consequenceAbbr = aVal
6009                if consequenceAbbr not in consequenceMap:
6010                    raise DotParseError(
6011                        f"Edge from '{source}' to '{dest}' has"
6012                        f" consequence abbreviation"
6013                        f" '{consequenceAbbr}' but that"
6014                        f" abbreviation was not listed in the"
6015                        f" '__consequences__' subgraph."
6016                    )
6017                consequence = consequenceMap[consequenceAbbr]
6018            elif attr == "annotations":
6019                try:
6020                    annotations = fromJSON(aVal)
6021                except json.decoder.JSONDecodeError:
6022                    raise DotParseError(
6023                        f"Invalid JSON in edge annotations for"
6024                        f" edge from '{source}' to '{dest}':"
6025                        f"\n  {repr(aVal)}"
6026                    )
6027            else:
6028                raise DotParseError(
6029                    f"Unrecognized edge attribute '{attr}' for edge"
6030                    f" from '{source}' to '{dest}'"
6031                )
6032
6033        if label is None:
6034            raise DotParseError(
6035                f"Edge from '{source}' to '{dest}' is missing"
6036                f" a 'fullLabel' attribute."
6037            )
6038
6039        # Add the requested transition
6040        result.addTransition(
6041            source,
6042            label,
6043            dest,
6044            tags=tags,
6045            annotations=annotations,
6046            requires=requirements,  # None works here
6047            consequence=consequence  # None works here
6048        )
6049        # Either we're first or our reciprocal is, so this will only
6050        # trigger for one of the pair
6051        if reciprocal is not None:
6052            recipDest = result.getDestination(dest, reciprocal)
6053            if recipDest is None:
6054                recipChecks[(source, label)] = reciprocal
6055                # we'll get set as a reciprocal when that edge is
6056                # instantiated, we hope, but let's check that later
6057            elif recipDest != source:
6058                raise DotParseError(
6059                    f"Transition '{label}' from '{source}' to"
6060                    f" '{dest}' lists reciprocal '{reciprocal}'"
6061                    f" but that transition from '{dest}' goes to"
6062                    f" '{recipDest}', not '{source}'."
6063                )
6064            else:
6065                # At this point we know the reciprocal edge exists
6066                # and has the appropriate destination (our source).
6067                # No need to check for a pre-existing reciprocal as
6068                # this edge is newly created and cannot already have
6069                # a reciprocal assigned.
6070                result.setReciprocal(source, label, reciprocal)
6071
6072    # Double-check skipped reciprocals
6073    for ((source, transition), reciprocal) in recipChecks.items():
6074        actual = result.getReciprocal(source, transition)
6075        if actual != reciprocal:
6076            raise DotParseError(
6077                f"Transition '{transition}' from '{source}' was"
6078                f" expecting to have reciprocal '{reciprocal}' but"
6079                f" all edges have been processed and its reciprocal"
6080                f" is {repr(actual)}."
6081            )
6082
6083    # Finally get graph-level attribute values
6084    for (name, value) in graphStuff['attrs']:
6085        if name == "unknownCount":
6086            try:
6087                result.unknownCount = int(value)
6088            except ValueError:
6089                raise DotParseError(
6090                    f"Invalid 'unknownCount' value {repr(value)}."
6091                )
6092        elif name == "nextID":
6093            try:
6094                result.nextID = int(value)
6095            except ValueError:
6096                raise DotParseError(
6097                    f"Invalid 'nextID' value:"
6098                    f"\n  {repr(value)}"
6099                )
6100            collisionCourse = [x for x in result if x >= result.nextID]
6101            if len(collisionCourse) > 0:
6102                raise DotParseError(
6103                    f"Next ID {value} is wrong because the graph"
6104                    f" already contains one or more node(s) with"
6105                    f" ID(s) that is/are at least that large:"
6106                    f" {collisionCourse}"
6107                )
6108        elif name == "nextMechanismID":
6109            try:
6110                result.nextMechanismID = int(value)
6111            except ValueError:
6112                raise DotParseError(
6113                    f"Invalid 'nextMechanismID' value:"
6114                    f"\n  {repr(value)}"
6115                )
6116        elif name in (
6117            "equivalences",
6118            "reversionTypes",
6119            "mechanisms",
6120            "globalMechanisms",
6121            "nameLookup"
6122        ):
6123            try:
6124                setattr(result, name, fromJSON(value))
6125            except json.decoder.JSONDecodeError:
6126                raise DotParseError(
6127                    f"Invalid JSON in '{name}' attribute:"
6128                    f"\n  {repr(value)}"
6129                )
6130        else:
6131            raise DotParseError(
6132                f"Graph has unexpected attribute '{name}'."
6133            )
6134
6135    # Final check for mechanism ID value after both mechanism ID and
6136    # mechanisms dictionary have been parsed:
6137    leftBehind = [
6138        x
6139        for x in result.mechanisms
6140        if x >= result.nextMechanismID
6141    ]
6142    if len(leftBehind) > 0:
6143        raise DotParseError(
6144            f"Next mechanism ID {result.nextMechanismID} is wrong because"
6145            f" the graph already contains one or more"
6146            f" node(s) with ID(s) that is/are at least that"
6147            f" large: {leftBehind}"
6148        )
6149
6150    # And we're done!
6151    return result
6152
6153
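# A round-trip sketch (hedged: `parseDot` above is this module's parser
# for the format `toDot` below produces; `someGraph` is a placeholder):
#
#     dotStr = toDot(someGraph)
#     rebuilt = parseDot(dotStr)  # should compare equal to someGraph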
6154def toDot(
6155    graph: core.DecisionGraph,
6156    clusterLevels: Union[str, List[int]] = [0]
6157) -> str:
6158    """
6159    Converts the decision graph into a "dot"-format string suitable
6160    for processing by `graphviz`.
6161
6162    See [the dot language
6163    specification](https://graphviz.org/doc/info/lang.html) for more
6164    detail on the syntax we convert to.
6165
6166    If `clusterLevels` is given, it should be either the string '*',
6167    or a list of integers. '*' means that all zone levels should be
6168    cluster-style subgraphs, while a list of integers specifies that
6169    zones at those levels should be cluster-style subgraphs. This
6170    will prefix the subgraph names with 'cluster_' instead of just
6171    '_'.
6172
6173    TODO: Check edge cases for quotes in capability names, tag names,
6174    transition names, annotations, etc.
6175
6176    TODO: At least colons not allowed in tag names!
6177
6178    TODO: Spaces in decision/transition names? Other special
6179    characters in those names?
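
    For example, `toDot(g, clusterLevels='*')` would make every zone a
    cluster-style subgraph, while the default `[0]` does this only for
    level-0 zones (a usage sketch; the exact output depends on `g`).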
6180    """
6181    # Set up result including unknownCount and nextID
6182    result = (
6183        f"digraph {{"
6184        f"\n  unknownCount={graph.unknownCount}"
6185        f"\n  nextID={graph.nextID}"
6186        f"\n  nextMechanismID={graph.nextMechanismID}"
6187        f"\n"
6188    )
6189
6190    # Dictionaries for using letters to substitute for unique
6191    # requirements/consequences found throughout the graph. Keys are
6192    # quoted requirement or consequence reprs, and values are
6193    # abbreviation strings for them.
6194    currentReqKey = utils.nextAbbrKey(None)
6195    currentEffectKey = utils.nextAbbrKey(None)
6196    reqKeys: Dict[str, str] = {}
6197    consequenceKeys: Dict[str, str] = {}
6198
6199    # Add all decision and transition info
6200    decision: base.DecisionID  # TODO: Fix MultiDiGraph type stubs
6201    for decision in graph.nodes:
6202        nodeInfo = graph.nodes[decision]
6203        tags = nodeInfo.get('tags', {})
6204        annotations = toJSON(nodeInfo.get('annotations', []))
6205        zones = nodeInfo.get('zones', set())
6206        nodeAttrs = f"\n    name={utils.quoted(nodeInfo['name'])}"
6207        immediateZones = [z for z in zones if graph.zoneHierarchyLevel(z) == 0]
6208        if len(immediateZones) > 0:
6209            useZone = sorted(immediateZones)[0]
6210            # TODO: Don't hardcode :: here?
6211            withZone = useZone + "::" + nodeInfo['name']
6212            nodeAttrs += f"\n    label={utils.quoted(withZone)}"
6213        else:
6214            nodeAttrs += f"\n    label={utils.quoted(nodeInfo['name'])}"
6215        for tag, value in tags.items():
6216            rep = utils.quoted(toJSON(value))
6217            nodeAttrs += f"\n    t_{tag}={rep}"
6218        for z in sorted(zones):
6219            nodeAttrs += f"\n    z_{z}=1"
6220        if annotations:
6221            nodeAttrs += '\n    annotations=' + utils.quoted(annotations)
6222
6223        result += f'\n  {decision} [{nodeAttrs}\n  ]'
6224
6225        for (transition, destination) in graph._byEdge[decision].items():
6226            edgeAttrs = (
6227                '\n      label='
6228              + utils.quoted(utils.abbr(transition))
6229            )
6230            edgeAttrs += (
6231                '\n      fullLabel='
6232              + utils.quoted(transition)
6233            )
6234            reciprocal = graph.getReciprocal(decision, transition)
6235            if reciprocal is not None:
6236                edgeAttrs += (
6237                    '\n      reciprocal='
6238                  + utils.quoted(reciprocal)
6239                )
6240            info = graph.edges[
6241                decision,  # type:ignore
6242                destination,
6243                transition
6244            ]
6245            if 'requirement' in info:
6246                # Get string rep for requirement
6247                rep = utils.quoted(info['requirement'].unparse())
6248                # Get assigned abbreviation or assign one
6249                if rep in reqKeys:
6250                    ab = reqKeys[rep]
6251                else:
6252                    ab = currentReqKey
6253                    reqKeys[rep] = ab
6254                    currentReqKey = utils.nextAbbrKey(currentReqKey)
6255                # Add abbreviation as edge attribute
6256                edgeAttrs += f'\n      req={ab}'
6257            if 'consequence' in info:
6258                # Get string representation of consequences
6259                rep = utils.quoted(
6260                    toJSON(info['consequence'])
6261                )
6262                # Get abbreviation for that or assign one:
6263                if rep in consequenceKeys:
6264                    ab = consequenceKeys[rep]
6265                else:
6266                    ab = currentEffectKey
6267                    consequenceKeys[rep] = ab
6268                    currentEffectKey = utils.nextAbbrKey(
6269                        currentEffectKey
6270                    )
6271                # Add abbreviation as an edge attribute
6272                edgeAttrs += f'\n      consequence={ab}'
6273            for (tag, value) in info["tags"].items():
6274                # Get string representation of tag value
6275                rep = utils.quoted(toJSON(value))
6276                # Add edge attribute for tag
6277                edgeAttrs += f'\n      t_{tag}={rep}'
6278            if 'annotations' in info:
6279                edgeAttrs += (
6280                    '\n      annotations='
6281                  + utils.quoted(toJSON(info['annotations']))
6282                )
6283            result += f'\n    {decision} -> {destination}'
6284            result += f' [{edgeAttrs}\n    ]'
6285
6286    # Add zone info as subgraph structure
6287    for z, zinfo in graph.zones.items():
6288        parents = utils.quoted(toJSON(sorted(zinfo.parents)))
6289        if clusterLevels == '*' or zinfo.level in clusterLevels:
6290            zName = "cluster_" + z
6291        else:
6292            zName = '_' + z
6293        zoneSubgraph = f'\n  subgraph {utils.quoted(zName)} {{'
6294        zoneSubgraph += f'\n    label={z}'
6295        zoneSubgraph += f'\n    level={zinfo.level}'
6296        zoneSubgraph += f'\n    parents={parents}'
6297        for decision in sorted(graph.allDecisionsInZone(z)):
6298            zoneSubgraph += f'\n    {decision}'
6299        zoneSubgraph += '\n  }'
6300        result += zoneSubgraph
6301
6302    # Add equivalences, mechanisms, etc.
6303    for attr in [
6304        "equivalences",
6305        "reversionTypes",
6306        "mechanisms",
6307        "globalMechanisms",
6308        "nameLookup"
6309    ]:
6310        aRep = utils.quoted(toJSON(getattr(graph, attr)))
6311        result += f'\n  {attr}={aRep}'
6312
6313    # Add legend subgraphs to represent abbreviations
6314    useID = graph.nextID
6315    if reqKeys:
6316        result += '\n  subgraph __requirements__ {'
6317        for rrepr, ab in reqKeys.items():
6318            nStr = utils.quoted(ab + ' = ' + rrepr)
6319            result += (
6320                f"\n    {useID} [ label={nStr} ]"
6321            )
6322            useID += 1
6323        result += '\n  }'
6324
6325    if consequenceKeys:
6326        result += '\n  subgraph __consequences__ {'
6327        for erepr, ab in consequenceKeys.items():
6328            nStr = utils.quoted(ab + ' = ' + erepr)
6329            result += (
6330                f"\n    {useID} [ label={nStr} ]"
6331            )
6332            useID += 1
6333        result += '\n  }'
6334
6335    if graph.mechanisms:
6336        result += '\n  subgraph __mechanisms__ {'
6337        mID: base.MechanismID
6338        mWhere: Optional[base.DecisionID]
6339        mName: base.MechanismName
6340        for (mID, (mWhere, mName)) in graph.mechanisms.items():
6341            qName = utils.quoted(mName)
6342            nStr = utils.quoted(f"{mID}@{mWhere}:{qName}")
6343            result += (
6344                f"\n    {useID} [ label={nStr} ]"
6345            )
6346            useID += 1
6347        result += '\n  }'
6348
6349    result += "\n}\n"
6350    return result
6351
6352
6353#------#
6354# JSON #
6355#------#
6356
6357T = TypeVar("T")
6358"Type var for `loadCustom`."
6359
6360
6361def loadCustom(stream: TextIO, loadAs: Type[T]) -> T:
6362    """
6363    Loads a new JSON-encodable object from the JSON data in the
6364    given text stream (e.g., a file open in read mode). See
6365    `CustomJSONDecoder` for details on the format and which object types
6366    are supported.
6367
6368    The result is checked against the specified type: a `TypeError` is
6369    raised if it doesn't match.
6370    """
6371    result = json.load(stream, cls=CustomJSONDecoder)
6372    if isinstance(result, loadAs):
6373        return result
6374    else:
6375        raise TypeError(
6376            f"Expected to load a {loadAs} but got a {type(result)}."
6377        )
6378
6379
6380def saveCustom(
6381    toSave: Union[  # TODO: More in this union?
6382        base.MetricSpace,
6383        core.DecisionGraph,
6384        core.DiscreteExploration,
6385    ],
6386    stream: TextIO
6387) -> None:
6388    """
6389    Saves a JSON-encodable object as JSON into the given text stream
6390    (e.g., a file open in write mode). See `CustomJSONEncoder` for
6391    details on the format and which types are supported.
6392    """
6393    json.dump(toSave, stream, cls=CustomJSONEncoder)
6394
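# Usage sketch ('graph.json' is a hypothetical filename and `graph` is
# assumed to be a `core.DecisionGraph`):
#
#     with open('graph.json', 'w') as fout:
#         saveCustom(graph, fout)
#     with open('graph.json') as fin:
#         same = loadCustom(fin, core.DecisionGraph)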
6395
6396def toJSON(obj: Any) -> str:
6397    """
6398    Defines the standard object -> JSON operation using the
6399    `CustomJSONEncoder`, with `sort_keys` disabled.
6400    """
6401    return CustomJSONEncoder(sort_keys=False).encode(obj)
6402
6403
6404def fromJSON(encoded: str) -> Any:
6405    """
6406    Defines the standard JSON -> object operation using
6407    `CustomJSONDecoder`.
6408    """
6409    return json.loads(encoded, cls=CustomJSONDecoder)
6410
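# A minimal round-trip sketch using the two helpers above (the values
# are arbitrary illustrations):
#
#     s = toJSON({'where': (1, 2)})
#     assert fromJSON(s) == {'where': (1, 2)}  # tuples survive the trip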
6411
6412class CustomJSONEncoder(json.JSONEncoder):
6413    """
6414    A custom JSON encoder that has special protocols for handling the
6415    same objects that `CustomJSONDecoder` decodes. It handles these
6416    objects specially so that they can be decoded back to their original
6417    form.
6418
6419    Examples:
6420
6421    >>> from . import core
6422    >>> tupList = [(1, 1), (2, 2)]
6423    >>> encTup = toJSON(tupList)
6424    >>> encTup
6425    '[{"^^d": "tuple", "values": [1, 1]}, {"^^d": "tuple", "values": [2, 2]}]'
6426    >>> fromJSON(encTup) == tupList
6427    True
6428    >>> dg = core.DecisionGraph.example('simple')
6429    >>> fromJSON(toJSON(dg)) == dg
6430    True
6431    >>> dg = core.DecisionGraph.example('abc')
6432    >>> zi = dg.getZoneInfo('upZone')
6433    >>> zi
6434    ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={},\
6435 annotations=[])
6436    >>> zj = toJSON(zi)
6437    >>> zj
6438    '{"^^d": "namedtuple", "name": "ZoneInfo", "values":\
6439 {"level": 1, "parents": {"^^d": "set", "values": []},\
6440 "contents": {"^^d": "set", "values": ["zoneA"]}, "tags": {},\
6441 "annotations": []}}'
6442    >>> fromJSON(toJSON(zi))
6443    ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={},\
6444 annotations=[])
6445    >>> fromJSON(toJSON(zi)) == zi
6446    True
6447    >>> toJSON({'a': 'b', 1: 2})
6448    '{"^^d": "dict", "items": [["a", "b"], [1, 2]]}'
6449    >>> toJSON(((1, 2), (3, 4)))
6450    '{"^^d": "tuple", "values": [{"^^d": "tuple", "values": [1, 2]},\
6451 {"^^d": "tuple", "values": [3, 4]}]}'
6452    >>> toJSON(base.effect(set=('grate', 'open')))
6453    '{"type": "set", "applyTo": "active",\
6454 "value": {"^^d": "tuple",\
6455 "values": [{"^^d": "namedtuple", "name": "MechanismSpecifier",\
6456 "values": {"domain": null, "zone": null, "decision": null, "name": "grate"}},\
6457 "open"]}, "delay": null, "charges": null, "hidden": false}'
6458    >>> j = toJSON(dg)
6459    >>> expected = (
6460    ... '{"^^d": "DecisionGraph",'
6461    ... ' "props": {},'
6462    ... ' "node_links": {"directed": true,'
6463    ... ' "multigraph": true,'
6464    ... ' "graph": {},'
6465    ... ' "nodes": ['
6466    ... '{"name": "A", "domain": "main", "tags": {},'
6467    ... ' "annotations": ["This is a multi-word \\\\"annotation.\\\\""],'
6468    ... ' "zones": {"^^d": "set", "values": ["zoneA"]},'
6469    ... ' "mechanisms": {"grate": 0},'
6470    ... ' "id": 0'
6471    ... '},'
6472    ... ' {'
6473    ... '"name": "B",'
6474    ... ' "domain": "main",'
6475    ... ' "tags": {"b": 1, "tag2": "\\\\"value\\\\""},'
6476    ... ' "annotations": [],'
6477    ... ' "zones": {"^^d": "set", "values": ["zoneB"]},'
6478    ... ' "id": 1'
6479    ... '},'
6480    ... ' {'
6481    ... '"name": "C",'
6482    ... ' "domain": "main",'
6483    ... ' "tags": {"aw\\\\"ful": "ha\\'ha"},'
6484    ... ' "annotations": [],'
6485    ... ' "zones": {"^^d": "set", "values": ["zoneA"]},'
6486    ... ' "id": 2'
6487    ... '}'
6488    ... '],'
6489    ... ' "links": ['
6490    ... '{'
6491    ... '"tags": {},'
6492    ... ' "annotations": [],'
6493    ... ' "reciprocal": "right",'
6494    ... ' "source": 0,'
6495    ... ' "target": 1,'
6496    ... ' "key": "left"'
6497    ... '},'
6498    ... ' {'
6499    ... '"tags": {},'
6500    ... ' "annotations": [],'
6501    ... ' "reciprocal": "up_right",'
6502    ... ' "requirement": {"^^d": "Requirement", "value": "grate:open"},'
6503    ... ' "source": 0,'
6504    ... ' "target": 1,'
6505    ... ' "key": "up_left"'
6506    ... '},'
6507    ... ' {'
6508    ... '"tags": {},'
6509    ... ' "annotations": ["Transition \\'annotation.\\'"],'
6510    ... ' "reciprocal": "up",'
6511    ... ' "source": 0,'
6512    ... ' "target": 2,'
6513    ... ' "key": "down"'
6514    ... '},'
6515    ... ' {'
6516    ... '"tags": {},'
6517    ... ' "annotations": [],'
6518    ... ' "reciprocal": "left",'
6519    ... ' "source": 1,'
6520    ... ' "target": 0,'
6521    ... ' "key": "right"'
6522    ... '},'
6523    ... ' {'
6524    ... '"tags": {},'
6525    ... ' "annotations": [],'
6526    ... ' "reciprocal": "up_left",'
6527    ... ' "requirement": {"^^d": "Requirement", "value": "grate:open"},'
6528    ... ' "source": 1,'
6529    ... ' "target": 0,'
6530    ... ' "key": "up_right"'
6531    ... '},'
6532    ... ' {'
6533    ... '"tags": {"fast": 1},'
6534    ... ' "annotations": [],'
6535    ... ' "reciprocal": "down",'
6536    ... ' "source": 2,'
6537    ... ' "target": 0,'
6538    ... ' "key": "up"'
6539    ... '},'
6540    ... ' {'
6541    ... '"tags": {},'
6542    ... ' "annotations": [],'
6543    ... ' "requirement": {"^^d": "Requirement", "value": "!(helmet)"},'
6544    ... ' "consequence": ['
6545    ... '{'
6546    ... '"type": "gain", "applyTo": "active", "value": "helmet",'
6547    ... ' "delay": null, "charges": null, "hidden": false'
6548    ... '},'
6549    ... ' {'
6550    ... '"type": "deactivate",'
6551    ... ' "applyTo": "active", "value": null,'
6552    ... ' "delay": 3, "charges": null, "hidden": false'
6553    ... '}'
6554    ... '],'
6555    ... ' "source": 2,'
6556    ... ' "target": 2,'
6557    ... ' "key": "grab_helmet"'
6558    ... '},'
6559    ... ' {'
6560    ... '"tags": {},'
6561    ... ' "annotations": [],'
6562    ... ' "requirement": {"^^d": "Requirement", "value": "helmet"},'
6563    ... ' "consequence": ['
6564    ... '{"type": "lose", "applyTo": "active", "value": "helmet",'
6565    ... ' "delay": null, "charges": null, "hidden": false},'
6566    ... ' {"type": "gain", "applyTo": "active",'
6567    ... ' "value": {"^^d": "tuple", "values": ["token", 1]},'
6568    ... ' "delay": null, "charges": null, "hidden": false'
6569    ... '},'
6570    ... ' {"condition":'
6571    ... ' {"^^d": "Requirement", "value": "token*2"},'
6572    ... ' "consequence": ['
6573    ... '{"type": "set", "applyTo": "active",'
6574    ... ' "value": {"^^d": "tuple", "values": ['
6575    ... '{"^^d": "namedtuple", "name": "MechanismSpecifier",'
6576    ... ' "values": {"domain": null, "zone": null, "decision": null,'
6577    ... ' "name": "grate"}}, "open"]},'
6578    ... ' "delay": null, "charges": null, "hidden": false'
6579    ... '},'
6580    ... ' {"type": "deactivate", "applyTo": "active", "value": null,'
6581    ... ' "delay": null, "charges": null, "hidden": false'
6582    ... '}'
6583    ... '],'
6584    ... ' "alternative": []'
6585    ... '}'
6586    ... '],'
6587    ... ' "source": 2,'
6588    ... ' "target": 2,'
6589    ... ' "key": "pull_lever"'
6590    ... '}'
6591    ... ']'
6592    ... '},'
6593    ... ' "_byEdge": {"^^d": "dict", "items":'
6594    ... ' [[0, {"left": 1, "up_left": 1, "down": 2}],'
6595    ... ' [1, {"right": 0, "up_right": 0}],'
6596    ... ' [2, {"up": 0, "grab_helmet": 2, "pull_lever": 2}]]},'
6597    ... ' "zones": {"zoneA":'
6598    ... ' {"^^d": "namedtuple", "name": "ZoneInfo",'
6599    ... ' "values": {'
6600    ... '"level": 0,'
6601    ... ' "parents": {"^^d": "set", "values": ["upZone"]},'
6602    ... ' "contents": {"^^d": "set", "values": [0, 2]},'
6603    ... ' "tags": {},'
6604    ... ' "annotations": []'
6605    ... '}'
6606    ... '},'
6607    ... ' "zoneB":'
6608    ... ' {"^^d": "namedtuple", "name": "ZoneInfo",'
6609    ... ' "values": {'
6610    ... '"level": 0,'
6611    ... ' "parents": {"^^d": "set", "values": []},'
6612    ... ' "contents": {"^^d": "set", "values": [1]},'
6613    ... ' "tags": {},'
6614    ... ' "annotations": []'
6615    ... '}'
6616    ... '},'
6617    ... ' "upZone":'
6618    ... ' {"^^d": "namedtuple", "name": "ZoneInfo",'
6619    ... ' "values": {'
6620    ... '"level": 1,'
6621    ... ' "parents": {"^^d": "set", "values": []},'
6622    ... ' "contents": {"^^d": "set", "values": ["zoneA"]},'
6623    ... ' "tags": {},'
6624    ... ' "annotations": []'
6625    ... '}'
6626    ... '}'
6627    ... '},'
6628    ... ' "unknownCount": 0,'
6629    ... ' "equivalences": {"^^d": "dict", "items": ['
6630    ... '[{"^^d": "tuple", "values": [0, "open"]},'
6631    ... ' {"^^d": "set", "values": ['
6632    ... '{"^^d": "Requirement", "value": "helmet"}]}]'
6633    ... ']},'
6634    ... ' "reversionTypes": {},'
6635    ... ' "nextMechanismID": 1,'
6636    ... ' "mechanisms": {"^^d": "dict", "items": ['
6637    ... '[0, {"^^d": "tuple", "values": [0, "grate"]}]]},'
6638    ... ' "globalMechanisms": {},'
6639    ... ' "nameLookup": {"A": [0], "B": [1], "C": [2]}'
6640    ... '}'
6641    ... )
6642    >>> for i in range(len(j)):
6643    ...     if j[i] != expected[i:i+1]:
6644    ...         print(
6645    ...             'exp: ' + expected[i-10:i+50] + '\\ngot: ' + j[i-10:i+50]
6646    ...         )
6647    ...         break
6648    >>> j == expected
6649    True
6650    >>> rec = fromJSON(j)
6651    >>> rec.nodes == dg.nodes
6652    True
6653    >>> rec.edges == dg.edges
6654    True
6655    >>> rec.unknownCount == dg.unknownCount
6656    True
6657    >>> rec.equivalences == dg.equivalences
6658    True
6659    >>> rec.reversionTypes == dg.reversionTypes
6660    True
6661    >>> rec._byEdge == dg._byEdge
6662    True
6663    >>> rec.zones == dg.zones
6664    True
6665    >>> for diff in dg.listDifferences(rec):
6666    ...     print(diff)
6667    >>> rec == dg
6668    True
6669
6670    `base.MetricSpace` example:
6671
6672    >>> ms = base.MetricSpace("test")
6673    >>> ms.addPoint([2, 3])
6674    0
6675    >>> ms.addPoint([2, 7, 0])
6676    1
6677    >>> ms.addPoint([2, 7])
6678    2
6679    >>> toJSON(ms) # TODO: ^^d entries here
6680    '{"^^d": "MetricSpace", "name": "test",\
6681 "points": {"^^d": "dict", "items": [[0, [2, 3]], [1, [2, 7,\
6682 0]], [2, [2, 7]]]}, "lastID": 2}'
6683    >>> ms.removePoint(0)
6684    >>> ms.removePoint(1)
6685    >>> ms.removePoint(2)
6686    >>> toJSON(ms)
6687    '{"^^d": "MetricSpace", "name": "test", "points": {}, "lastID": 2}'
6688    >>> ms.addPoint([5, 6])
6689    3
6690    >>> ms.addPoint([7, 8])
6691    4
6692    >>> toJSON(ms)
6693    '{"^^d": "MetricSpace", "name": "test",\
6694 "points": {"^^d": "dict", "items": [[3, [5, 6]], [4, [7, 8]]]}, "lastID": 4}'
6695
6696    # TODO: more examples, including one for a DiscreteExploration
6697    """
6698
6699    def default(self, o: Any) -> Any:
6700        """
6701        Re-writes objects for encoding. We re-write the following
6702        objects:
6703
6704        - `set`
6705        - `dict` (if the keys aren't all strings)
6706        - `tuple`/`namedtuple`
6707        - `ZoneInfo`
6708        - `Requirement`
6709        - `SkillCombination`
6710        - `DecisionGraph`
6711        - `DiscreteExploration`
6712        - `MetricSpace`
6713
6714        TODO: FeatureGraph...
6715        """
6716        if isinstance(o, list):
6717            return [self.default(x) for x in o]
6718
6719        elif isinstance(o, set):
6720            return {
6721                '^^d': 'set',
6722                'values': sorted(
6723                    [self.default(e) for e in o],
6724                    key=lambda x: str(x)
6725                )
6726            }
6727
6728        elif isinstance(o, dict):
6729            if all(isinstance(k, str) for k in o):
6730                return {
6731                    k: self.default(v)
6732                    for k, v in o.items()
6733                }
6734            else:
6735                return {
6736                    '^^d': 'dict',
6737                    'items': [
6738                        [self.default(k), self.default(v)]
6739                        for (k, v) in o.items()
6740                    ]
6741                }
6742
6743        elif isinstance(o, tuple):
6744            if hasattr(o, '_fields') and hasattr(o, '_asdict'):
6745                # Named tuple
6746                return {
6747                    '^^d': 'namedtuple',
6748                    'name': o.__class__.__name__,
6749                    'values': {
6750                        k: self.default(v)
6751                        for k, v in o._asdict().items()
6752                    }
6753                }
6754            else:
6755                # Normal tuple
6756                return {
6757                    "^^d": "tuple",
6758                    "values": [self.default(e) for e in o]
6759                }
6760
6761        elif isinstance(o, base.Requirement):
6762            return {
6763                '^^d': 'Requirement',
6764                'value': o.unparse()
6765            }
6766
6767        elif isinstance(o, base.SkillCombination):
6768            return {
6769                '^^d': 'SkillCombination',
6770                'value': o.unparse()
6771            }
6772
6773        elif isinstance(o, core.DecisionGraph):
6774            return {
6775                '^^d': 'DecisionGraph',
6776                'props': self.default(o.graph),  # type:ignore [attr-defined]
6777                'node_links': self.default(networkx.node_link_data(o)),
6778                '_byEdge': self.default(o._byEdge),
6779                'zones': self.default(o.zones),
6780                'unknownCount': o.unknownCount,
6781                'equivalences': self.default(o.equivalences),
6782                'reversionTypes': self.default(o.reversionTypes),
6783                'nextMechanismID': o.nextMechanismID,
6784                'mechanisms': self.default(o.mechanisms),
6785                'globalMechanisms': self.default(o.globalMechanisms),
6786                'nameLookup': self.default(o.nameLookup)
6787            }
6788
6789        elif isinstance(o, core.DiscreteExploration):
6790            return {
6791                '^^d': 'DiscreteExploration',
6792                'situations': self.default(o.situations)
6793            }
6794
6795        elif isinstance(o, base.MetricSpace):
6796            return {
6797                '^^d': 'MetricSpace',
6798                'name': o.name,
6799                'points': self.default(o.points),
6800                'lastID': o.lastID()
6801            }
6802
6803        else:
6804            return o
6805
6806    def encode(self, o: Any) -> str:
6807        """
6808        Custom encode function since we need to override behavior for
6809        tuples and dicts.
6810        """
6811        if isinstance(o, (tuple, dict, set)):
6812            o = self.default(o)
6813        elif isinstance(o, list):
6814            o = [self.default(x) for x in o]
6815
6816        try:
6817            return super().encode(o)
6818        except TypeError:
6819            return super().encode(self.default(o))
6820
6821    def iterencode(
6822        self,
6823        o: Any,
6824        _one_shot: bool = False
6825    ) -> Generator[str, None, None]:
6826        """
6827        Custom iterencode function since we need to override behavior for
6828        tuples and dicts.
6829        """
6830        if isinstance(o, (tuple, dict)):
6831            o = self.default(o)
6832
6833        yield from super().iterencode(o, _one_shot=_one_shot)
6834
6835
6836class CustomJSONDecoder(json.JSONDecoder):
6837    """
6838    A custom JSON decoder that has special protocols for handling
6839    several types, including:
6840
6841    - `set`
6842    - `tuple` & `namedtuple`
6843    - `dict` (where keys aren't all strings)
6844    - `Requirement`
6845    - `SkillCombination`
6846    - `DecisionGraph`
6847    - `DiscreteExploration`
6848    - `MetricSpace`
6849
6850    Used by `fromJSON`.
6851
6852    When initializing it, you can set a custom parse format by supplying
6853    a 'parseFormat' keyword argument; by default a standard
6854    `ParseFormat` will be used.
6855
6856    Examples:
6857
6858    >>> r = base.ReqAny([
6859    ...     base.ReqCapability('power'),
6860    ...     base.ReqTokens('money', 5)
6861    ... ])
6862    >>> s = toJSON(r)
6863    >>> s
6864    '{"^^d": "Requirement", "value": "(power|money*5)"}'
6865    >>> l = fromJSON(s)
6866    >>> r == l
6867    True
6868    >>> o = {1, 2, 'hi'}
6869    >>> s = toJSON(o)
6870    >>> s
6871    '{"^^d": "set", "values": [1, 2, "hi"]}'
6872    >>> l = fromJSON(s)
6873    >>> o == l
6874    True
6875    >>> zi = base.ZoneInfo(1, set(), set(), {}, [])
6876    >>> s = toJSON(zi)
6877    >>> c = (
6878    ... '{"^^d": "namedtuple", "name": "ZoneInfo", "values": {'
6879    ... '"level": 1,'
6880    ... ' "parents": {"^^d": "set", "values": []},'
6881    ... ' "contents": {"^^d": "set", "values": []},'
6882    ... ' "tags": {},'
6883    ... ' "annotations": []'
6884    ... '}}'
6885    ... )
6886    >>> s == c
6887    True
6888
6889    TODO: SkillCombination example
6890    """
6891    def __init__(self, *args, **kwargs):
6892        if 'object_hook' in kwargs:
6893            outerHook = kwargs['object_hook']
6894            kwargs['object_hook'] = (
6895                lambda o: outerHook(self.unpack(o))
6896            )
6897            # TODO: What if it's a positional argument? :(
6898        else:
6899            kwargs['object_hook'] = lambda o: self.unpack(o)
6900
6901        if 'parseFormat' in kwargs:
6902            self.parseFormat = kwargs['parseFormat']
6903            del kwargs['parseFormat']
6904        else:
6905            self.parseFormat = ParseFormat()
6906
6907        super().__init__(*args, **kwargs)
6908
6909    def unpack(self, obj: Any) -> Any:
6910        """
6911        Unpacks an object; used as the `object_hook` for decoding.
6912        """
6913        if '^^d' in obj:
6914            asType = obj['^^d']
6915            if asType == 'tuple':
6916                return tuple(obj['values'])
6917
6918            elif asType == 'namedtuple':
6919                g = globals()
6920                name = obj['name']
6921                values = obj['values']
6922                # Use an existing global namedtuple class if there is
6923                # one that goes by the specified name, so that we don't
6924                # create too many spurious equivalent namedtuple
6925                # classes. But fall back on creating a new namedtuple
6926                # class if we need to:
6927                ntClass = g.get(name)
6928                if (
6929                    ntClass is None
6930                 or not issubclass(ntClass, tuple)
6931                 or not hasattr(ntClass, '_asdict')
6932                ):
6933                    ntClass = collections.namedtuple(  # type: ignore
6934                        name,
6935                        values.keys()
6936                    )
6937                ntClass = cast(Callable, ntClass)
6938                return ntClass(**values)
6939
6940            elif asType == 'set':
6941                return set(obj['values'])
6942
6943            elif asType == 'dict':
6944                return dict(obj['items'])
6945
6946            elif asType == 'Requirement':
6947                return self.parseFormat.parseRequirement(obj['value'])
6948
6949            elif asType == 'SkillCombination':
6950                return self.parseFormat.parseSkillCombination(obj['value'])
6951
6952            elif asType == 'Effect':
6953                return self.parseFormat.parseEffect(obj['value'])
6954
6955            elif asType == 'Challenge':
6956                return self.parseFormat.parseChallenge(obj['value'])
6957
6958            elif asType == 'Condition':
6959                return self.parseFormat.parseCondition(obj['value'])
6960
6961            elif asType == 'Consequence':
6962                return self.parseFormat.parseConsequence(obj['value'])
6963
6964            # TODO: Consequences here!
6965
6966            elif asType == 'DecisionGraph':
6967                baseGraph: networkx.MultiDiGraph = networkx.node_link_graph(
6968                    obj['node_links']
6969                )
6970                graphResult = core.DecisionGraph()
6971                # Copy over non-internal attributes
6972                for attr in dir(baseGraph):
6973                    if attr == "name":
6974                        continue
6975                    if not attr.startswith('__') or not attr.endswith('__'):
6976                        val = getattr(baseGraph, attr)
6977                        setattr(
6978                            graphResult,
6979                            attr,
6980                            copy.deepcopy(val)
6981                        )
6982
6983                if baseGraph.name != '':
6984                    graphResult.name = baseGraph.name
6985                graphResult.graph.update(obj['props'])  # type:ignore [attr-defined]  # noqa
6986                storedByEdge = obj['_byEdge']
6987                graphResult._byEdge = {
6988                    int(k): storedByEdge[k]
6989                    for k in storedByEdge
6990                }
6991                graphResult.zones = obj['zones']
6992                graphResult.unknownCount = obj['unknownCount']
6993                graphResult.equivalences = obj['equivalences']
6994                graphResult.reversionTypes = obj['reversionTypes']
6995                graphResult.nextMechanismID = obj['nextMechanismID']
6996                graphResult.mechanisms = {
6997                    int(k): v
6998                    for k, v in
6999                    obj['mechanisms'].items()
7000                }
7001                graphResult.globalMechanisms = obj['globalMechanisms']
7002                graphResult.nameLookup = obj['nameLookup']
7003                return graphResult
7004
7005            elif asType == 'DiscreteExploration':
7006                exResult = core.DiscreteExploration()
7007                exResult.situations = obj['situations']
7008                return exResult
7009
7010            elif asType == 'MetricSpace':
7011                msResult = base.MetricSpace(obj['name'])
7012                msResult.points = obj['points']
7013                msResult.nextID = obj['lastID'] + 1
7014                return msResult
7015
7016            else:
7017                raise NotImplementedError(
7018                    f"No special handling has been defined for"
7019                    f" decoding type '{asType}'."
7020                )
7021
7022        else:
7023            return obj
class Lexeme(enum.IntEnum):

These are the different separators, grouping characters, and keywords used as part of parsing. The characters that are actually recognized are defined as part of a Format.

domainSeparator = <Lexeme.domainSeparator: 1>
zoneSeparator = <Lexeme.zoneSeparator: 2>
partSeparator = <Lexeme.partSeparator: 3>
stateOn = <Lexeme.stateOn: 4>
stateOff = <Lexeme.stateOff: 5>
tokenCount = <Lexeme.tokenCount: 6>
effectCharges = <Lexeme.effectCharges: 7>
sepOrDelay = <Lexeme.sepOrDelay: 8>
consequenceSeparator = <Lexeme.consequenceSeparator: 9>
inCommon = <Lexeme.inCommon: 10>
isHidden = <Lexeme.isHidden: 11>
skillLevel = <Lexeme.skillLevel: 12>
wigglyLine = <Lexeme.wigglyLine: 13>
withDetails = <Lexeme.withDetails: 14>
reciprocalSeparator = <Lexeme.reciprocalSeparator: 15>
mechanismSeparator = <Lexeme.mechanismSeparator: 16>
openCurly = <Lexeme.openCurly: 17>
closeCurly = <Lexeme.closeCurly: 18>
openParen = <Lexeme.openParen: 19>
closeParen = <Lexeme.closeParen: 20>
angleLeft = <Lexeme.angleLeft: 21>
angleRight = <Lexeme.angleRight: 22>
doubleQuestionmark = <Lexeme.doubleQuestionmark: 23>
ampersand = <Lexeme.ampersand: 24>
orBar = <Lexeme.orBar: 25>
notMarker = <Lexeme.notMarker: 26>
Format = typing.Dict[Lexeme, str]

A journal format is specified using a dictionary with keys that denote journal marker types and values which are one-to-several-character strings indicating the markup used for that entry/info type.

DEFAULT_FORMAT: Dict[Lexeme, str] = {<Lexeme.domainSeparator: 1>: '//', <Lexeme.zoneSeparator: 2>: '::', <Lexeme.partSeparator: 3>: '%%', <Lexeme.stateOn: 4>: '=on', <Lexeme.stateOff: 5>: '=off', <Lexeme.tokenCount: 6>: '*', <Lexeme.effectCharges: 7>: '=', <Lexeme.sepOrDelay: 8>: ',', <Lexeme.consequenceSeparator: 9>: ';', <Lexeme.inCommon: 10>: '+c', <Lexeme.isHidden: 11>: '+h', <Lexeme.skillLevel: 12>: '^', <Lexeme.wigglyLine: 13>: '~', <Lexeme.withDetails: 14>: '%', <Lexeme.reciprocalSeparator: 15>: '/', <Lexeme.mechanismSeparator: 16>: ':', <Lexeme.openCurly: 17>: '{', <Lexeme.closeCurly: 18>: '}', <Lexeme.openParen: 19>: '(', <Lexeme.closeParen: 20>: ')', <Lexeme.angleLeft: 21>: '<', <Lexeme.angleRight: 22>: '>', <Lexeme.doubleQuestionmark: 23>: '??', <Lexeme.ampersand: 24>: '&', <Lexeme.orBar: 25>: '|', <Lexeme.notMarker: 26>: '!'}

The default parsing format.
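
A custom format can be built by copying the default and overriding
individual entries; for example (a sketch, with an arbitrarily-chosen
replacement marker):

>>> myFormat = dict(DEFAULT_FORMAT)
>>> myFormat[Lexeme.zoneSeparator] = '>>'
>>> myFormat[Lexeme.zoneSeparator]
'>>'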

DEFAULT_EFFECT_NAMES: Dict[str, Literal['gain', 'lose', 'set', 'toggle', 'deactivate', 'edit', 'goto', 'bounce', 'follow', 'save']] = {'gain': 'gain', 'lose': 'lose', 'set': 'set', 'toggle': 'toggle', 'deactivate': 'deactivate', 'edit': 'edit', 'goto': 'goto', 'bounce': 'bounce', 'follow': 'follow', 'save': 'save'}

Default names for each effect type. Maps names to canonical effect type strings. A different mapping could be used to allow for writing effect names in another language, for example.
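
For example, a few (hypothetical) French aliases could be layered on
top of the defaults without removing the canonical names:

>>> french = dict(DEFAULT_EFFECT_NAMES)
>>> french['gagner'] = 'gain'
>>> french['perdre'] = 'lose'
>>> french['gagner']
'gain'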

DEFAULT_FOCALIZATION_NAMES: Dict[str, Literal['singular', 'plural', 'spreading']] = {'singular': 'singular', 'plural': 'plural', 'spreading': 'spreading'}

Default names for each domain focalization type. Maps each focalization type string to itself.

DEFAULT_SF_INDICATORS: Tuple[str, str] = ('s', 'f')

Default characters used to indicate success/failure when transcribing a TransitionWithOutcomes.

class ParseWarning(builtins.Warning):
139class ParseWarning(Warning):
140    """
141    Represents a warning encountered when parsing something.
142    """
143    pass

class ParseError(builtins.ValueError):
146class ParseError(ValueError):
147    """
148    Represents an error encountered when parsing.
149    """
150    pass

class DotParseError(ParseError):
153class DotParseError(ParseError):
154    """
155    An error raised during parsing when incorrectly-formatted graphviz
156    "dot" data is provided. See `parseDot`.
157    """
158    pass

class InvalidFeatureSpecifierError(ParseError):
161class InvalidFeatureSpecifierError(ParseError):
162    """
163    An error used when a feature specifier is in the wrong format.
164    Errors with part specifiers also use this.
165    """

LexedTokens: TypeAlias = List[Union[Lexeme, str]]

When lexing, we pull apart a string into pieces, but when we recognize lexemes, we use their integer IDs in the list instead of strings, so we get a list that's a mix of ints and strings.
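
For example (a sketch using the `Lexeme` values defined above):

>>> lex('(abc)', {'(': Lexeme.openParen, ')': Lexeme.closeParen})
[<Lexeme.openParen: 19>, 'abc', <Lexeme.closeParen: 20>]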

GroupedTokens: TypeAlias = List[Union[Lexeme, str, ForwardRef('GroupedTokens')]]

Some parsing processes group tokens into sub-lists. This type represents LexedTokens which might also contain sub-lists, to arbitrary depth.
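
An illustrative literal (not the output of any particular call):
[Lexeme.openCurly, 'best', [Lexeme.openParen, 'brains',
Lexeme.closeParen], Lexeme.closeCurly] is a valid GroupedTokens value,
with one sub-list for the parenthesized group.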

GroupedRequirementParts: TypeAlias = List[Union[Lexeme, exploration.base.Requirement, ForwardRef('GroupedRequirementParts')]]

Another intermediate parsing result during requirement parsing: a list of base.Requirements possibly with some sub-lists and/or Lexemes mixed in.
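
An illustrative literal under the same convention:
[[base.ReqCapability('power')], Lexeme.orBar, [base.ReqTokens('money', 5)]]
mixes already-parsed base.Requirement objects with a Lexeme that still
awaits interpretation.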

def lex( characters: str, tokenMap: Optional[Dict[str, Lexeme]] = None) -> List[Union[Lexeme, str]]:
195def lex(
196    characters: str,
197    tokenMap: Optional[Dict[str, Lexeme]] = None
198) -> LexedTokens:
199    """
200    Lexes a list of tokens from a characters string. Recognizes any
201    special characters you provide in the token map, as well as
202    collections of non-mapped characters. Recognizes double-quoted
203    strings which can contain any of those (and which use
204    backslash-escapes for internal double quotes) and includes quoted
205    versions of those strings as tokens (any token string starting with a
206    double quote will be such a string). Breaks tokens on whitespace
207    outside of quotation marks, and ignores that whitespace.
208
209    Examples:
210
211    >>> lex('abc')
212    ['abc']
213    >>> lex('(abc)', {'(': 0, ')': 1})
214    [0, 'abc', 1]
215    >>> lex('{(abc)}', {'(': 0, ')': 1, '{': 2, '}': 3})
216    [2, 0, 'abc', 1, 3]
217    >>> lex('abc def')
218    ['abc', 'def']
219    >>> lex('abc   def')
220    ['abc', 'def']
221    >>> lex('abc \\n def')
222    ['abc', 'def']
223    >>> lex ('"quoted"')
224    ['"quoted"']
225    >>> lex ('"quoted  pair"')
226    ['"quoted  pair"']
227    >>> lex ('  oneWord | "two words"|"three  words words" ', {'|': 0})
228    ['oneWord', 0, '"two words"', 0, '"three  words words"']
229    >>> tokenMap = { c: i for (i, c) in enumerate("(){}~:;>,") }
230    >>> tokenMap['::'] = 9
231    >>> tokenMap['~~'] = 10
232    >>> lex(
233    ...     '{~~2:best(brains, brawn)>{set switch on}'
234    ...     '{deactivate ,1; bounce}}',
235    ...     tokenMap
236    ... )
237    [2, 10, '2', 5, 'best', 0, 'brains', 8, 'brawn', 1, 7, 2, 'set',\
238 'switch', 'on', 3, 2, 'deactivate', 8, '1', 6, 'bounce', 3, 3]
239    >>> lex('set where::mechanism state', tokenMap)
240    ['set', 'where', 9, 'mechanism', 'state']
241    >>> # Note r' doesn't take full effect 'cause we're in triple quotes
242    >>> esc = r'"escape \\\\a"'
243    >>> result = [ r'"escape \\\\a"' ]  # 'quoted' doubles the backslash
244    >>> len(esc)
245    12
246    >>> len(result[0])
247    12
248    >>> lex(esc) == result
249    True
250    >>> quoteInQuote = r'before "hello \\\\ \\" goodbye"after'
251    >>> # Note r' doesn't take full effect 'cause we're in triple quotes
252    >>> expect = ['before', r'"hello \\\\ \\" goodbye"', 'after']
253    >>> lex(quoteInQuote) == expect
254    True
255    >>> lex('O\\'Neill')
256    ["O'Neill"]
257    >>> lex('one "quote ')
258    ['one', '"quote "']
259    >>> lex('geo*15', {'*': 0})
260    ['geo', 0, '15']
261    """
262    if tokenMap is None:
263        tokenMap = {}
264    tokenStarts: Dict[str, List[str]] = {}
265    for key in sorted(tokenMap.keys(), key=lambda x: -len(x)):
266        tokenStarts.setdefault(key[:1], []).append(key)
267    tokens: LexedTokens = []
268    sofar = ''
269    inQuote = False
270    escaped = False
271    skip = 0
272    for i in range(len(characters)):
273        if skip > 0:
274            skip -= 1
275            continue
276
277        char = characters[i]
278        if escaped:
279            # TODO: Escape sequences?
280            sofar += char
281            escaped = False
282
283        elif char == '\\':
284            if inQuote:
285                escaped = True
286            else:
287                sofar += char
288
289        elif char == '"':
290            if sofar != '':
291                if inQuote:
292                    tokens.append(utils.quoted(sofar))
293                else:
294                    tokens.append(sofar)
295            sofar = ''
296            inQuote = not inQuote
297
298        elif inQuote:
299            sofar += char
300
301        elif char in tokenStarts:
302            options = tokenStarts[char]
303            hit: Optional[str] = None
304            for possibility in options:
305                lp = len(possibility)
306                if (
307                    (lp == 1 and char == possibility)
308                or characters[i:i + lp] == possibility
309                ):
310                    hit = possibility
311                    break
312
313            if hit is not None:
314                if sofar != '':
315                    tokens.append(sofar)
316                tokens.append(tokenMap[hit])
317                sofar = ''
318                skip = len(hit) - 1
319            else:  # Not actually a recognized token
320                sofar += char
321
322        elif char.isspace():
323            if sofar != '':
324                tokens.append(sofar)
325            sofar = ''
326
327        else:
328            sofar += char
329
330    if sofar != '':
331        if inQuote:
332            tokens.append(utils.quoted(sofar))
333        else:
334            tokens.append(sofar)
335
336    return tokens

Lexes a list of tokens from a characters string. Recognizes any special characters you provide in the token map, as well as collections of non-mapped characters. Recognizes double-quoted strings which can contain any of those (and which use backslash-escapes for internal double quotes) and includes quoted versions of those strings as tokens (any token string starting with a double quote will be such a string). Breaks tokens on whitespace outside of quotation marks, and ignores that whitespace.

Examples:

>>> lex('abc')
['abc']
>>> lex('(abc)', {'(': 0, ')': 1})
[0, 'abc', 1]
>>> lex('{(abc)}', {'(': 0, ')': 1, '{': 2, '}': 3})
[2, 0, 'abc', 1, 3]
>>> lex('abc def')
['abc', 'def']
>>> lex('abc   def')
['abc', 'def']
>>> lex('abc \n def')
['abc', 'def']
>>> lex ('"quoted"')
['"quoted"']
>>> lex ('"quoted  pair"')
['"quoted  pair"']
>>> lex ('  oneWord | "two words"|"three  words words" ', {'|': 0})
['oneWord', 0, '"two words"', 0, '"three  words words"']
>>> tokenMap = { c: i for (i, c) in enumerate("(){}~:;>,") }
>>> tokenMap['::'] = 9
>>> tokenMap['~~'] = 10
>>> lex(
...     '{~~2:best(brains, brawn)>{set switch on}'
...     '{deactivate ,1; bounce}}',
...     tokenMap
... )
[2, 10, '2', 5, 'best', 0, 'brains', 8, 'brawn', 1, 7, 2, 'set', 'switch', 'on', 3, 2, 'deactivate', 8, '1', 6, 'bounce', 3, 3]
>>> lex('set where::mechanism state', tokenMap)
['set', 'where', 9, 'mechanism', 'state']
>>> # Note r' doesn't take full effect 'cause we're in triple quotes
>>> esc = r'"escape \\a"'
>>> result = [ r'"escape \\a"' ]  # 'quoted' doubles the backslash
>>> len(esc)
12
>>> len(result[0])
12
>>> lex(esc) == result
True
>>> quoteInQuote = r'before "hello \\ \" goodbye"after'
>>> # Note r' doesn't take full effect 'cause we're in triple quotes
>>> expect = ['before', r'"hello \\ \" goodbye"', 'after']
>>> lex(quoteInQuote) == expect
True
>>> lex('O\'Neill')
["O'Neill"]
>>> lex('one "quote ')
['one', '"quote "']
>>> lex('geo*15', {'*': 0})
['geo', 0, '15']
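
In practice the token map is usually the reverse of a Format dictionary, mapping marker strings to Lexeme values (this is what ParseFormat.lex sets up internally). A minimal sketch using the default format:

>>> revMap = {marker: lexeme for (lexeme, marker) in DEFAULT_FORMAT.items()}
>>> lex('zone::decision', revMap) == [
...     'zone', Lexeme.zoneSeparator, 'decision'
... ]
True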
def unLex( tokens: List[Union[Lexeme, str]], tokenMap: Optional[Dict[str, Lexeme]] = None) -> str:
339def unLex(
340    tokens: LexedTokens,
341    tokenMap: Optional[Dict[str, Lexeme]] = None
342) -> str:
343    """
344    Turns lexed stuff back into a string, substituting strings back into
345    token spots by reversing the given token map. Adds quotation marks to
346    complex tokens where necessary to prevent them from re-lexing into
347    multiple tokens (but `lex` doesn't remove those, so in some cases
348    there's not a perfect round-trip unLex -> lex).
349
350    For example:
351
352    >>> unLex(['a', 'b'])
353    'a b'
354    >>> tokens = {'(': 0, ')': 1, '{': 2, '}': 3, '::': 4}
355    >>> unLex([0, 'hi', 1], tokens)
356    '(hi)'
357    >>> unLex([0, 'visit', 'zone', 4, 'decision', 1], tokens)
358    '(visit zone::decision)'
359    >>> q = unLex(['a complex token', '\\'single\\' and "double" quotes'])
360    >>> q  # unLex adds quotes
361    '"a complex token" "\\'single\\' and \\\\"double\\\\" quotes"'
362    >>> lex(q)  # Not the same as the original list
363    ['"a complex token"', '"\\'single\\' and \\\\"double\\\\" quotes"']
364    >>> lex(unLex(lex(q)))  # But further round-trips work
365    ['"a complex token"', '"\\'single\\' and \\\\"double\\\\" quotes"']
366
367    TODO: Fix this:
368    For now, it generates incorrect results when token combinations
369    are ambiguous. At least such ambiguous token combinations should
370    never be generated by `lex` itself. For example:
371
372    >>> ambiguous = {':': 0, '::': 1}
373    >>> u = unLex(['a', 0, 0, 'b'], ambiguous)
374    >>> u
375    'a::b'
376    >>> l = lex(u, ambiguous)
377    >>> l
378    ['a', 1, 'b']
379    >>> l == ['a', 0, 0, 'b']  # Original token list is not recovered
380    False
381    """
382    if tokenMap is None:
383        nTokens = 0
384        revMap = {}
385    else:
386        nTokens = len(tokenMap)
387        revMap = {y: x for (x, y) in tokenMap.items()}
388
389    prevRaw = False
390    # TODO: add spaces where necessary to disambiguate token sequences...
391    if len(revMap) != nTokens:
392        warnings.warn(
393            (
394                "Irreversible token map! Two or more tokens have the same"
395                " integer value."
396            ),
397            ParseWarning
398        )
399
400    result = ""
401    for item in tokens:
402        if isinstance(item, int):
403            try:
404                result += revMap[item]
405            except KeyError:
406                raise ValueError(
407                    f"Tokens list contains {item} but the token map"
408                    f" does not have any entry which maps to {item}."
409                )
410            prevRaw = False
411        elif isinstance(item, str):
412            if prevRaw:
413                result += ' '
414            if len(lex(item)) > 1:
415                result += utils.quoted(item)
416            else:
417                result += item
418            prevRaw = True
419        else:
420            raise TypeError(
421                f"Token list contained non-int non-str item:"
422                f" {repr(item)}"
423            )
424
425    return result

Turns lexed stuff back into a string, substituting strings back into token spots by reversing the given token map. Adds quotation marks to complex tokens where necessary to prevent them from re-lexing into multiple tokens (but lex doesn't remove those, so in some cases there's not a perfect round-trip unLex -> lex).

For example:

>>> unLex(['a', 'b'])
'a b'
>>> tokens = {'(': 0, ')': 1, '{': 2, '}': 3, '::': 4}
>>> unLex([0, 'hi', 1], tokens)
'(hi)'
>>> unLex([0, 'visit', 'zone', 4, 'decision', 1], tokens)
'(visit zone::decision)'
>>> q = unLex(['a complex token', '\'single\' and "double" quotes'])
>>> q  # unLex adds quotes
'"a complex token" "\'single\' and \\"double\\" quotes"'
>>> lex(q)  # Not the same as the original list
['"a complex token"', '"\'single\' and \\"double\\" quotes"']
>>> lex(unLex(lex(q)))  # But further round-trips work
['"a complex token"', '"\'single\' and \\"double\\" quotes"']

TODO: Fix this: For now, it generates incorrect results when token combinations are ambiguous. At least such ambiguous token combinations should never be generated by lex itself. For example:

>>> ambiguous = {':': 0, '::': 1}
>>> u = unLex(['a', 0, 0, 'b'], ambiguous)
>>> u
'a::b'
>>> l = lex(u, ambiguous)
>>> l
['a', 1, 'b']
>>> l == ['a', 0, 0, 'b']  # Original token list is not recovered
False
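
As with lex, the token map will normally be derived from a Format dictionary; a quick sketch using the default format:

>>> fmtMap = {marker: lexeme for (lexeme, marker) in DEFAULT_FORMAT.items()}
>>> unLex(['zone', Lexeme.zoneSeparator, 'decision'], fmtMap)
'zone::decision'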
def normalizeEnds(tokens: List, start: int, end: int) -> Tuple[int, int, int]:
432def normalizeEnds(
433    tokens: List,
434    start: int,
435    end: int
436) -> Tuple[int, int, int]:
437    """
438    Given a tokens list and start & end integers, does some bounds
439    checking and normalization on the integers: converts negative
440    indices to positive indices, and raises an `IndexError` if they're
441    out-of-bounds after conversion. Returns a tuple containing the
442    normalized start & end indices, along with the number of tokens they
443    cover.
444    """
445    totalTokens = len(tokens)
446    if start < -len(tokens):
447        raise IndexError(
448            f"Negative start index out of bounds (got {start} for"
449            f" {totalTokens} tokens)."
450        )
451    elif start >= totalTokens:
452        raise IndexError(
453            f"Start index out of bounds (got {start} for"
454            f" {totalTokens} tokens)."
455        )
456    elif start < 0:
457        start = totalTokens + start
458
459    if end < -len(tokens):
460        raise IndexError(
461            f"Negative end index out of bounds (got {end} for"
462            f" {totalTokens} tokens)."
463        )
464    elif end >= totalTokens:
465        raise IndexError(
466            f"End index out of bounds (got {end} for"
467            f" {totalTokens} tokens)."
468        )
469    elif end < 0:
470        end = totalTokens + end
471
472    if end >= len(tokens):
473        end = len(tokens) - 1
474
475    return (start, end, (end - start) + 1)

Given a tokens list and start & end integers, does some bounds checking and normalization on the integers: converts negative indices to positive indices, and raises an IndexError if they're out-of-bounds after conversion. Returns a tuple containing the normalized start & end indices, along with the number of tokens they cover.
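
For example (a brief sketch of the normalization behavior):

>>> normalizeEnds(['a', 'b', 'c'], 0, -1)
(0, 2, 3)
>>> normalizeEnds(['a', 'b', 'c'], -2, -1)
(1, 2, 2)
>>> normalizeEnds(['a', 'b', 'c'], 0, 3)
Traceback (most recent call last):
  ...
IndexError: ...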

def findSeparatedParts( tokens: List[Union[Lexeme, str]], sep: Union[str, int], start: int = 0, end: int = -1, groupStart: Union[str, int, NoneType] = None, groupEnd: Union[str, int, NoneType] = None) -> Generator[Tuple[int, int], NoneType, NoneType]:
478def findSeparatedParts(
479    tokens: LexedTokens,
480    sep: Union[str, int],
481    start: int = 0,
482    end: int = -1,
483    groupStart: Union[str, int, None] = None,
484    groupEnd: Union[str, int, None] = None
485) -> Generator[Tuple[int, int], None, None]:
486    """
487    Finds parts separated by a separator lexeme, such as ';' or ',', but
488    ignoring separators nested within groupStart/groupEnd pairs (if
489    those arguments are supplied). For each token sequence found, yields
490    a tuple containing the start index and end index for that part, with
491    separators not included in the parts.
492
493    If two separators appear in a row, the start/end pair will have a
494    start index one after the end index.
495
496    If there are no separators, yields one pair containing the start and
497    end of the entire tokens sequence.
498
499    Raises a `ParseError` if there are unbalanced grouping elements.
500
501    For example:
502
503    >>> list(findSeparatedParts(
504    ...     [ 'one' ],
505    ...     Lexeme.sepOrDelay,
506    ...     0,
507    ...     0,
508    ...     Lexeme.openParen,
509    ...     Lexeme.closeParen
510    ... ))
511    [(0, 0)]
512    >>> list(findSeparatedParts(
513    ...     [
514    ...         'best',
515    ...         Lexeme.openParen,
516    ...         'chess',
517    ...         Lexeme.sepOrDelay,
518    ...         'checkers',
519    ...         Lexeme.closeParen
520    ...     ],
521    ...     Lexeme.sepOrDelay,
522    ...     2,
523    ...     4,
524    ...     Lexeme.openParen,
525    ...     Lexeme.closeParen
526    ... ))
527    [(2, 2), (4, 4)]
528    """
529    start, end, n = normalizeEnds(tokens, start, end)
530    level = 0
531    thisStart = start
532    for i in range(start, end + 1):
533        token = tokens[i]
534        if token == sep and level == 0:
535            yield (thisStart, i - 1)
536            thisStart = i + 1
537        elif token == groupStart:
538            level += 1
539        elif token == groupEnd:
540            level -= 1
541            if level < 0:
542                raise ParseError("Unbalanced grouping tokens.")
543    if level > 0:
544        raise ParseError("Unbalanced grouping tokens.")
545    yield (thisStart, end)

Finds parts separated by a separator lexeme, such as ';' or ',', but ignoring separators nested within groupStart/groupEnd pairs (if those arguments are supplied). For each token sequence found, yields a tuple containing the start index and end index for that part, with separators not included in the parts.

If two separators appear in a row, the start/end pair will have a start index one after the end index.

If there are no separators, yields one pair containing the start and end of the entire tokens sequence.

Raises a ParseError if there are unbalanced grouping elements.

For example:

>>> list(findSeparatedParts(
...     [ 'one' ],
...     Lexeme.sepOrDelay,
...     0,
...     0,
...     Lexeme.openParen,
...     Lexeme.closeParen
... ))
[(0, 0)]
>>> list(findSeparatedParts(
...     [
...         'best',
...         Lexeme.openParen,
...         'chess',
...         Lexeme.sepOrDelay,
...         'checkers',
...         Lexeme.closeParen
...     ],
...     Lexeme.sepOrDelay,
...     2,
...     4,
...     Lexeme.openParen,
...     Lexeme.closeParen
... ))
[(2, 2), (4, 4)]
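
When two separators are adjacent, the empty part between them shows up as a pair whose start index is one more than its end index:

>>> list(findSeparatedParts(
...     ['a', Lexeme.sepOrDelay, Lexeme.sepOrDelay, 'b'],
...     Lexeme.sepOrDelay
... ))
[(0, 0), (2, 1), (3, 3)]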
K = TypeVar('K')

Type variable for dictionary keys.

V = TypeVar('V')

Type variable for dictionary values.

def checkCompleteness( name, mapping: Dict[K, V], keysSet: Optional[Set[K]] = None, valuesSet: Optional[Set[V]] = None):
554def checkCompleteness(
555    name,
556    mapping: Dict[K, V],
557    keysSet: Optional[Set[K]] = None,
558    valuesSet: Optional[Set[V]] = None
559):
560    """
561    Checks that a dictionary has a certain exact set of keys (or
562    values). Raises a `ValueError` if it finds an extra or missing key
563    or value.
564    """
565    if keysSet is not None:
566        for key in mapping.keys():
567            if key not in keysSet:
568                raise ValueError(f"{name} has extra key {repr(key)}.")
569
570        for key in keysSet:
571            if key not in mapping:
572                raise ValueError(f"{name} is missing key {repr(key)}.")
573
574    if valuesSet is not None:
575        for value in mapping.values():
576            if value not in valuesSet:
577                raise ValueError(f"{name} has extra value {repr(value)}.")
578
579        checkVals = mapping.values()
580        for value in valuesSet:
581            if value not in checkVals:
582                raise ValueError(f"{name} is missing value {repr(value)}.")

Checks that a dictionary has a certain exact set of keys (or values). Raises a ValueError if it finds an extra or missing key or value.
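
For example (a minimal sketch; the exact error wording is illustrative):

>>> checkCompleteness('demo', {'a': 1}, keysSet={'a'})
>>> checkCompleteness('demo', {'a': 1}, keysSet={'a', 'b'})
Traceback (most recent call last):
  ...
ValueError: ...
>>> checkCompleteness('demo', {'a': 1}, valuesSet={1, 2})
Traceback (most recent call last):
  ...
ValueError: ...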

class ParseFormat:
 585class ParseFormat:
 586    """
 587    A ParseFormat manages the mapping from markers to entry types and
 588    vice versa.
 589    """
 590    def __init__(
 591        self,
 592        formatDict: Format = DEFAULT_FORMAT,
 593        effectNames: Dict[str, base.EffectType] = DEFAULT_EFFECT_NAMES,
 594        focalizationNames: Dict[
 595            str,
 596            base.DomainFocalization
 597        ] = DEFAULT_FOCALIZATION_NAMES,
 598        successFailureIndicators: Tuple[str, str] = DEFAULT_SF_INDICATORS
 599    ):
 600        """
 601        Sets up the parsing format. Requires a `Format` dictionary to
 602        define the specifics. Raises a `ValueError` unless the keys of
 603        the `Format` dictionary exactly match the `Lexeme` values.
 604        """
 605        self.formatDict = formatDict
 606        self.effectNames = effectNames
 607        self.focalizationNames = focalizationNames
 608        if (
 609            len(successFailureIndicators) != 2
 610        or any(len(i) != 1 for i in successFailureIndicators)
 611        ):
 612            raise ValueError(
 613                f"Invalid success/failure indicators: must be a pair of"
 614                f" length-1 strings. Got: {successFailureIndicators!r}"
 615            )
 616        self.successIndicator, self.failureIndicator = (
 617            successFailureIndicators
 618        )
 619
 620        # Check completeness for each dictionary
 621        checkCompleteness('formatDict', self.formatDict, set(Lexeme))
 622        checkCompleteness(
 623            'effectNames',
 624            self.effectNames,
 625            valuesSet=set(get_args(base.EffectType))
 626        )
 627        checkCompleteness(
 628            'focalizationNames',
 629            self.focalizationNames,
 630            valuesSet=set(get_args(base.DomainFocalization))
 631        )
 632
 633        # Build some reverse lookup dictionaries for specific
 634        # circumstances:
 635        self.reverseFormat = {y: x for (x, y) in self.formatDict.items()}
 636
 637        self.effectModMap = {
 638            self.formatDict[x]: x
 639            for x in [
 640                Lexeme.effectCharges,
 641                Lexeme.sepOrDelay,
 642                Lexeme.inCommon,
 643                Lexeme.isHidden
 644            ]
 645        }
 646
 647    def lex(self, content: str) -> LexedTokens:
 648        """
 649        Applies `lex` using this format's lexeme mapping.
 650        """
 651        return lex(content, self.reverseFormat)
 652
 653    def onOff(self, word: str) -> Optional[bool]:
 654        """
 655        Parses an on/off indicator and returns a boolean (`True` for on
 656        and `False` for off). Returns `None` if the word isn't either
 657        the 'on' or the 'off' word. Generates a `ParseWarning`
 658        (and still returns `None`) if the word is a case-swapped version
 659        of the 'on' or 'off' word and is not equal to either of them.
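
            Examples, using the default format (where the on and off
            words are '=on' and '=off'):

            >>> pf = ParseFormat()
            >>> pf.onOff('=on')
            True
            >>> pf.onOff('=off')
            False
            >>> pf.onOff('open') is None
            True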
 660        """
 661        onWord = self.formatDict[Lexeme.stateOn]
 662        offWord = self.formatDict[Lexeme.stateOff]
 663
 664        # Generate warning if we suspect a case error
 665        if (
 666            word.casefold() in (onWord, offWord)
 667        and word not in (onWord, offWord)
 668        ):
 669            warnings.warn(
 670                (
 671                    f"Word '{word}' cannot be interpreted as an on/off"
 672                    f" value, although it is almost one (the correct"
 673                    f" values are '{onWord}' and '{offWord}')."
 674                ),
 675                ParseWarning
 676            )
 677
 678        # return the appropriate value
 679        if word == onWord:
 680            return True
 681        elif word == offWord:
 682            return False
 683        else:
 684            return None
 685
 686    def matchingBrace(
 687        self,
 688        tokens: LexedTokens,
 689        where: int,
 690        opener: int = Lexeme.openCurly,
 691        closer: int = Lexeme.closeCurly
 692    ) -> int:
 693        """
 694        Returns the index within the given tokens list of the closing
 695        curly brace which matches the open brace at the specified index.
 696        You can specify custom `opener` and/or `closer` lexemes to find
 697        matching pairs of other things. Raises a `ParseError` if there
 698        is no opening brace at the specified index, or if there isn't a
 699        matching closing brace. Handles nested braces of the specified
 700        type.
 701
 702        Examples:
 703        >>> pf = ParseFormat()
 704        >>> ob = Lexeme.openCurly
 705        >>> cb = Lexeme.closeCurly
 706        >>> pf.matchingBrace([ob, cb], 0)
 707        1
 708        >>> pf.matchingBrace([ob, cb], 1)
 709        Traceback (most recent call last):
 710          ...
 711        exploration.parsing.ParseError: ...
 712        >>> pf.matchingBrace(['hi', ob, cb], 0)
 713        Traceback (most recent call last):
 714          ...
 715        exploration.parsing.ParseError: ...
 716        >>> pf.matchingBrace(['hi', ob, cb], 1)
 717        2
 718        >>> pf.matchingBrace(['hi', ob, 'lo', cb], 1)
 719        3
 720        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 1)
 721        Traceback (most recent call last):
 722          ...
 723        exploration.parsing.ParseError: ...
 724        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 0)
 725        3
 726        >>> pf.matchingBrace([ob, ob, cb, cb], 0)
 727        3
 728        >>> pf.matchingBrace([ob, ob, cb, cb], 1)
 729        2
 730        >>> pf.matchingBrace([ob, cb, ob, cb], 0)
 731        1
 732        >>> pf.matchingBrace([ob, cb, ob, cb], 2)
 733        3
 734        >>> pf.matchingBrace([ob, cb, cb, cb], 0)
 735        1
 736        >>> pf.matchingBrace([ob, ob, ob, cb], 0)
 737        Traceback (most recent call last):
 738          ...
 739        exploration.parsing.ParseError: ...
 740        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 0)
 741        7
 742        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 1)
 743        6
 744        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 2)
 745        Traceback (most recent call last):
 746          ...
 747        exploration.parsing.ParseError: ...
 748        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 3)
 749        4
 750        >>> op = Lexeme.openParen
 751        >>> cp = Lexeme.closeParen
 752        >>> pf.matchingBrace([ob, op, ob, cp], 1, op, cp)
 753        3
 754        """
 755        if where >= len(tokens):
 756            raise ParseError(
 757                f"Out-of-bounds brace start: index {where} with"
 758                f" {len(tokens)} tokens."
 759            )
 760        if tokens[where] != opener:
 761            raise ParseError(
 762                f"Can't find matching brace for token"
 763                f" {repr(tokens[where])} at index {where} because it's"
 764                f" not an open brace."
 765            )
 766
 767        level = 1
 768        for i in range(where + 1, len(tokens)):
 769            token = tokens[i]
 770            if token == opener:
 771                level += 1
 772            elif token == closer:
 773                level -= 1
 774                if level == 0:
 775                    return i
 776
 777        raise ParseError(
 778            f"Failed to find a matching closing brace from index {where}."
 779        )
 780
 781    def parseFocalization(self, word: str) -> base.DomainFocalization:
 782        """
 783        Parses a focalization type for a domain, recognizing
 784        'domainFocalizationSingular', 'domainFocalizationPlural', and
 785        'domainFocalizationSpreading'.
 786        """
 787        try:
 788            return self.focalizationNames[word]
 789        except KeyError:
 790            raise ParseError(
 791                f"Invalid domain focalization name {repr(word)}. Valid"
 792                f" names are: {repr(list(self.focalizationNames))}."
 793            )
 794
 795    def parseTagValue(self, value: str) -> base.TagValue:
 796        """
 797        Converts a string to a tag value, following these rules:
 798
 799        1. If the string is exactly one of 'None', 'True', or 'False', we
 800            convert it to the corresponding Python value.
 801        2. If the string can be converted to an integer without raising a
 802            ValueError, we use that integer.
 803        3. If the string can be converted to a float without raising a
 804            ValueError, we use that float.
 805        4. Otherwise, it remains a string.
 806
 807        Note that there is currently no syntax for using list, dictionary,
 808        Requirement, or Consequence tag values.
 809        TODO: Support those types?
 810
 811        Examples:
 812
 813        >>> pf = ParseFormat()
 814        >>> pf.parseTagValue('hi')
 815        'hi'
 816        >>> pf.parseTagValue('3')
 817        3
 818        >>> pf.parseTagValue('3.0')
 819        3.0
 820        >>> pf.parseTagValue('True')
 821        True
 822        >>> pf.parseTagValue('False')
 823        False
 824        >>> pf.parseTagValue('None') is None
 825        True
 826        >>> pf.parseTagValue('none')
 827        'none'
 828        """
 829        # TODO: Allow these keywords to be redefined?
 830        if value == 'True':
 831            return True
 832        elif value == 'False':
 833            return False
 834        elif value == 'None':
 835            return None
 836        else:
 837            try:
 838                return int(value)
 839            except ValueError:
 840                try:
 841                    return float(value)
 842                except ValueError:
 843                    return value
 844
 845    def unparseTagValue(self, value: base.TagValue) -> str:
 846        """
 847        Converts a tag value into a string that would be parsed back into a
 848        tag value via `parseTagValue`. Currently does not work for list,
 849        dictionary, Requirement, or Consequence values.
 850        TODO: Those
 851        """
 852        return str(value)
 853
 854    def hasZoneParts(self, name: str) -> bool:
 855        """
 856        Returns true if the specified name contains zone parts (using
 857        the `zoneSeparator`).
 858        """
 859        return self.formatDict[Lexeme.zoneSeparator] in name
 860
 861    def splitZone(
 862        self,
 863        name: str
 864    ) -> Tuple[List[base.Zone], base.DecisionName]:
 865        """
 866        Splits a decision name that includes zone information into the
 867        list-of-zones part and the decision part. If there is no zone
 868        information in the name, the list-of-zones will be an empty
 869        list.
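
            For example (a quick sketch using the default '::' zone
            separator):

            >>> pf = ParseFormat()
            >>> pf.splitZone('zone::where')
            (['zone'], 'where')
            >>> pf.splitZone('where')
            ([], 'where')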
 870        """
 871        sep = self.formatDict[Lexeme.zoneSeparator]
 872        parts = name.split(sep)
 873        return (list(parts[:-1]), parts[-1])
 874
 875    def prefixWithZone(
 876        self,
 877        name: base.DecisionName,
 878        zone: base.Zone
 879    ) -> base.DecisionName:
 880        """
 881        Returns the given decision name, prefixed with the given zone
 882        name. Does NOT check whether the decision name already includes
 883        a prefix or not.
 884        """
 885        return zone + self.formatDict[Lexeme.zoneSeparator] + name
 886
 887    def parseAnyTransitionFromTokens(
 888        self,
 889        tokens: LexedTokens,
 890        start: int = 0
 891    ) -> Tuple[base.TransitionWithOutcomes, int]:
 892        """
 893        Parses a `base.TransitionWithOutcomes` from a tokens list,
 894        accepting either a transition name or a transition name followed
 895        by a `Lexeme.withDetails` followed by a string of success and
 896        failure indicator characters. Returns a tuple containing a
 897        `base.TransitionWithOutcomes` and an integer indicating the end
 898        index of the parsed item within the tokens.
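
            Examples (a sketch assuming the default success/failure
            indicators are 's' and 'f'):

            >>> pf = ParseFormat()  # assumes 's'/'f' default indicators
            >>> pf.parseAnyTransitionFromTokens(['jump'])
            (('jump', []), 0)
            >>> pf.parseAnyTransitionFromTokens(
            ...     ['jump', Lexeme.withDetails, 'sf']
            ... )
            (('jump', [True, False]), 2)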
 899        """
 900        # Normalize start index so we can do index math
 901        if start < 0:
 902            useIndex = len(tokens) + start
 903        else:
 904            useIndex = start
 905
 906        try:
 907            first = tokens[useIndex]
 908        except IndexError:
 909            raise ParseError(
 910                f"Invalid token index: {start!r} among {len(tokens)}"
 911                f" tokens."
 912            )
 913
 914        if isinstance(first, Lexeme):
 915            raise ParseError(
 916                f"Expecting a transition name (possibly with a"
 917                f" success/failure indicator string) but first token is"
 918                f" {first!r}."
 919            )
 920
 921        try:
 922            second = tokens[useIndex + 1]
 923            third = tokens[useIndex + 2]
 924        except IndexError:
 925            return ((first, []), useIndex)
 926
 927        if second != Lexeme.withDetails or isinstance(third, Lexeme):
 928            return ((first, []), useIndex)
 929
 930        outcomes = []
 931        for char in third:
 932            if char == self.successIndicator:
 933                outcomes.append(True)
 934            elif char == self.failureIndicator:
 935                outcomes.append(False)
 936            else:
 937                return ((first, []), useIndex)
 938
 939        return ((first, outcomes), useIndex + 2)
 940
 941    def parseTransitionWithOutcomes(
 942        self,
 943        content: str
 944    ) -> base.TransitionWithOutcomes:
 945        """
 946        Takes a transition that may have outcomes listed as a series of
 947        s/f strings after a `Lexeme.withDetails` marker ('%' by
 948        default) and returns the corresponding
 948        `TransitionWithOutcomes` tuple. Calls `lex` and then
 949        `parseAnyTransitionFromTokens`.
 950        """
 951        return self.parseAnyTransitionFromTokens(self.lex(content))[0]
 952
 953    def unparseTransitionWithOutocmes(
 954        self,
 955        transition: base.AnyTransition
 956    ) -> str:
 957        """
 958        Turns a `base.AnyTransition` back into a string that would parse
 959        to an equivalent `base.TransitionWithOutcomes` via
 960        `parseTransitionWithOutcomes`. If a bare `base.Transition` is
 961        given, returns a string that would result in a
 962        `base.TransitionWithOutcomes` that has an empty outcomes
 963        sequence.
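
            For example (a brief sketch, again assuming 's'/'f' as the
            default success/failure indicators):

            >>> pf = ParseFormat()  # assumes 's'/'f' default indicators
            >>> pf.unparseTransitionWithOutocmes('jump')
            'jump'
            >>> pf.unparseTransitionWithOutocmes(('jump', [True, False]))
            'jump%sf'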
 964        """
 965        if isinstance(transition, base.Transition):
 966            return transition
 967        elif (
 968            isinstance(transition, tuple)
 969        and len(transition) == 2
 970        and isinstance(transition[0], base.Transition)
 971        and isinstance(transition[1], list)
 972        and all(isinstance(sfi, bool) for sfi in transition[1])
 973        ):
 974            if len(transition[1]) == 0:
 975                return transition[0]
 976            else:
 977                result = transition[0] + self.formatDict[Lexeme.withDetails]
 978                for outcome in transition[1]:
 979                    if outcome:
 980                        result += self.successIndicator
 981                    else:
 982                        result += self.failureIndicator
 983                return result
 984        else:
 985            raise TypeError(
 986                f"Invalid AnyTransition: neither a string, nor a"
 987                f" length-2 tuple consisting of a string followed by a"
 988                f" list of booleans. Got: {transition!r}"
 989            )
 990
 991    def parseSpecificTransition(
 992        self,
 993        content: str
 994    ) -> Tuple[base.DecisionName, base.Transition]:
 995        """
 996        Splits a decision/transition pair into its decision and transition
 997        parts, using the `Lexeme.withDetails` separator ('%' by default).
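
            For example (using the default '%' separator):

            >>> pf = ParseFormat()
            >>> pf.parseSpecificTransition('cliff%jump')
            ('cliff', 'jump')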
 998        """
 999        sep = self.formatDict[Lexeme.withDetails]
1000        n = content.count(sep)
1001        if n == 0:
1002            raise ParseError(
1003                f"Cannot split '{content}' into a decision name and a"
1004                f" transition name (no separator '{sep}' found)."
1005            )
1006        elif n > 1:
1007            raise ParseError(
1008                f"Cannot split '{content}' into a decision name and a"
1009                f" transition name (too many ({n}) '{sep}' separators"
1010                f" found)."
1011            )
1012        else:
1013            return cast(
1014                Tuple[base.DecisionName, base.Transition],
1015                tuple(content.split(sep))
1016            )
1017
1018    def splitDirections(
1019        self,
1020        content: str
1021    ) -> Tuple[Optional[str], Optional[str]]:
1022        """
 1023        Splits a piece of text using the `Lexeme.reciprocalSeparator`
1024        into two pieces. If there is no separator, the second piece will
1025        be `None`; if either side of the separator is blank, that side
1026        will be `None`, and if there is more than one separator, a
1027        `ParseError` will be raised. Whitespace will be stripped from
1028        both sides of each result.
1029
1030        Examples:
1031
1032        >>> pf = ParseFormat()
1033        >>> pf.splitDirections('abc / def')
1034        ('abc', 'def')
1035        >>> pf.splitDirections('abc def ')
1036        ('abc def', None)
1037        >>> pf.splitDirections('abc def /')
1038        ('abc def', None)
1039        >>> pf.splitDirections('/abc def')
1040        (None, 'abc def')
1041        >>> pf.splitDirections('a/b/c') # doctest: +IGNORE_EXCEPTION_DETAIL
1042        Traceback (most recent call last):
1043          ...
1044        ParseError: ...
1045        """
1046        sep = self.formatDict[Lexeme.reciprocalSeparator]
1047        count = content.count(sep)
1048        if count > 1:
1049            raise ParseError(
1050                f"Too many split points ('{sep}') in content:"
1051                f" '{content}' (only one is allowed)."
1052            )
1053
1054        elif count == 1:
1055            before, after = content.split(sep)
1056            before = before.strip()
1057            after = after.strip()
1058            return (before or None, after or None)
1059
1060        else: # no split points
1061            stripped = content.strip()
1062            if stripped:
1063                return stripped, None
1064            else:
1065                return None, None
1066
1067    def parseItem(
1068        self,
1069        item: str
1070    ) -> Union[
1071        base.Capability,
1072        Tuple[base.Token, int],
1073        Tuple[base.MechanismName, base.MechanismState]
1074    ]:
1075        """
1076        Parses an item, which is a capability (just a string), a
1077        token-type*number pair (returned as a tuple with the number
1078        converted to an integer), or a mechanism-name:state pair
1079        (returned as a tuple with the state as a string). The
 1080        `Lexeme.tokenCount` and `Lexeme.mechanismSeparator` format
1081        values determine the separators that this looks for.
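
            Examples (a quick sketch using the default '*' and ':'
            separators):

            >>> pf = ParseFormat()
            >>> pf.parseItem('jump')
            'jump'
            >>> pf.parseItem('coin*3')
            ('coin', 3)
            >>> pf.parseItem('door:open')
            ('door', 'open')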
1082        """
1083        tsep = self.formatDict[Lexeme.tokenCount]
1084        msep = self.formatDict[Lexeme.mechanismSeparator]
1085        if tsep in item:
1086            # It's a token w/ an associated count
1087            parts = item.split(tsep)
1088            if len(parts) != 2:
1089                raise ParseError(
1090                    f"Item '{item}' has a '{tsep}' but doesn't separate"
1091                    f" into a token type and a count."
1092                )
1093            typ, count = parts
1094            try:
1095                num = int(count)
1096            except ValueError:
1097                raise ParseError(
1098                    f"Item '{item}' has invalid token count '{count}'."
1099                )
1100
1101            return (typ, num)
1102        elif msep in item:
1103            parts = item.split(msep)
1104            mechanism = msep.join(parts[:-1])
1105            state = parts[-1]
 1106            if mechanism.endswith(msep):
1107                # Just a zone-qualified name...
1108                return item
1109            else:
1110                return (mechanism, state)
1111        else:
1112            # It's just a capability
1113            return item
1114
1115    def unparseDecisionSpecifier(self, spec: base.DecisionSpecifier) -> str:
1116        """
1117        Turns a decision specifier back into a string, which would be
1118        parsed as a decision specifier as part of various different
1119        things.
1120
1121        For example:
1122
1123        >>> pf = ParseFormat()
1124        >>> pf.unparseDecisionSpecifier(
1125        ...     base.DecisionSpecifier(None, None, 'where')
1126        ... )
1127        'where'
1128        >>> pf.unparseDecisionSpecifier(
1129        ...     base.DecisionSpecifier(None, 'zone', 'where')
1130        ... )
1131        'zone::where'
1132        >>> pf.unparseDecisionSpecifier(
1133        ...     base.DecisionSpecifier('domain', 'zone', 'where')
1134        ... )
1135        'domain//zone::where'
1136        >>> pf.unparseDecisionSpecifier(
1137        ...     base.DecisionSpecifier('domain', None, 'where')
1138        ... )
1139        'domain//where'
1140        """
1141        result = spec.name
1142        if spec.zone is not None:
1143            result = (
1144                spec.zone
1145              + self.formatDict[Lexeme.zoneSeparator]
1146              + result
1147            )
1148        if spec.domain is not None:
1149            result = (
1150                spec.domain
1151              + self.formatDict[Lexeme.domainSeparator]
1152              + result
1153            )
1154        return result
1155
1156    def unparseMechanismSpecifier(
1157        self,
1158        spec: base.MechanismSpecifier
1159    ) -> str:
1160        """
1161        Turns a mechanism specifier back into a string, which would be
1162        parsed as a mechanism specifier as part of various different
1163        things. Note that a mechanism specifier with a zone part but no
1164        decision part is not valid, since it would parse as a decision
1165        part instead.
1166
1167        For example:
1168
1169        >>> pf = ParseFormat()
1170        >>> pf.unparseMechanismSpecifier(
1171        ...     base.MechanismSpecifier(None, None, None, 'lever')
1172        ... )
1173        'lever'
1174        >>> pf.unparseMechanismSpecifier(
1175        ...     base.MechanismSpecifier('domain', 'zone', 'decision', 'door')
1176        ... )
1177        'domain//zone::decision::door'
1178        >>> pf.unparseMechanismSpecifier(
1179        ...     base.MechanismSpecifier('domain', None, None, 'door')
1180        ... )
1181        'domain//door'
1182        >>> pf.unparseMechanismSpecifier(
1183        ...     base.MechanismSpecifier(None, 'a', 'b', 'door')
1184        ... )
1185        'a::b::door'
1186        >>> pf.unparseMechanismSpecifier(
1187        ...     base.MechanismSpecifier(None, 'a', None, 'door')
1188        ... )
1189        Traceback (most recent call last):
1190        ...
1191        exploration.base.InvalidMechanismSpecifierError...
1192        >>> pf.unparseMechanismSpecifier(
1193        ...     base.MechanismSpecifier(None, None, 'a', 'door')
1194        ... )
1195        'a::door'
1196        """
1197        if spec.decision is None and spec.zone is not None:
1198            raise base.InvalidMechanismSpecifierError(
1199                f"Mechanism specifier has a zone part but no decision"
1200                f" part; it cannot be unparsed since it would parse"
1201                f" differently:\n{spec}"
1202            )
1203        result = spec.name
1204        if spec.decision is not None:
1205            result = (
1206                spec.decision
1207              + self.formatDict[Lexeme.zoneSeparator]
1208              + result
1209            )
1210        if spec.zone is not None:
1211            result = (
1212                spec.zone
1213              + self.formatDict[Lexeme.zoneSeparator]
1214              + result
1215            )
1216        if spec.domain is not None:
1217            result = (
1218                spec.domain
1219              + self.formatDict[Lexeme.domainSeparator]
1220              + result
1221            )
1222        return result
1223
1224    def effectType(self, effectMarker: str) -> Optional[base.EffectType]:
1225        """
1226        Returns the `base.EffectType` string corresponding to the
1227        given effect marker string. Returns `None` for an unrecognized
1228        marker.
1229        """
1230        return self.effectNames.get(effectMarker)
1231
1232    def parseCommandFromTokens(
1233        self,
1234        tokens: LexedTokens,
1235        start: int = 0,
1236        end: int = -1
1237    ) -> commands.Command:
1238        """
1239        Given tokens that specify a `commands.Command`, parses that
1240        command and returns it. Really just turns the tokens back into
1241        strings and calls `commands.command`.
1242
1243        For example:
1244
1245        >>> pf = ParseFormat()
1246        >>> t = ['val', '5']
1247        >>> c = commands.command(*t)
1248        >>> pf.parseCommandFromTokens(t) == c
1249        True
1250        >>> t = ['op', Lexeme.tokenCount, '$val', '$val']
1251        >>> c = commands.command('op', '*', '$val', '$val')
1252        >>> pf.parseCommandFromTokens(t) == c
1253        True
1254        """
1255        start, end, nTokens = normalizeEnds(tokens, start, end)
1256        args: List[str] = []
1257        for token in tokens[start:end + 1]:
1258            if isinstance(token, Lexeme):
1259                args.append(self.formatDict[token])
1260            else:
1261                args.append(token)
1262
1263        if len(args) == 0:
1264            raise ParseError(
1265                f"No arguments for command:\n{tokens[start:end + 1]}"
1266            )
1267        return commands.command(*args)
1268
1269    def unparseCommand(self, command: commands.Command) -> str:
1270        """
1271        Turns a `Command` back into the string that would produce that
1272        command when parsed using `parseCommandList`.
1273
1274        Note that the results will be more explicit in some cases than what
1275        `parseCommandList` would accept as input.
1276
1277        For example:
1278
1279        >>> pf = ParseFormat()
1280        >>> pf.unparseCommand(
1281        ...     commands.LiteralValue(command='val', value='5')
1282        ... )
1283        'val 5'
1284        >>> pf.unparseCommand(
1285        ...     commands.LiteralValue(command='val', value='"5"')
1286        ... )
1287        'val "5"'
1288        >>> pf.unparseCommand(
1289        ...     commands.EstablishCollection(
1290        ...         command='empty',
1291        ...         collection='list'
1292        ...     )
1293        ... )
1294        'empty list'
1295        >>> pf.unparseCommand(
1296        ...     commands.AppendValue(command='append', value='$_')
1297        ... )
1298        'append $_'
1299        """
1300        candidate = None
1301        for k, v in commands.COMMAND_SETUP.items():
1302            if v[0] == type(command):
1303                if candidate is None:
1304                    candidate = k
1305                else:
1306                    raise ValueError(
1307                        f"COMMAND_SETUP includes multiple keys with"
1308                        f" {type(command)} as their value type:"
1309                        f" '{candidate}' and '{k}'."
1310                    )
1311
1312        if candidate is None:
1313            raise ValueError(
1314                f"COMMAND_SETUP has no key with {type(command)} as its"
1315                f" value type."
1316            )
1317
1318        result = candidate
1319        for x in command[1:]:
1320            # TODO: Is this hack good enough?
1321            result += ' ' + str(x)
1322        return result
1323
1324    def unparseCommandList(self, commands: List[commands.Command]) -> str:
1325        """
1326        Takes a list of commands and returns a string that would parse
1327        into them using `parseOneEffectArg`. The result contains
1328        newlines and indentation to make it easier to read.
1329
1330        For example:
1331
1332        >>> pf = ParseFormat()
1333        >>> pf.unparseCommandList(
1334        ...     [commands.command('val', '5'), commands.command('pop')]
1335        ... )
1336        '{\\n  val 5;\\n  pop;\\n}'
1337        """
1338        result = self.formatDict[Lexeme.openCurly]
1339        for cmd in commands:
1340            result += f'\n  {self.unparseCommand(cmd)};'
1341        if len(commands) > 0:
1342            result += '\n'
1343        return result + self.formatDict[Lexeme.closeCurly]
1344
1345    def parseCommandListFromTokens(
1346        self,
1347        tokens: LexedTokens,
1348        start: int = 0
1349    ) -> Tuple[List[commands.Command], int]:
1350        """
1351        Parses a command list from a list of lexed tokens, which must
1352        start with `Lexeme.openCurly`. Returns the parsed command list
1353        as a list of `commands.Command` objects, along with the end
 1354        index of that command list (which will be the index of the
 1355        matching closing curly brace).
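
            For example (mirroring the command-list case shown under
            `parseOneEffectArg`):

            >>> pf = ParseFormat()
            >>> pf.parseCommandListFromTokens(
            ...     [Lexeme.openCurly, 'val', '5', Lexeme.closeCurly]
            ... ) == ([commands.command('val', '5')], 3)
            True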
1356        """
1357        end = self.matchingBrace(
1358            tokens,
1359            start,
1360            Lexeme.openCurly,
1361            Lexeme.closeCurly
1362        )
1363        parts = list(
1364            findSeparatedParts(
1365                tokens,
1366                Lexeme.consequenceSeparator,
1367                start + 1,
1368                end - 1,
1369                Lexeme.openCurly,
1370                Lexeme.closeCurly,
1371            )
1372        )
1373        return (
1374            [
1375                self.parseCommandFromTokens(tokens, fromIndex, toIndex)
1376                for fromIndex, toIndex in parts
1377                if fromIndex <= toIndex  # ignore empty parts
1378            ],
1379            end
1380        )
1381
1382    def parseOneEffectArg(
1383        self,
1384        tokens: LexedTokens,
1385        start: int = 0,
1386        limit: Optional[int] = None
1387    ) -> Tuple[
1388        Union[
1389            base.Capability,  # covers 'str' possibility
1390            Tuple[base.Token, base.TokenCount],
1391            Tuple[Literal['skill'], base.Skill, base.Level],
1392            Tuple[base.MechanismSpecifier, base.MechanismState],
1393            base.DecisionSpecifier,
1394            base.DecisionID,
1395            Literal[Lexeme.inCommon, Lexeme.isHidden],
1396            Tuple[Literal[Lexeme.sepOrDelay, Lexeme.effectCharges], int],
1397            List[commands.Command]
1398        ],
1399        int
1400    ]:
1401        """
1402        Looks at tokens starting at the specified position and parses
1403        one or more of them as an effect argument (an argument that
1404        could be given to `base.effect`). Looks at various key `Lexeme`s
1405        to determine which type to use.
1406
1407        Items in the tokens list beyond the specified limit will not be
1408        considered, even when they in theory could be grouped with items
1409        up to the limit into a more complex argument.
1410
1411        For example:
1412
1413        >>> pf = ParseFormat()
1414        >>> pf.parseOneEffectArg(['hi'])
1415        ('hi', 0)
1416        >>> pf.parseOneEffectArg(['hi'], 1)
1417        Traceback (most recent call last):
1418        ...
1419        IndexError...
1420        >>> pf.parseOneEffectArg(['hi', 'bye'])
1421        ('hi', 0)
1422        >>> pf.parseOneEffectArg(['hi', 'bye'], 1)
1423        ('bye', 1)
1424        >>> pf.parseOneEffectArg(
1425        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1426        ...     0
1427        ... )
1428        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1429 name='gate'), 'open'), 2)
1430        >>> pf.parseOneEffectArg(
1431        ...     ['set', 'gate', Lexeme.mechanismSeparator, 'open'],
1432        ...     1
1433        ... )
1434        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1435 name='gate'), 'open'), 3)
1436        >>> pf.parseOneEffectArg(
1437        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1438        ...     1
1439        ... )
1440        Traceback (most recent call last):
1441        ...
1442        exploration.parsing.ParseError...
1443        >>> pf.parseOneEffectArg(
1444        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1445        ...     2
1446        ... )
1447        ('open', 2)
1448        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, '10'], 0)
1449        (('gold', 10), 2)
1450        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, 'ten'], 0)
1451        Traceback (most recent call last):
1452        ...
1453        exploration.parsing.ParseError...
1454        >>> pf.parseOneEffectArg([Lexeme.inCommon], 0)
1455        (<Lexeme.inCommon: ...>, 0)
1456        >>> pf.parseOneEffectArg([Lexeme.isHidden], 0)
1457        (<Lexeme.isHidden: ...>, 0)
1458        >>> pf.parseOneEffectArg([Lexeme.tokenCount, '3'], 0)
1459        Traceback (most recent call last):
1460        ...
1461        exploration.parsing.ParseError...
1462        >>> pf.parseOneEffectArg([Lexeme.effectCharges, '3'], 0)
1463        ((<Lexeme.effectCharges: ...>, 3), 1)
1464        >>> pf.parseOneEffectArg([Lexeme.tokenCount, 3], 0)  # int is a lexeme
1465        Traceback (most recent call last):
1466        ...
1467        exploration.parsing.ParseError...
1468        >>> pf.parseOneEffectArg([Lexeme.sepOrDelay, '-2'], 0)
1469        ((<Lexeme.sepOrDelay: ...>, -2), 1)
1470        >>> pf.parseOneEffectArg(['agility', Lexeme.skillLevel, '3'], 0)
1471        (('skill', 'agility', 3), 2)
1472        >>> pf.parseOneEffectArg(
1473        ...     [
1474        ...         'main',
1475        ...         Lexeme.domainSeparator,
1476        ...         'zone',
1477        ...         Lexeme.zoneSeparator,
1478        ...         'decision',
1479        ...         Lexeme.zoneSeparator,
1480        ...         'compass',
1481        ...         Lexeme.mechanismSeparator,
1482        ...         'north',
1483        ...         'south',
1484        ...         'east',
1485        ...         'west'
1486        ...     ],
1487        ...     0
1488        ... )
1489        ((MechanismSpecifier(domain='main', zone='zone',\
1490 decision='decision', name='compass'), 'north'), 8)
1491        >>> pf.parseOneEffectArg(
1492        ...     [
1493        ...         'before',
1494        ...         'main',
1495        ...         Lexeme.domainSeparator,
1496        ...         'zone',
1497        ...         Lexeme.zoneSeparator,
1498        ...         'decision',
1499        ...         Lexeme.zoneSeparator,
1500        ...         'compass',
1501        ...         'north',
1502        ...         'south',
1503        ...         'east',
1504        ...         'west'
1505        ...     ],
1506        ...     1
1507        ... )  # a mechanism specifier without a state will become a
1508        ...    # decision specifier
1509        (DecisionSpecifier(domain='main', zone='zone',\
1510 name='decision'), 5)
1511        >>> tokens = [
1512        ...     'set',
1513        ...     'main',
1514        ...     Lexeme.domainSeparator,
1515        ...     'zone',
1516        ...     Lexeme.zoneSeparator,
1517        ...     'compass',
1518        ...     'north',
1519        ...     'bounce',
1520        ... ]
1521        >>> pf.parseOneEffectArg(tokens, 0)
1522        ('set', 0)
1523        >>> pf.parseDecisionSpecifierFromTokens(tokens, 1)
1524        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1525        >>> pf.parseOneEffectArg(tokens, 1)
1526        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1527        >>> pf.parseOneEffectArg(tokens, 6)
1528        ('north', 6)
1529        >>> pf.parseOneEffectArg(tokens, 7)
1530        ('bounce', 7)
1531        >>> pf.parseOneEffectArg(
1532        ...     [
1533        ...         "fort", Lexeme.zoneSeparator, "gate",
1534        ...             Lexeme.mechanismSeparator, "open",
1535        ...     ],
1536        ...     0
1537        ... )
1538        ((MechanismSpecifier(domain=None, zone=None, decision='fort',\
1539 name='gate'), 'open'), 4)
1540        >>> pf.parseOneEffectArg(
1541        ...     [Lexeme.openCurly, 'val', '5', Lexeme.closeCurly],
1542        ...     0
1543        ... ) == ([commands.command('val', '5')], 3)
1544        True
1545        >>> a = [
1546        ...     Lexeme.openCurly, 'val', '5', Lexeme.closeCurly,
1547        ...     Lexeme.openCurly, 'append', Lexeme.consequenceSeparator,
1548        ...     'pop', Lexeme.closeCurly
1549        ... ]
1550        >>> cl = [
1551        ...     [commands.command('val', '5')],
1552        ...     [commands.command('append'), commands.command('pop')]
1553        ... ]
1554        >>> pf.parseOneEffectArg(a, 0) == (cl[0], 3)
1555        True
1556        >>> pf.parseOneEffectArg(a, 4) == (cl[1], 8)
1557        True
1558        >>> pf.parseOneEffectArg(a, 1)
1559        ('val', 1)
1560        >>> pf.parseOneEffectArg(a, 2)
1561        ('5', 2)
1562        >>> pf.parseOneEffectArg(a, 3)
1563        Traceback (most recent call last):
1564        ...
1565        exploration.parsing.ParseError...
1566        """
1567        start, limit, nTokens = normalizeEnds(
1568            tokens,
1569            start,
1570            limit if limit is not None else -1
1571        )
1572        if nTokens == 0:
1573            raise ParseError("No effect arguments available.")
1574
1575        first = tokens[start]
1576
1577        if nTokens == 1:
1578            if first in (Lexeme.inCommon, Lexeme.isHidden):
1579                return (first, start)
1580            elif not isinstance(first, str):
1581                raise ParseError(
1582                    f"Only one token and it's a special character"
1583                    f" ({first} = {repr(self.formatDict[first])})"
1584                )
1585            else:
1586                return (cast(base.Capability, first), start)
1587
1588        assert (nTokens > 1)
1589
1590        second = tokens[start + 1]
1591
1592        # Command lists start with an open curly brace and effect
 1593        # modifiers start with a Lexeme, but nothing else does
1594        if first == Lexeme.openCurly:
1595            return self.parseCommandListFromTokens(tokens, start)
1596        elif first in (Lexeme.inCommon, Lexeme.isHidden):
1597            return (first, start)
1598        elif first in (Lexeme.sepOrDelay, Lexeme.effectCharges):
1599            if not isinstance(second, str):
1600                raise ParseError(
1601                    f"Token following a modifier that needs a count"
1602                    f" must be a string in tokens:"
1603                    f"\n{tokens[start:limit or len(tokens)]}"
1604                )
1605            try:
1606                val = int(second)
1607            except ValueError:
1608                raise ParseError(
1609                    f"Token following a modifier that needs a count"
1610                    f" must be convertible to an int:"
1611                    f"\n{tokens[start:limit or len(tokens)]}"
1612                )
1613
1614            first = cast(
1615                Literal[Lexeme.sepOrDelay, Lexeme.effectCharges],
1616                first
1617            )
1618            return ((first, val), start + 1)
1619        elif not isinstance(first, str):
1620            raise ParseError(
1621                f"First token must be a string unless it's a modifier"
1622                f" lexeme or command/reversion-set opener. Got:"
1623                f"\n{tokens[start:limit or len(tokens)]}"
1624            )
1625
1626        # If we have two strings in a row, then the first is our parsed
1627        # value alone and we'll parse the second separately.
1628        if isinstance(second, str):
1629            return (first, start)
1630        elif second in (Lexeme.inCommon, Lexeme.isHidden):
1631            return (first, start)
1632
1633        # Must have at least 3 tokens at this point, or else we need to
1634        # have the inCommon or isHidden lexeme second.
1635        if nTokens < 3:
1636            return (first, start)
1637
1638        third = tokens[start + 2]
1639        if not isinstance(third, str):
1640            return (first, start)
1641
1642        second = cast(Lexeme, second)
1643        third = cast(str, third)
1644
1645        if second in (Lexeme.tokenCount, Lexeme.skillLevel):
1646            try:
1647                num = int(third)
1648            except ValueError:
1649                raise ParseError(
1650                    f"Invalid effect tokens: count for Tokens or level"
1651                    f" for Skill must be convertible to an integer."
1652                    f"\n{tokens[start:limit + 1]}"
1653                )
1654            if second == Lexeme.tokenCount:
1655                return ((first, num), start + 2)  # token/count pair
1656            else:
 1657                return (('skill', first, num), start + 2)  # skill/level triple
1658
1659        elif second == Lexeme.mechanismSeparator:  # bare mechanism
1660            return (
1661                (
1662                    base.MechanismSpecifier(
1663                        domain=None,
1664                        zone=None,
1665                        decision=None,
1666                        name=first
1667                    ),
1668                    third
1669                ),
1670                start + 2
1671            )
1672
1673        elif second in (Lexeme.domainSeparator, Lexeme.zoneSeparator):
1674            try:
1675                mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
1676                    tokens,
1677                    start
1678                )  # works whether it's a mechanism or decision specifier...
1679            except ParseError:
1680                return self.parseDecisionSpecifierFromTokens(tokens, start)
1681            if mEnd + 2 > limit:
1682                # No room for following mechanism separator + state
1683                return self.parseDecisionSpecifierFromTokens(tokens, start)
1684            sep = tokens[mEnd + 1]
1685            after = tokens[mEnd + 2]
1686            if sep == Lexeme.mechanismSeparator:
1687                if not isinstance(after, str):
1688                    raise ParseError(
1689                        f"Mechanism separator not followed by state:"
1690                        f"\n{tokens[start]}"
1691                    )
1692                return ((mSpec, after), mEnd + 2)
1693            else:
1694                # No mechanism separator afterwards
1695                return self.parseDecisionSpecifierFromTokens(tokens, start)
1696
1697        else:  # unrecognized as a longer combo
1698            return (first, start)
1699
1700    def coalesceEffectArgs(
1701        self,
1702        tokens: LexedTokens,
1703        start: int = 0,
1704        end: int = -1
1705    ) -> Tuple[
1706        List[  # List of effect args
1707            Union[
1708                base.Capability,  # covers 'str' possibility
1709                Tuple[base.Token, base.TokenCount],
1710                Tuple[Literal['skill'], base.Skill, base.Level],
1711                Tuple[base.MechanismSpecifier, base.MechanismState],
1712                base.DecisionSpecifier,
1713                List[commands.Command],
1714                Set[str]
1715            ]
1716        ],
1717        Tuple[  # Slots for modifiers: common/hidden/charges/delay
1718            Optional[bool],
1719            Optional[bool],
1720            Optional[int],
1721            Optional[int],
1722        ]
1723    ]:
1724        """
1725        Given a region of a lexed tokens list which contains one or more
1726        effect arguments, combines token sequences representing things
1727        like capabilities, mechanism states, token counts, and skill
1728        levels, representing these using the tuples that would be passed
1729        to `base.effect`. Returns a tuple with two elements:
1730
1731        - First, a list that contains several different kinds of
1732            objects, each of which is distinguishable by its type or
1733            part of its value.
1734        - Next, a tuple with four entries for common, hidden, charges,
1735            and delay values, based on the presence of modifier
1736            sequences. Any or all of these may be `None` if the relevant
1737            modifier was not present (the usual case).
1738
1739        For example:
1740
1741        >>> pf = ParseFormat()
1742        >>> pf.coalesceEffectArgs(["jump"])
1743        (['jump'], (None, None, None, None))
1744        >>> pf.coalesceEffectArgs(["coin", Lexeme.tokenCount, "3", "fly"])
1745        ([('coin', 3), 'fly'], (None, None, None, None))
1746        >>> pf.coalesceEffectArgs(
1747        ...     [
1748        ...         "fort", Lexeme.zoneSeparator, "gate",
1749        ...             Lexeme.mechanismSeparator, "open"
1750        ...     ]
1751        ... )
1752        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1753 name='gate'), 'open')], (None, None, None, None))
1754        >>> pf.coalesceEffectArgs(
1755        ...     [
1756        ...         "main", Lexeme.domainSeparator, "cliff"
1757        ...     ]
1758        ... )
1759        ([DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1760 (None, None, None, None))
1761        >>> pf.coalesceEffectArgs(
1762        ...     [
1763        ...         "door", Lexeme.mechanismSeparator, "open"
1764        ...     ]
1765        ... )
1766        ([(MechanismSpecifier(domain=None, zone=None, decision=None,\
1767 name='door'), 'open')], (None, None, None, None))
1768        >>> pf.coalesceEffectArgs(
1769        ...     [
1770        ...         "fort", Lexeme.zoneSeparator, "gate",
1771        ...             Lexeme.mechanismSeparator, "open",
1772        ...         "canJump",
1773        ...         "coins", Lexeme.tokenCount, "3",
1774        ...         Lexeme.inCommon,
1775        ...         "agility", Lexeme.skillLevel, "-1",
1776        ...         Lexeme.sepOrDelay, "0",
1777        ...         "main", Lexeme.domainSeparator, "cliff"
1778        ...     ]
1779        ... )
1780        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1781 name='gate'), 'open'), 'canJump', ('coins', 3), ('skill', 'agility', -1),\
1782 DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1783 (True, None, None, 0))
1784        >>> pf.coalesceEffectArgs(["bounce", Lexeme.isHidden])
1785        (['bounce'], (None, True, None, None))
1786        >>> pf.coalesceEffectArgs(
1787        ...     ["goto", "3", Lexeme.inCommon, Lexeme.isHidden]
1788        ... )
1789        (['goto', '3'], (True, True, None, None))
1790        """
1791        start, end, nTokens = normalizeEnds(tokens, start, end)
1792        where = start
1793        result: List[  # List of effect args
1794            Union[
1795                base.Capability,  # covers 'str' possibility
1796                Tuple[base.Token, base.TokenCount],
1797                Tuple[Literal['skill'], base.Skill, base.Level],
1798                Tuple[base.MechanismSpecifier, base.MechanismState],
1799                base.DecisionSpecifier,
1800                List[commands.Command],
1801                Set[str]
1802            ]
1803        ] = []
1804        inCommon: Optional[bool] = None
1805        isHidden: Optional[bool] = None
1806        charges: Optional[int] = None
1807        delay: Optional[int] = None
1808        while where <= end:
1809            following, thisEnd = self.parseOneEffectArg(tokens, where, end)
1810            if following == Lexeme.inCommon:
1811                if inCommon is not None:
1812                    raise ParseError(
1813                        f"In-common effect modifier specified more than"
1814                        f" once in effect args:"
1815                        f"\n{tokens[start:end + 1]}"
1816                    )
1817                inCommon = True
1818            elif following == Lexeme.isHidden:
1819                if isHidden is not None:
1820                    raise ParseError(
1821                        f"Is-hidden effect modifier specified more than"
1822                        f" once in effect args:"
1823                        f"\n{tokens[start:end + 1]}"
1824                    )
1825                isHidden = True
1826            elif (
1827                isinstance(following, tuple)
1828            and len(following) == 2
1829            and following[0] in (Lexeme.effectCharges, Lexeme.sepOrDelay)
1830            and isinstance(following[1], int)
1831            ):
1832                if following[0] == Lexeme.effectCharges:
1833                    if charges is not None:
1834                        raise ParseError(
1835                            f"Charges effect modifier specified more than"
1836                            f" once in effect args:"
1837                            f"\n{tokens[start:end + 1]}"
1838                        )
1839                    charges = following[1]
1840                else:
1841                    if delay is not None:
1842                        raise ParseError(
1843                            f"Delay effect modifier specified more than"
1844                            f" once in effect args:"
1845                            f"\n{tokens[start:end + 1]}"
1846                        )
1847                    delay = following[1]
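                # Otherwise accept any of the grouped-argument shapes
                # produced by parseOneEffectArg: a capability, a
                # token/count pair, a skill/level triple, a mechanism
                # state pair, a decision specifier, a command list, or
                # a set of strings.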
1848            elif (
1849                    isinstance(following, base.Capability)
1850                 or (
1851                    isinstance(following, tuple)
1852                and len(following) == 2
1853                and isinstance(following[0], base.Token)
1854                and isinstance(following[1], base.TokenCount)
1855                ) or (
1856                    isinstance(following, tuple)
1857                and len(following) == 3
1858                and following[0] == 'skill'
1859                and isinstance(following[1], base.Skill)
1860                and isinstance(following[2], base.Level)
1861                ) or (
1862                    isinstance(following, tuple)
1863                and len(following) == 2
1864                and isinstance(following[0], base.MechanismSpecifier)
1865                and isinstance(following[1], base.MechanismState)
1866                ) or (
1867                    isinstance(following, base.DecisionSpecifier)
1868                ) or (
1869                    isinstance(following, list)
1870                and all(isinstance(item, tuple) for item in following)
1871                    # TODO: Stricter command list check here?
1872                ) or (
1873                    isinstance(following, set)
1874                and all(isinstance(item, str) for item in following)
1875                )
1876            ):
1877                result.append(following)
1878            else:
1879                raise ParseError(f"Invalid coalesced argument: {following}")
1880            where = thisEnd + 1
1881
1882        return (result, (inCommon, isHidden, charges, delay))
1883
1884    def parseEffectFromTokens(
1885        self,
1886        tokens: LexedTokens,
1887        start: int = 0,
1888        end: int = -1
1889    ) -> base.Effect:
1890        """
1891        Given a region of a list of lexed tokens specifying an effect,
1892        returns the `Effect` object that those tokens specify.
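
            For example (this mirrors the first `parseEffect` example;
            here the token list is written out directly):

            >>> pf = ParseFormat()
            >>> pf.parseEffectFromTokens(['gain', 'jump']) == base.effect(
            ...     gain='jump'
            ... )
            True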
1893        """
1894        start, end, nTokens = normalizeEnds(tokens, start, end)
1895
1896        # Check for empty list
1897        if nTokens == 0:
1898            raise ParseError(
1899                "Effect must include at least a type."
1900            )
1901
1902        firstPart = tokens[start]
1903
1904        if isinstance(firstPart, Lexeme):
1905            raise ParseError(
1906                f"First part of effect must be an effect type. Got"
1907                f" {firstPart} ({repr(self.formatDict[firstPart])})."
1908            )
1909
1910        firstPart = cast(str, firstPart)
1911
1912        # Get the effect type
1913        fType = self.effectType(firstPart)
1914
1915        if fType is None:
1916            raise ParseError(
1917                f"Unrecognized effect type {firstPart!r}. Check the"
1918                f" EffectType entries in the effect names dictionary."
1919            )
1920
1921        if start + 1 > end:  # No tokens left: set empty args
1922            groupedArgs: List[
1923                Union[
1924                    base.Capability,  # covers 'str' possibility
1925                    Tuple[base.Token, base.TokenCount],
1926                    Tuple[Literal['skill'], base.Skill, base.Level],
1927                    Tuple[base.MechanismSpecifier, base.MechanismState],
1928                    base.DecisionSpecifier,
1929                    List[commands.Command],
1930                    Set[str]
1931                ]
1932            ] = []
1933            modifiers: Tuple[
1934                Optional[bool],
1935                Optional[bool],
1936                Optional[int],
1937                Optional[int]
1938            ] = (None, None, None, None)
1939        else:  # Coalesce remaining tokens if there are any
1940            groupedArgs, modifiers = self.coalesceEffectArgs(
1941                tokens,
1942                start + 1,
1943                end
1944            )
1945
1946        # Set up arguments for base.effect and handle modifiers first
1947        args: Dict[
1948            str,
1949            Union[
1950                None,
1951                base.ContextSpecifier,
1952                base.Capability,
1953                Tuple[base.Token, base.TokenCount],
1954                Tuple[Literal['skill'], base.Skill, base.Level],
1955                Tuple[base.MechanismSpecifier, base.MechanismState],
1956                Tuple[base.MechanismSpecifier, List[base.MechanismState]],
1957                List[base.Capability],
1958                base.AnyDecisionSpecifier,
1959                Tuple[base.AnyDecisionSpecifier, base.FocalPointName],
1960                bool,
1961                int,
1962                base.SaveSlot,
1963                Tuple[base.SaveSlot, Set[str]]
1964            ]
1965        ] = {}
1966        if modifiers[0]:
1967            args['applyTo'] = 'common'
1968        args['hidden'] = bool(modifiers[1])
1972        if modifiers[2] is not None:
1973            args['charges'] = modifiers[2]
1974        if modifiers[3] is not None:
1975            args['delay'] = modifiers[3]
1976
1977        # Now handle the main effect-type-based argument
1978        if fType in ("gain", "lose"):
1979            if len(groupedArgs) != 1:
1980                raise ParseError(
1981                    f"'{fType}' effect must have exactly one grouped"
1982                    f" argument (got {len(groupedArgs)}):\n{groupedArgs}"
1983                )
1984            thing = groupedArgs[0]
1985            if isinstance(thing, tuple):
1986                if len(thing) == 2:
1987                    if (
1988                        not isinstance(thing[0], base.Token)
1989                     or not isinstance(thing[1], base.TokenCount)
1990                    ):
1991                        raise ParseError(
1992                            f"'{fType}' effect grouped arg pair must be a"
1993                            f" (token, amount) pair. Got:\n{thing}"
1994                        )
1995                elif len(thing) == 3:
1996                    if (
1997                        thing[0] != 'skill'
1998                     or not isinstance(thing[1], base.Skill)
1999                     or not isinstance(thing[2], base.Level)
2000                    ):
2001                        raise ParseError(
2002                            f"'{fType}' effect grouped arg triple must be a"
2003                            f" ('skill', skill, level) triple. Got:\n{thing}"
2004                        )
2005                else:
2006                    raise ParseError(
2007                        f"'{fType}' effect grouped arg tuple must have"
2008                        f" length 2 or 3. Got (length {len(thing)}):\n{thing}"
2009                    )
2010            elif not isinstance(thing, base.Capability):
2011                raise ParseError(
2012                    f"'{fType}' effect grouped arg must be a capability"
2013                    f" or a (token, amount) tuple. Got:\n{thing}"
2014                )
2015            args[fType] = thing
2016            return base.effect(**args)  # type:ignore
2017
2018        elif fType == "set":
2019            if len(groupedArgs) != 1:
2020                raise ParseError(
2021                    f"'{fType}' effect must have exactly one grouped"
2022                    f" argument (got {len(groupedArgs)}):\n{groupedArgs}"
2023                )
2024            setVal = groupedArgs[0]
2025            if not isinstance(setVal, tuple):
2029                raise ParseError(
2030                    f"'{fType}' effect grouped arg must be a tuple. Got:"
2031                    f"\n{setVal}"
2032                )
2033            if len(setVal) == 2:
2034                setWhat, setTo = setVal
2035                if (
2036                    isinstance(setWhat, base.Token)
2037                and isinstance(setTo, base.TokenCount)
2038                ) or (
2039                    isinstance(setWhat, base.MechanismSpecifier)
2040                and isinstance(setTo, base.MechanismState)
2041                ):
2042                    args[fType] = setVal
2043                    return base.effect(**args)  # type:ignore
2044                else:
2045                    raise ParseError(
2046                        f"Invalid '{fType}' effect grouped args:"
2047                        f"\n{groupedArgs}"
2048                    )
2049            elif len(setVal) == 3:
2050                indicator, whichSkill, setTo = setVal
2051                if (
2052                    indicator == 'skill'
2053                and isinstance(whichSkill, base.Skill)
2054                and isinstance(setTo, base.Level)
2055                ):
2056                    args[fType] = setVal
2057                    return base.effect(**args)  # type:ignore
2058                else:
2059                    raise ParseError(
2060                        f"Invalid '{fType}' effect grouped args (not a"
2061                        f" skill):\n{groupedArgs}"
2062                    )
2063            else:
2064                raise ParseError(
2065                    f"Invalid '{fType}' effect grouped args (wrong"
2066                    f" length tuple):\n{groupedArgs}"
2067                )
2068
2069        elif fType == "toggle":
2070            if len(groupedArgs) == 0:
2071                raise ParseError(
2072                    f"'{fType}' effect must have at least one grouped"
2073                    f" argument. Got:\n{groupedArgs}"
2074                )
2075            if (
2076                isinstance(groupedArgs[0], tuple)
2077            and len(groupedArgs[0]) == 2
2078            and isinstance(groupedArgs[0][0], base.MechanismSpecifier)
2079            and isinstance(groupedArgs[0][1], base.MechanismState)
2080            and all(
2081                    isinstance(a, base.MechanismState)
2082                    for a in groupedArgs[1:]
2083                )
2084            ):  # a mechanism toggle
2085                args[fType] = (
2086                    groupedArgs[0][0],
2087                    cast(
2088                        List[base.MechanismState],
2089                        [groupedArgs[0][1]] + groupedArgs[1:]
2090                    )
2091                )
2092                return base.effect(**args)  # type:ignore
2093            elif all(isinstance(a, base.Capability) for a in groupedArgs):
2094                # a capability toggle
2095                args[fType] = cast(List[base.Capability], groupedArgs)
2096                return base.effect(**args)  # type:ignore
2097            else:
2098                raise ParseError(
2099                    f"Invalid arguments for '{fType}' effect. Got:"
2100                    f"\n{groupedArgs}"
2101                )
2102
2103        elif fType in ("bounce", "deactivate"):
2104            if len(groupedArgs) != 0:
2105                raise ParseError(
2106                    f"'{fType}' effect may not include any"
2107                    f" arguments. Got {len(groupedArgs)}:"
2108                    f"\n{groupedArgs}"
2109                )
2110            args[fType] = True
2111            return base.effect(**args)  # type:ignore
2112
2113        elif fType == "follow":
2114            if len(groupedArgs) != 1:
2115                raise ParseError(
2116                    f"'{fType}' effect must include exactly one"
2117                    f" argument. Got {len(groupedArgs)}:"
2118                    f"\n{groupedArgs}"
2119                )
2120
2121            transition = groupedArgs[0]
2122            if not isinstance(transition, base.Transition):
2123                raise ParseError(
2124                    f"Invalid argument for '{fType}' effect. Needed a"
2125                    f" transition but got:\n{groupedArgs}"
2126                )
2127            args[fType] = transition
2128            return base.effect(**args)  # type:ignore
2129
2130        elif fType == "edit":
2131            if len(groupedArgs) == 0:
2132                raise ParseError(
2133                    "An 'edit' effect requires at least one argument."
2134                )
2135            for i, arg in enumerate(groupedArgs):
2136                if not isinstance(arg, list):
2137                    raise ParseError(
2138                        f"'edit' effect argument {i} is not a sub-list:"
2139                        f"\n  {arg!r}"
2140                        f"\nAmong arguments:"
2141                        f"\n  {groupedArgs}"
2142                    )
2143                for j, cmd in enumerate(arg):
2144                    if not isinstance(cmd, tuple):
2145                        raise ParseError(
2146                            f"'edit' effect argument {i} contains"
2147                            f" non-tuple part {j}:"
2148                            f"\n  {cmd!r}"
2149                            f"\nAmong arguments:"
2150                            f"\n  {groupedArgs}"
2151                        )
2152
2153            args[fType] = groupedArgs  # type:ignore
2154            return base.effect(**args)  # type:ignore
2155
2156        elif fType == "goto":
2157            if len(groupedArgs) not in (1, 2):
2158                raise ParseError(
2159                    f"A 'goto' effect must include either one or two"
2160                    f" grouped arguments. Got {len(groupedArgs)}:"
2161                    f"\n{groupedArgs}"
2162                )
2163
2164            first = groupedArgs[0]
2165            if not isinstance(
2166                first,
2167                (base.DecisionName, base.DecisionSpecifier)
2168            ):
2169                raise ParseError(
2170                    f"'{fType}' effect must first specify a destination"
2171                    f" decision. Got:\n{groupedArgs}"
2172                )
2173
2174            # Check if it's really a decision ID
2175            dSpec: base.AnyDecisionSpecifier
2176            if isinstance(first, base.DecisionName):
2177                try:
2178                    dSpec = int(first)
2179                except ValueError:
2180                    dSpec = first
2181            else:
2182                dSpec = first
2183
2184            if len(groupedArgs) == 2:
2185                second = groupedArgs[1]
2186                if not isinstance(second, base.FocalPointName):
2187                    raise ParseError(
2188                        f"'{fType}' effect must have a focal point name"
2189                        f" if it has a second part. Got:\n{groupedArgs}"
2190                    )
2191                args[fType] = (dSpec, second)
2192            else:
2193                args[fType] = dSpec
2194
2195            return base.effect(**args)  # type:ignore
2196
2197        elif fType == "save":
2198            if len(groupedArgs) not in (0, 1):
2199                raise ParseError(
2200                    f"'{fType}' effect must include at most one"
2201                    f" argument. Got {len(groupedArgs)}:"
2202                    f"\n{groupedArgs}"
2203                )
2204
2205            if len(groupedArgs) == 1:
2206                slot = groupedArgs[0]
2207            else:
2208                slot = base.DEFAULT_SAVE_SLOT
2209            if not isinstance(slot, base.SaveSlot):
2210                raise ParseError(
2211                    f"Invalid argument for '{fType}' effect. Needed a"
2212                    f" save slot but got:\n{groupedArgs}"
2213                )
2214            args[fType] = slot
2215            return base.effect(**args)  # type:ignore
2216
2217        else:
2218            raise ParseError(f"Invalid effect type: '{fType}'.")
2219
2220    def parseEffect(self, effectStr: str) -> base.Effect:
2221        """
2222        Works like `parseEffectFromTokens` but starts with a raw string.
2223        For example:
2224
2225        >>> pf = ParseFormat()
2226        >>> pf.parseEffect("gain jump") == base.effect(gain='jump')
2227        True
2228        >>> pf.parseEffect("set door:open") == base.effect(
2229        ...     set=(
2230        ...         base.MechanismSpecifier(None, None, None, 'door'),
2231        ...         'open'
2232        ...     )
2233        ... )
2234        True
2235        >>> pf.parseEffect("set coins*10") == base.effect(set=('coins', 10))
2236        True
2237        >>> pf.parseEffect("set agility^3") == base.effect(
2238        ...     set=('skill', 'agility', 3)
2239        ... )
2240        True
2241        """
2242        return self.parseEffectFromTokens(self.lex(effectStr))
2243
2244    def unparseEffect(self, effect: base.Effect) -> str:
2245        """
2246        The opposite of `parseEffect`; turns an effect back into a
2247        string representation.
2248
2249        For example:
2250
2251        >>> pf = ParseFormat()
2252        >>> e = {
2253        ...     "type": "gain",
2254        ...     "applyTo": "active",
2255        ...     "value": "flight",
2256        ...     "delay": None,
2257        ...     "charges": None,
2258        ...     "hidden": False
2259        ... }
2260        >>> pf.unparseEffect(e)
2261        'gain flight'
2262        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2263        True
2264        >>> s = 'gain flight'
2265        >>> pf.unparseEffect(pf.parseEffect(s)) == s
2266        True
2267        >>> s2 = '  gain\\nflight'
2268        >>> pf.unparseEffect(pf.parseEffect(s2)) == s
2269        True
2270        >>> e = {
2271        ...     "type": "gain",
2272        ...     "applyTo": "active",
2273        ...     "value": ("gold", 5),
2274        ...     "delay": 1,
2275        ...     "charges": 2,
2276        ...     "hidden": False
2277        ... }
2278        >>> pf.unparseEffect(e)
2279        'gain gold*5 ,1 =2'
2280        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2281        True
2282        >>> e = {
2283        ...     "type": "set",
2284        ...     "applyTo": "active",
2285        ...     "value": (
2286        ...         base.MechanismSpecifier(None, None, None, "gears"),
2287        ...         "on"
2288        ...     ),
2289        ...     "delay": None,
2290        ...     "charges": 1,
2291        ...     "hidden": False
2292        ... }
2293        >>> pf.unparseEffect(e)
2294        'set gears:on =1'
2295        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2296        True
2297        >>> e = {
2298        ...     "type": "toggle",
2299        ...     "applyTo": "active",
2300        ...     "value": ["red", "blue"],
2301        ...     "delay": None,
2302        ...     "charges": None,
2303        ...     "hidden": False
2304        ... }
2305        >>> pf.unparseEffect(e)
2306        'toggle red blue'
2307        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2308        True
2309        >>> e = {
2310        ...     "type": "toggle",
2311        ...     "applyTo": "active",
2312        ...     "value": (
2313        ...         base.MechanismSpecifier(None, None, None, "switch"),
2314        ...         ["on", "off"]
2315        ...     ),
2316        ...     "delay": None,
2317        ...     "charges": None,
2318        ...     "hidden": False
2319        ... }
2320        >>> pf.unparseEffect(e)
2321        'toggle switch:on off'
2322        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2323        True
2324        >>> e = {
2325        ...     "type": "deactivate",
2326        ...     "applyTo": "active",
2327        ...     "value": None,
2328        ...     "delay": 2,
2329        ...     "charges": None,
2330        ...     "hidden": False
2331        ... }
2332        >>> pf.unparseEffect(e)
2333        'deactivate ,2'
2334        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2335        True
2336        >>> e = {
2337        ...     "type": "goto",
2338        ...     "applyTo": "common",
2339        ...     "value": 3,
2340        ...     "delay": None,
2341        ...     "charges": None,
2342        ...     "hidden": False
2343        ... }
2344        >>> pf.unparseEffect(e)
2345        'goto 3 +c'
2346        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2347        True
2348        >>> e = {
2349        ...     "type": "goto",
2350        ...     "applyTo": "common",
2351        ...     "value": 3,
2352        ...     "delay": None,
2353        ...     "charges": None,
2354        ...     "hidden": True
2355        ... }
2356        >>> pf.unparseEffect(e)
2357        'goto 3 +c +h'
2358        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2359        True
2360        >>> e = {
2361        ...     "type": "goto",
2362        ...     "applyTo": "active",
2363        ...     "value": 'home',
2364        ...     "delay": None,
2365        ...     "charges": None,
2366        ...     "hidden": False
2367        ... }
2368        >>> pf.unparseEffect(e)
2369        'goto home'
2370        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2371        True
2372        >>> e = base.effect(edit=[
2373        ...     [
2374        ...         commands.command('val', '5'),
2375        ...         commands.command('empty', 'list'),
2376        ...         commands.command('append', '$_')
2377        ...     ],
2378        ...     [
2379        ...         commands.command('val', '11'),
2380        ...         commands.command('assign', 'var', '$_'),
2381        ...         commands.command('op', '+', '$var', '$var')
2382        ...     ],
2383        ... ])
2384        >>> pf.unparseEffect(e)
2385        'edit {\\n  val 5;\\n  empty list;\\n  append $_;\\n}\
2386 {\\n  val 11;\\n  assign var $_;\\n  op + $var $var;\\n}'
2387        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2388        True
2389        """
2390        result: List[str] = []
2391
2392        # Reverse the effect type into a marker
2393        eType = effect['type']
2394        for key, val in self.effectNames.items():
2395            if val == eType:
2396                if len(result) != 0:
2397                    raise ParseError(
2398                        f"Effect map contains multiple matching entries"
2399                        f" for effect type '{effect['type']}':"
2400                        f" '{result[0]}' and '{key}'"
2401                    )
2402                result.append(key)
2403                # Don't break, because we want to check uniqueness
2404
2405        eVal = effect['value']
2406        if eType in ('gain', 'lose'):
2407            eVal = cast(Union[base.Capability, Tuple[base.Token, int]], eVal)
2408            if isinstance(eVal, str):  # a capability
2409                result.append(eVal)
2410            else:  # a token
2411                result.append(
2412                    eVal[0]
2413                  + self.formatDict[Lexeme.tokenCount]
2414                  + str(eVal[1])
2415                )
2416        elif eType == 'set':
2417            eVal = cast(
2418                # TODO: Add skill level setting here & elsewhere
2419                Union[
2420                    Tuple[base.Token, base.TokenCount],
2421                    Tuple[base.MechanismName, base.MechanismState]
2422                ],
2423                eVal
2424            )
2425            if len(eVal) != 2:
2426                raise ValueError(
2427                    f"'set' effect has non-length-2 value:"
2428                    f"\n  {repr(effect)}"
2429                )
2430            if isinstance(eVal[1], int):  # a token count
2431                result.append(eVal[0])
2432                result.append(self.formatDict[Lexeme.tokenCount])
2433                result.append(str(eVal[1]))
2434            else:  # a mechanism
2435                if isinstance(eVal[0], base.MechanismSpecifier):
2436                    mSpec = self.unparseMechanismSpecifier(eVal[0])
2437                else:
2438                    assert isinstance(eVal[0], base.MechanismName)
2439                    mSpec = eVal[0]
2440                result.append(
2441                    mSpec
2442                  + self.formatDict[Lexeme.mechanismSeparator]
2443                  + eVal[1]
2444                )
2445        elif eType == 'toggle':
2446            if isinstance(eVal, tuple):  # mechanism states
2447                tSpec, states = cast(
2448                    Tuple[
2449                        base.AnyMechanismSpecifier,
2450                        List[base.MechanismState]
2451                    ],
2452                    eVal
2453                )
2454                firstState = states[0]
2455                restStates = states[1:]
2456                if isinstance(tSpec, base.MechanismSpecifier):
2457                    mStr = self.unparseMechanismSpecifier(tSpec)
2458                else:
2459                    mStr = str(tSpec)
2460                result.append(
2461                    mStr
2462                  + self.formatDict[Lexeme.mechanismSeparator]
2463                  + firstState
2464                )
2465                result.extend(restStates)
2466            else:  # capabilities
2467                assert isinstance(eVal, list)
2468                eVal = cast(List[base.Capability], eVal)
2469                result.extend(eVal)
2470        elif eType in ('deactivate', 'bounce'):
2471            if eVal is not None:
2472                raise ValueError(
2473                    f"'{eType}' effect has non-None value:"
2474                    f"\n  {repr(effect)}"
2475                )
2476        elif eType == 'follow':
2477            eVal = cast(base.Transition, eVal)
2478            result.append(eVal)
2479        elif eType == 'edit':
2480            eVal = cast(List[List[commands.Command]], eVal)
2481            if len(eVal) == 0:
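                    # With no command lists at all, a bare '{}' replaces
                    # the effect name itself.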
2482                result[-1] = '{}'
2483            else:
2484                for cmdList in eVal:
2485                    result.append(
2486                        self.unparseCommandList(cmdList)
2487                    )
2488        elif eType == 'goto':
2489            if isinstance(eVal, base.DecisionSpecifier):
2490                result.append(self.unparseDecisionSpecifier(eVal))
2491            elif isinstance(eVal, (base.DecisionID, base.DecisionName)):
2492                result.append(str(eVal))
2493            elif (
2494                isinstance(eVal, tuple)
2495            and len(eVal) == 2
2496            and isinstance(eVal[1], base.FocalPointName)
2497            ):
2498                if isinstance(eVal[0], base.DecisionSpecifier):
2499                    result.append(self.unparseDecisionSpecifier(eVal[0]))
2500                else:
2501                    result.append(str(eVal[0]))
2502                result.append(eVal[1])
2503            else:
2504                raise ValueError(
2505                    f"'{eType}' effect has invalid value {eVal}"
2506                )
2507        else:
2508            raise ValueError(
2509                f"Unrecognized effect type '{eType}' in effect:"
2510                f"\n  {repr(effect)}"
2511            )
2512
2513        # Add modifier strings
2514        if effect['applyTo'] == 'common':
2515            result.append(self.formatDict[Lexeme.inCommon])
2516
2517        if effect['hidden']:
2518            result.append(self.formatDict[Lexeme.isHidden])
2519
2520        dVal = effect['delay']
2521        if dVal is not None:
2522            result.append(
2523                self.formatDict[Lexeme.sepOrDelay] + str(dVal)
2524            )
2525
2526        cVal = effect['charges']
2527        if cVal is not None:
2528            result.append(
2529                self.formatDict[Lexeme.effectCharges] + str(cVal)
2530            )
2531
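            # Join pieces with single spaces, except that pieces which
            # already start or end with whitespace (e.g., multi-line
            # command blocks) are concatenated as-is.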
2532        joined = ''
2533        before = False
2534        for r in result:
2535            if (
2536                r.startswith(' ')
2537             or r.startswith('\n')
2538             or r.endswith(' ')
2539             or r.endswith('\n')
2540            ):
2541                joined += r
2542                before = False
2543            else:
2544                joined += (' ' if before else '') + r
2545                before = True
2546        return joined
2547
2548    def parseDecisionSpecifierFromTokens(
2549        self,
2550        tokens: LexedTokens,
2551        start: int = 0
2552    ) -> Tuple[Union[base.DecisionSpecifier, int], int]:
2553        """
2554        Parses a decision specifier starting at the specified position
2555        in the given tokens list. No ending position is specified;
2556        instead, this function returns a tuple containing the parsed
2557        `base.DecisionSpecifier` (or `base.DecisionID`) along with the
2558        index in the tokens list where the specifier ends.
2559
2560        For example:
2561
2562        >>> pf = ParseFormat()
2563        >>> pf.parseDecisionSpecifierFromTokens(['m'])
2564        (DecisionSpecifier(domain=None, zone=None, name='m'), 0)
2565        >>> pf.parseDecisionSpecifierFromTokens(['12'])  # ID specifier
2566        (12, 0)
2567        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'])
2568        (DecisionSpecifier(domain=None, zone=None, name='a'), 0)
2569        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'], 1)
2570        (DecisionSpecifier(domain=None, zone=None, name='m'), 1)
2571        >>> pf.parseDecisionSpecifierFromTokens(
2572        ...     ['a', Lexeme.domainSeparator, 'm']
2573        ... )
2574        (DecisionSpecifier(domain='a', zone=None, name='m'), 2)
2575        >>> pf.parseDecisionSpecifierFromTokens(
2576        ...     ['a', Lexeme.zoneSeparator, 'm']
2577        ... )
2578        (DecisionSpecifier(domain=None, zone='a', name='m'), 2)
2579        >>> pf.parseDecisionSpecifierFromTokens(
2580        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
2581        ... )
2582        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2583        >>> pf.parseDecisionSpecifierFromTokens(
2584        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
2585        ... )
2586        (DecisionSpecifier(domain='a', zone='b', name='m'), 4)
2587        >>> pf.parseDecisionSpecifierFromTokens(
2588        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
2589        ... )
2590        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2591        >>> pf.parseDecisionSpecifierFromTokens(  # ID-style name w/ zone
2592        ...     ['a', Lexeme.zoneSeparator, '5'],
2593        ... )
2594        Traceback (most recent call last):
2595        ...
2596        exploration.base.InvalidDecisionSpecifierError...
2597        >>> pf.parseDecisionSpecifierFromTokens(
2598        ...     ['d', Lexeme.domainSeparator, '123']
2599        ... )
2600        Traceback (most recent call last):
2601        ...
2602        exploration.base.InvalidDecisionSpecifierError...
2603        >>> pf.parseDecisionSpecifierFromTokens(
2604        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2605        ...     1
2606        ... )
2607        Traceback (most recent call last):
2608        ...
2609        exploration.parsing.ParseError...
2610        >>> pf.parseDecisionSpecifierFromTokens(
2611        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2612        ...     2
2613        ... )
2614        (DecisionSpecifier(domain='b', zone=None, name='m'), 4)
2615        >>> pf.parseDecisionSpecifierFromTokens(
2616        ...     [
2617        ...         'a',
2618        ...         Lexeme.domainSeparator,
2619        ...         'b',
2620        ...         Lexeme.zoneSeparator,
2621        ...         'c',
2622        ...         Lexeme.zoneSeparator,
2623        ...         'm'
2624        ...     ]
2625        ... )
2626        (DecisionSpecifier(domain='a', zone='b', name='c'), 4)
2627        >>> pf.parseDecisionSpecifierFromTokens(
2628        ...     [
2629        ...         'a',
2630        ...         Lexeme.domainSeparator,
2631        ...         'b',
2632        ...         Lexeme.zoneSeparator,
2633        ...         'c',
2634        ...         Lexeme.zoneSeparator,
2635        ...         'm'
2636        ...     ],
2637        ...     2
2638        ... )
2639        (DecisionSpecifier(domain=None, zone='b', name='c'), 4)
2640        >>> pf.parseDecisionSpecifierFromTokens(
2641        ...     [
2642        ...         'a',
2643        ...         Lexeme.domainSeparator,
2644        ...         'b',
2645        ...         Lexeme.zoneSeparator,
2646        ...         'c',
2647        ...         Lexeme.zoneSeparator,
2648        ...         'm'
2649        ...     ],
2650        ...     4
2651        ... )
2652        (DecisionSpecifier(domain=None, zone='c', name='m'), 6)
2653        >>> pf.parseDecisionSpecifierFromTokens(
2654        ...     [
2655        ...         'set',
2656        ...         'main',
2657        ...         Lexeme.domainSeparator,
2658        ...         'zone',
2659        ...         Lexeme.zoneSeparator,
2660        ...         'compass',
2661        ...         'north',
2662        ...         'bounce',
2663        ...     ],
2664        ...     1
2665        ... )
2666        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
2667        """
2668        # Check bounds & normalize start index
2669        nTokens = len(tokens)
2670        if start < -nTokens:
2671            raise IndexError(
2672                f"Invalid start index {start} for {nTokens} tokens (too"
2673                f" negative)."
2674            )
2675        elif start >= nTokens:
2676            raise IndexError(
2677                f"Invalid start index {start} for {nTokens} tokens (too"
2678                f" big)."
2679            )
2680        elif start < 0:
2681            start = nTokens + start
2682
2683        assert (start < nTokens)
2684
2685        first = tokens[start]
2686        if not isinstance(first, str):
2687            raise ParseError(
2688                f"Invalid decision specifier (must start with a name"
2689                f" or ID; got: {first} = {self.formatDict[first]})."
2690            )
2691
2692        ds = base.DecisionSpecifier(None, None, first)
2693        result = (base.idOrDecisionSpecifier(ds), start)
2694
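            # Step forward through the tokens: a domain separator marks
            # `first` as the domain, while a zone separator marks it as
            # a zone.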
2695        domain = None
2696        zoneOrDecision = None
2697
2698        if start + 1 >= nTokens:  # at end of tokens
2699            return result
2700
2701        firstSep = tokens[start + 1]
2702        if firstSep == Lexeme.domainSeparator:
2703            domain = first
2704        elif firstSep == Lexeme.zoneSeparator:
2705            zoneOrDecision = first
2706        else:
2707            return result
2708
2709        if start + 2 >= nTokens:
2710            return result
2711
2712        second = tokens[start + 2]
2713        if isinstance(second, Lexeme):
2714            return result
2715
2716        ds = base.DecisionSpecifier(domain, zoneOrDecision, second)
2717        result = (base.idOrDecisionSpecifier(ds), start + 2)
2718
2719        if start + 3 >= nTokens:
2720            return result
2721
2722        secondSep = tokens[start + 3]
2723        if start + 4 >= nTokens:
2724            return result
2725
2726        third = tokens[start + 4]
2727        if secondSep != Lexeme.zoneSeparator:
2728            return result
2729        if zoneOrDecision is not None:  # two zone separators in a row
2730            return result
2731        if not isinstance(third, base.DecisionName):
2732            return result
2733        zoneOrDecision = second
2737
2738        if isinstance(third, Lexeme):
2739            return result
2740
2741        ds = base.DecisionSpecifier(domain, zoneOrDecision, third)
2742        return (base.idOrDecisionSpecifier(ds), start + 4)
2743
2744    def parseDecisionSpecifier(
2745        self,
2746        specString: str
2747    ) -> Union[base.DecisionID, base.DecisionSpecifier]:
2748        """
2749        Parses a full `DecisionSpecifier` from a single string. Can
2750        parse integer decision IDs in string form, and returns a
2751        `DecisionID` in that case, otherwise returns a
2752        `DecisionSpecifier`. Assumes that all int-convertible strings
2753        are decision IDs, so it cannot deal with decision names which
2754        are just numbers.
2755
2756        For example:
2757
2758        >>> pf = ParseFormat()
2759        >>> pf.parseDecisionSpecifier('example')
2760        DecisionSpecifier(domain=None, zone=None, name='example')
2761        >>> pf.parseDecisionSpecifier('outer::example')
2762        DecisionSpecifier(domain=None, zone='outer', name='example')
2763        >>> pf.parseDecisionSpecifier('domain//region::feature')
2764        DecisionSpecifier(domain='domain', zone='region', name='feature')
2765        >>> pf.parseDecisionSpecifier('123')
2766        123
2767        >>> pf.parseDecisionSpecifier('region::domain//feature')
2768        Traceback (most recent call last):
2769        ...
2770        exploration.base.InvalidDecisionSpecifierError...
2771        >>> pf.parseDecisionSpecifier('domain1//domain2//feature')
2772        Traceback (most recent call last):
2773        ...
2774        exploration.base.InvalidDecisionSpecifierError...
2775        >>> pf.parseDecisionSpecifier('domain//123')
2776        Traceback (most recent call last):
2777        ...
2778        exploration.base.InvalidDecisionSpecifierError...
2779        >>> pf.parseDecisionSpecifier('region::123')
2780        Traceback (most recent call last):
2781        ...
2782        exploration.base.InvalidDecisionSpecifierError...
2783        """
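            # Try the whole string as a decision ID first; otherwise lex
            # it and parse a decision specifier from the tokens.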
2784        try:
2785            return int(specString)
2786        except ValueError:
2787            tokens = self.lex(specString)
2788            result, end = self.parseDecisionSpecifierFromTokens(tokens)
2789            if end != len(tokens) - 1:
2790                raise base.InvalidDecisionSpecifierError(
2791                    f"Junk after end of decision specifier:"
2792                    f"\n{tokens[end + 1:]}"
2793                )
2794            return result
2795
2796    def parseFeatureSpecifierFromTokens(
2797        self,
2798        tokens: LexedTokens,
2799        start: int = 0,
2800        limit: int = -1
2801    ) -> Tuple[base.FeatureSpecifier, int]:
2802        """
2803        Parses a `FeatureSpecifier` starting from the specified part of
2804        a tokens list. Returns a tuple containing the feature specifier
2805        and the index of the last token of the feature specifier.
2806
2807        Can parse integer feature IDs in string form, as well as nested
2808        feature specifiers and plain feature specifiers. Assumes that
2809        all int-convertible strings are feature IDs, so it cannot deal
2810        with feature names which are just numbers.
2811
2812        For example:
2813
2814        >>> pf = ParseFormat()
2815        >>> pf.parseFeatureSpecifierFromTokens(['example'])
2816        (FeatureSpecifier(domain=None, within=[], feature='example',\
2817 part=None), 0)
2818        >>> pf.parseFeatureSpecifierFromTokens(['example1', 'example2'], 1)
2819        (FeatureSpecifier(domain=None, within=[], feature='example2',\
2820 part=None), 1)
2821        >>> pf.parseFeatureSpecifierFromTokens(
2822        ...     [
2823        ...         'domain',
2824        ...         Lexeme.domainSeparator,
2825        ...         'region',
2826        ...         Lexeme.zoneSeparator,
2827        ...         'feature',
2828        ...         Lexeme.partSeparator,
2829        ...         'part'
2830        ...     ]
2831        ... )
2832        (FeatureSpecifier(domain='domain', within=['region'],\
2833 feature='feature', part='part'), 6)
2834        >>> pf.parseFeatureSpecifierFromTokens(
2835        ...     [
2836        ...         'outerRegion',
2837        ...         Lexeme.zoneSeparator,
2838        ...         'midRegion',
2839        ...         Lexeme.zoneSeparator,
2840        ...         'innerRegion',
2841        ...         Lexeme.zoneSeparator,
2842        ...         'feature'
2843        ...     ]
2844        ... )
2845        (FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
2846 'innerRegion'], feature='feature', part=None), 6)
2847        >>> pf.parseFeatureSpecifierFromTokens(
2848        ...     [
2849        ...         'outerRegion',
2850        ...         Lexeme.zoneSeparator,
2851        ...         'midRegion',
2852        ...         Lexeme.zoneSeparator,
2853        ...         'innerRegion',
2854        ...         Lexeme.zoneSeparator,
2855        ...         'feature'
2856        ...     ],
2857        ...     1
2858        ... )
2859        Traceback (most recent call last):
2860        ...
2861        exploration.parsing.InvalidFeatureSpecifierError...
2862        >>> pf.parseFeatureSpecifierFromTokens(
2863        ...     [
2864        ...         'outerRegion',
2865        ...         Lexeme.zoneSeparator,
2866        ...         'midRegion',
2867        ...         Lexeme.zoneSeparator,
2868        ...         'innerRegion',
2869        ...         Lexeme.zoneSeparator,
2870        ...         'feature'
2871        ...     ],
2872        ...     2
2873        ... )
2874        (FeatureSpecifier(domain=None, within=['midRegion', 'innerRegion'],\
2875 feature='feature', part=None), 6)
2876        >>> pf.parseFeatureSpecifierFromTokens(
2877        ...     [
2878        ...         'outerRegion',
2879        ...         Lexeme.zoneSeparator,
2880        ...         'feature',
2881        ...         Lexeme.domainSeparator,
2882        ...         'after',
2883        ...     ]
2884        ... )
2885        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2886 feature='feature', part=None), 2)
2887        >>> pf.parseFeatureSpecifierFromTokens(
2888        ...     [
2889        ...         'outerRegion',
2890        ...         Lexeme.zoneSeparator,
2891        ...         'feature',
2892        ...         Lexeme.domainSeparator,
2893        ...         'after',
2894        ...     ],
2895        ...     2
2896        ... )
2897        (FeatureSpecifier(domain='feature', within=[], feature='after',\
2898 part=None), 4)
2899        >>> # Including a limit:
2900        >>> pf.parseFeatureSpecifierFromTokens(
2901        ...     [
2902        ...         'outerRegion',
2903        ...         Lexeme.zoneSeparator,
2904        ...         'midRegion',
2905        ...         Lexeme.zoneSeparator,
2906        ...         'feature',
2907        ...     ],
2908        ...     0,
2909        ...     2
2910        ... )
2911        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2912 feature='midRegion', part=None), 2)
2913        >>> pf.parseFeatureSpecifierFromTokens(
2914        ...     [
2915        ...         'outerRegion',
2916        ...         Lexeme.zoneSeparator,
2917        ...         'midRegion',
2918        ...         Lexeme.zoneSeparator,
2919        ...         'feature',
2920        ...     ],
2921        ...     0,
2922        ...     0
2923        ... )
2924        (FeatureSpecifier(domain=None, within=[], feature='outerRegion',\
2925 part=None), 0)
2926        >>> pf.parseFeatureSpecifierFromTokens(
2927        ...     [
2928        ...         'region',
2929        ...         Lexeme.zoneSeparator,
2930        ...         Lexeme.zoneSeparator,
2931        ...         'feature',
2932        ...     ]
2933        ... )
2934        (FeatureSpecifier(domain=None, within=[], feature='region',\
2935 part=None), 0)
2936        """
2937        start, limit, nTokens = normalizeEnds(tokens, start, limit)
2938
2939        if nTokens == 0:
2940            raise InvalidFeatureSpecifierError(
2941                "Can't parse a feature specifier from 0 tokens."
2942            )
2943        first = tokens[start]
2944        if isinstance(first, Lexeme):
2945            raise InvalidFeatureSpecifierError(
2946                f"Feature specifier can't begin with a special token."
2947                f" Got:\n{tokens[start:limit + 1]}"
2948            )
2949
2950        if nTokens in (1, 2):
2951            # 1 or 2 tokens: no room for a separator plus a second part
2952            fs = base.FeatureSpecifier(
2953                domain=None,
2954                within=[],
2955                feature=first,
2956                part=None
2957            )
2958            return (base.normalizeFeatureSpecifier(fs), start)
2959
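            # Peek at the separator and the token after it to decide
            # how (and whether) to keep parsing.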
2960        firstSep = tokens[start + 1]
2961        secondPart = tokens[start + 2]
2962
2963        if (
2964            firstSep not in (
2965                Lexeme.domainSeparator,
2966                Lexeme.zoneSeparator,
2967                Lexeme.partSeparator
2968            )
2969         or not isinstance(secondPart, str)
2970        ):
2971            # Following tokens won't work out
2972            fs = base.FeatureSpecifier(
2973                domain=None,
2974                within=[],
2975                feature=first,
2976                part=None
2977            )
2978            return (base.normalizeFeatureSpecifier(fs), start)
2979
2980        if firstSep == Lexeme.domainSeparator:
2981            if start + 2 > limit:
2982                return (
2983                    base.FeatureSpecifier(
2984                        domain=first,
2985                        within=[],
2986                        feature=secondPart,
2987                        part=None
2988                    ),
2989                    start + 2
2990                )
2991            else:
2992                rest, restEnd = self.parseFeatureSpecifierFromTokens(
2993                    tokens,
2994                    start + 2,
2995                    limit
2996                )
2997                if rest.domain is not None:  # two domainSeparators in a row
2998                    fs = base.FeatureSpecifier(
2999                        domain=first,
3000                        within=[],
3001                        feature=rest.domain,
3002                        part=None
3003                    )
3004                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3005                else:
3006                    fs = base.FeatureSpecifier(
3007                        domain=first,
3008                        within=rest.within,
3009                        feature=rest.feature,
3010                        part=rest.part
3011                    )
3012                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3013
3014        elif firstSep == Lexeme.zoneSeparator:
3015            if start + 2 > limit:
3016                fs = base.FeatureSpecifier(
3017                    domain=None,
3018                    within=[first],
3019                    feature=secondPart,
3020                    part=None
3021                )
3022                return (base.normalizeFeatureSpecifier(fs), start + 2)
3023            else:
3024                rest, restEnd = self.parseFeatureSpecifierFromTokens(
3025                    tokens,
3026                    start + 2,
3027                    limit
3028                )
3029                if rest.domain is not None:  # domain sep after zone sep
3030                    fs = base.FeatureSpecifier(
3031                        domain=None,
3032                        within=[first],
3033                        feature=rest.domain,
3034                        part=None
3035                    )
3036                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3037                else:
3038                    within = [first]
3039                    within.extend(rest.within)
3040                    fs = base.FeatureSpecifier(
3041                        domain=None,
3042                        within=within,
3043                        feature=rest.feature,
3044                        part=rest.part
3045                    )
3046                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3047
3048        else:  # must be partSeparator
3049            fs = base.FeatureSpecifier(
3050                domain=None,
3051                within=[],
3052                feature=first,
3053                part=secondPart
3054            )
3055            return (base.normalizeFeatureSpecifier(fs), start + 2)
3056
3057    def parseFeatureSpecifier(self, specString: str) -> base.FeatureSpecifier:
3058        """
3059        Parses a full `FeatureSpecifier` from a single string. See
3060        `parseFeatureSpecifierFromTokens`.
3061
3062        >>> pf = ParseFormat()
3063        >>> pf.parseFeatureSpecifier('example')
3064        FeatureSpecifier(domain=None, within=[], feature='example', part=None)
3065        >>> pf.parseFeatureSpecifier('outer::example')
3066        FeatureSpecifier(domain=None, within=['outer'], feature='example',\
3067 part=None)
3068        >>> pf.parseFeatureSpecifier('example%%middle')
3069        FeatureSpecifier(domain=None, within=[], feature='example',\
3070 part='middle')
3071        >>> pf.parseFeatureSpecifier('domain//region::feature%%part')
3072        FeatureSpecifier(domain='domain', within=['region'],\
3073 feature='feature', part='part')
3074        >>> pf.parseFeatureSpecifier(
3075        ...     'outerRegion::midRegion::innerRegion::feature'
3076        ... )
3077        FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
3078 'innerRegion'], feature='feature', part=None)
3079        >>> pf.parseFeatureSpecifier('region::domain//feature')
3080        Traceback (most recent call last):
3081        ...
3082        exploration.parsing.InvalidFeatureSpecifierError...
3083        >>> pf.parseFeatureSpecifier('feature%%part1%%part2')
3084        Traceback (most recent call last):
3085        ...
3086        exploration.parsing.InvalidFeatureSpecifierError...
3087        >>> pf.parseFeatureSpecifier('domain1//domain2//feature')
3088        Traceback (most recent call last):
3089        ...
3090        exploration.parsing.InvalidFeatureSpecifierError...
3091        >>> # TODO: Issue warnings for these...
3092        >>> pf.parseFeatureSpecifier('domain//123')  # domain discarded
3093        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3094        >>> pf.parseFeatureSpecifier('region::123')  # zone discarded
3095        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3096        >>> pf.parseFeatureSpecifier('123%%part')
3097        FeatureSpecifier(domain=None, within=[], feature=123, part='part')
3098        """
3099        tokens = self.lex(specString)
3100        result, rEnd = self.parseFeatureSpecifierFromTokens(tokens)
3101        if rEnd != len(tokens) - 1:
3102            raise InvalidFeatureSpecifierError(
3103                f"Feature specifier has extra stuff at end:"
3104                f" {tokens[rEnd + 1:]}"
3105            )
3106        else:
3107            return result
3108
3109    def normalizeFeatureSpecifier(
3110        self,
3111        spec: base.AnyFeatureSpecifier
3112    ) -> base.FeatureSpecifier:
3113        """
3114        Normalizes any kind of feature specifier into an official
3115        `FeatureSpecifier` tuple.
3116
3117        For example:
3118
3119        >>> pf = ParseFormat()
3120        >>> pf.normalizeFeatureSpecifier('town')
3121        FeatureSpecifier(domain=None, within=[], feature='town', part=None)
3122        >>> pf.normalizeFeatureSpecifier(5)
3123        FeatureSpecifier(domain=None, within=[], feature=5, part=None)
3124        >>> pf.parseFeatureSpecifierFromTokens(
3125        ...     [
3126        ...         'domain',
3127        ...         Lexeme.domainSeparator,
3128        ...         'region',
3129        ...         Lexeme.zoneSeparator,
3130        ...         'feature',
3131        ...         Lexeme.partSeparator,
3132        ...         'part'
3133        ...     ]
3134        ... )
3135        (FeatureSpecifier(domain='domain', within=['region'],\
3136 feature='feature', part='part'), 6)
3137        >>> pf.normalizeFeatureSpecifier('dom//one::two::three%%middle')
3138        FeatureSpecifier(domain='dom', within=['one', 'two'],\
3139 feature='three', part='middle')
3140        >>> pf.normalizeFeatureSpecifier(
3141        ...   base.FeatureSpecifier(None, ['region'], 'place', None)
3142        ... )
3143        FeatureSpecifier(domain=None, within=['region'], feature='place',\
3144 part=None)
3145        >>> fs = base.FeatureSpecifier(None, [], 'place', None)
3146        >>> ns = pf.normalizeFeatureSpecifier(fs)
3147        >>> ns is fs  # Doesn't create unnecessary clones
3148        True
3149        """
3150        if isinstance(spec, base.FeatureSpecifier):
3151            return spec
3152        elif isinstance(spec, base.FeatureID):
3153            return base.FeatureSpecifier(None, [], spec, None)
3154        elif isinstance(spec, str):
3155            return self.parseFeatureSpecifier(spec)
3156        else:
3157            raise TypeError(f"Invalid feature specifier type: '{type(spec)}'")
3158
3159    def unparseChallenge(self, challenge: base.Challenge) -> str:
3160        """
3161        Turns a `base.Challenge` into a string that can be turned back
3162        into an equivalent challenge by `parseChallenge`. For example:
3163
3164        >>> pf = ParseFormat()
3165        >>> c = base.challenge(
3166        ...     skills=base.BestSkill('brains', 'brawn'),
3167        ...     level=2,
3168        ...     success=[base.effect(set=('switch', 'on'))],
3169        ...     failure=[
3170        ...         base.effect(deactivate=True, delay=1),
3171        ...         base.effect(bounce=True)
3172        ...     ],
3173        ...     outcome=True
3174        ... )
3175        >>> r = pf.unparseChallenge(c)
3176        >>> r
3177        '<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}'
3178        >>> pf.parseChallenge(r) == c
3179        True
3180        >>> c2 = base.challenge(
3181        ...     skills=base.CombinedSkill(
3182        ...         -2,
3183        ...         base.ConditionalSkill(
3184        ...             base.ReqCapability('tough'),
3185        ...             base.BestSkill(1),
3186        ...             base.BestSkill(-1)
3187        ...         )
3188        ...     ),
3189        ...     level=-2,
3190        ...     success=[base.effect(gain='orb')],
3191        ...     failure=[],
3192        ...     outcome=None
3193        ... )
3194        >>> r2 = pf.unparseChallenge(c2)
3195        >>> r2
3196        '<-2>sum(-2, if(tough, best(1), best(-1))){gain orb}{}'
3197        >>> # TODO: let this parse through without BestSkills...
3198        >>> pf.parseChallenge(r2) == c2
3199        True
3200        """
3201        lt = self.formatDict[Lexeme.angleLeft]
3202        gt = self.formatDict[Lexeme.angleRight]
3203        result = (
3204            lt + str(challenge['level']) + gt
3205          + challenge['skills'].unparse()
3206        )
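        # A '>' before the success block marks a success outcome; a '>'
        # before the failure block marks failure; no marker means the
        # outcome is unknown.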
3207        if challenge['outcome'] is True:
3208            result += gt
3209        result += self.unparseConsequence(challenge['success'])
3210        if challenge['outcome'] is False:
3211            result += gt
3212        result += self.unparseConsequence(challenge['failure'])
3213        return result
3214
3215    def unparseCondition(self, condition: base.Condition) -> str:
3216        """
3217        Given a `base.Condition`, returns a string that would result in
3218        that condition if given to `parseCondition`. For example:
3219
3220        >>> pf = ParseFormat()
3221        >>> c = base.condition(
3222        ...     condition=base.ReqAny([
3223        ...         base.ReqCapability('brawny'),
3224        ...         base.ReqNot(base.ReqTokens('weights', 3))
3225        ...     ]),
3226        ...     consequence=[base.effect(gain='power')]
3227        ... )
3228        >>> r = pf.unparseCondition(c)
3229        >>> r
3230        '??((brawny|!(weights*3))){gain power}{}'
3231        >>> pf.parseCondition(r) == c
3232        True
3233        """
3234        return (
3235            self.formatDict[Lexeme.doubleQuestionmark]
3236          + self.formatDict[Lexeme.openParen]
3237          + condition['condition'].unparse()
3238          + self.formatDict[Lexeme.closeParen]
3239          + self.unparseConsequence(condition['consequence'])
3240          + self.unparseConsequence(condition['alternative'])
3241        )
3242
3243    def unparseConsequence(self, consequence: base.Consequence) -> str:
3244        """
3245        Given a `base.Consequence`, returns a string encoding of it,
3246        using the same format that `parseConsequence` will parse. Uses
3247        function-call-like syntax and curly braces to denote different
3248        sub-consequences. See also `SkillCombination.unparse` and
3249        `Requirement.unparse`. For example:
3250
3251        >>> pf = ParseFormat()
3252        >>> c = [base.effect(gain='one'), base.effect(lose='one')]
3253        >>> pf.unparseConsequence(c)
3254        '{gain one; lose one}'
3255        >>> c = [
3256        ...     base.challenge(
3257        ...         skills=base.BestSkill('brains', 'brawn'),
3258        ...         level=2,
3259        ...         success=[base.effect(set=('switch', 'on'))],
3260        ...         failure=[
3261        ...             base.effect(deactivate=True, delay=1),
3262        ...             base.effect(bounce=True)
3263        ...         ],
3264        ...         outcome=True
3265        ...     )
3266        ... ]
3267        >>> pf.unparseConsequence(c)
3268        '{<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}}'
3269        >>> c[0]['outcome'] = False
3270        >>> pf.unparseConsequence(c)
3271        '{<2>best(brains, brawn){set switch:on}>{deactivate ,1; bounce}}'
3272        >>> c[0]['outcome'] = None
3273        >>> pf.unparseConsequence(c)
3274        '{<2>best(brains, brawn){set switch:on}{deactivate ,1; bounce}}'
3275        >>> c = [
3276        ...     base.condition(
3277        ...         condition=base.ReqAny([
3278        ...             base.ReqCapability('brawny'),
3279        ...             base.ReqNot(base.ReqTokens('weights', 3))
3280        ...         ]),
3281        ...         consequence=[
3282        ...             base.challenge(
3283        ...                 skills=base.CombinedSkill('brains', 'brawn'),
3284        ...                 level=3,
3285        ...                 success=[base.effect(goto='home')],
3286        ...                 failure=[base.effect(bounce=True)],
3287        ...                 outcome=None
3288        ...             )
3289        ...         ]  # no alternative -> empty list
3290        ...     )
3291        ... ]
3292        >>> pf.unparseConsequence(c)
3293        '{??((brawny|!(weights*3))){\
3294<3>sum(brains, brawn){goto home}{bounce}}{}}'
3295        >>> c = [base.effect(gain='if(power){gain "mimic"}')]
3296        >>> # TODO: Make this work!
3297        >>> # pf.unparseConsequence(c)
3298
3299        '{gain "if(power){gain \\\\"mimic\\\\"}"}'
3300        """
3301        result = self.formatDict[Lexeme.openCurly]
3302        for item in consequence:
3303            if 'skills' in item:  # a Challenge
3304                item = cast(base.Challenge, item)
3305                result += self.unparseChallenge(item)
3306
3307            elif 'value' in item:  # an Effect
3308                item = cast(base.Effect, item)
3309                result += self.unparseEffect(item)
3310
3311            elif 'condition' in item:  # a Condition
3312                item = cast(base.Condition, item)
3313                result += self.unparseCondition(item)
3314
3315            else:  # bad dict
3316                raise TypeError(
3317                    f"Invalid consequence: items in the list must be"
3318                    f" Effects, Challenges, or Conditions (got a dictionary"
3319                    f" without 'skills', 'value', or 'condition' keys)."
3320                    f"\nGot item: {repr(item)}"
3321                )
3322            result += '; '
3323
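        # Remove the trailing separator after the final item: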
3324        if result.endswith('; '):
3325            result = result[:-2]
3326
3327        return result + self.formatDict[Lexeme.closeCurly]
3328
3329    def parseMechanismSpecifierFromTokens(
3330        self,
3331        tokens: LexedTokens,
3332        start: int = 0
3333    ) -> Tuple[base.MechanismSpecifier, int]:
3334        """
3335        Parses a mechanism specifier starting at the specified position
3336        in the given tokens list. No end position is taken; instead,
3337        this function returns a tuple containing the parsed
3338        `base.MechanismSpecifier` along with the index in the tokens
3339        list where the end of the specifier was found.
3340
3341        For example:
3342
3343        >>> pf = ParseFormat()
3344        >>> pf.parseMechanismSpecifierFromTokens(['m'])
3345        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3346 name='m'), 0)
3347        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'])
3348        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3349 name='a'), 0)
3350        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'], 1)
3351        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3352 name='m'), 1)
3353        >>> pf.parseMechanismSpecifierFromTokens(
3354        ...     ['a', Lexeme.domainSeparator, 'm']
3355        ... )
3356        (MechanismSpecifier(domain='a', zone=None, decision=None,\
3357 name='m'), 2)
3358        >>> pf.parseMechanismSpecifierFromTokens(
3359        ...     ['a', Lexeme.zoneSeparator, 'm']
3360        ... )
3361        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3362 name='m'), 2)
3363        >>> pf.parseMechanismSpecifierFromTokens(
3364        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
3365        ... )
3366        (MechanismSpecifier(domain=None, zone='a', decision='b',\
3367 name='m'), 4)
3368        >>> pf.parseMechanismSpecifierFromTokens(
3369        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
3370        ... )
3371        (MechanismSpecifier(domain='a', zone=None, decision='b',\
3372 name='m'), 4)
3373        >>> pf.parseMechanismSpecifierFromTokens(
3374        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
3375        ... )
3376        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3377 name='b'), 2)
3378        >>> pf.parseMechanismSpecifierFromTokens(
3379        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3380        ...     1
3381        ... )
3382        Traceback (most recent call last):
3383        ...
3384        exploration.parsing.ParseError...
3385        >>> pf.parseMechanismSpecifierFromTokens(
3386        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3387        ...     2
3388        ... )
3389        (MechanismSpecifier(domain='b', zone=None, decision=None,\
3390 name='m'), 4)
3391        >>> pf.parseMechanismSpecifierFromTokens(
3392        ...     [
3393        ...         'a',
3394        ...         Lexeme.domainSeparator,
3395        ...         'b',
3396        ...         Lexeme.zoneSeparator,
3397        ...         'c',
3398        ...         Lexeme.zoneSeparator,
3399        ...         'm'
3400        ...     ]
3401        ... )
3402        (MechanismSpecifier(domain='a', zone='b', decision='c', name='m'), 6)
3403        >>> pf.parseMechanismSpecifierFromTokens(
3404        ...     [
3405        ...         'a',
3406        ...         Lexeme.domainSeparator,
3407        ...         'b',
3408        ...         Lexeme.zoneSeparator,
3409        ...         'c',
3410        ...         Lexeme.zoneSeparator,
3411        ...         'm'
3412        ...     ],
3413        ...     2
3414        ... )
3415        (MechanismSpecifier(domain=None, zone='b', decision='c',\
3416 name='m'), 6)
3417        >>> pf.parseMechanismSpecifierFromTokens(
3418        ...     [
3419        ...         'a',
3420        ...         Lexeme.domainSeparator,
3421        ...         'b',
3422        ...         Lexeme.zoneSeparator,
3423        ...         'c',
3424        ...         Lexeme.zoneSeparator,
3425        ...         'm'
3426        ...     ],
3427        ...     4
3428        ... )
3429        (MechanismSpecifier(domain=None, zone=None, decision='c',\
3430 name='m'), 6)
3431        >>> pf.parseMechanismSpecifierFromTokens(
3432        ...     [
3433        ...         'roomB',
3434        ...         Lexeme.zoneSeparator,
3435        ...         'switch',
3436        ...         Lexeme.mechanismSeparator,
3437        ...         'on'
3438        ...     ]
3439        ... )
3440        (MechanismSpecifier(domain=None, zone=None, decision='roomB',\
3441 name='switch'), 2)
3442        """
3443        start, tEnd, nLeft = normalizeEnds(tokens, start, -1)
3444
3445        try:
3446            dSpec, dEnd = self.parseDecisionSpecifierFromTokens(
3447                tokens,
3448                start
3449            )
3450        except ParseError:
3451            raise ParseError(
3452                "Failed to parse mechanism specifier: couldn't parse"
3453                " the initial mechanism name."
3454            )
3455
3456        if isinstance(dSpec, int):
3457            raise ParseError(
3458                f"Invalid mechanism specifier: cannot use a decision ID"
3459                f" as the decision part. Got: {tokens[start:]}"
3460            )
3461            # TODO: Allow that?
3462
3463        mDomain = dSpec.domain
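        # What the decision parser took as the decision name is actually
        # the mechanism name, so shift fields down: the parsed zone (if
        # any) becomes the decision, and no zone is specified.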
3464        if dEnd == tEnd or dEnd == tEnd - 1:
3465            return (
3466                base.MechanismSpecifier(
3467                    domain=mDomain,
3468                    zone=None,
3469                    decision=dSpec.zone,
3470                    name=dSpec.name
3471                ),
3472                dEnd
3473            )
3474
3475        sep = tokens[dEnd + 1]
3476        after = tokens[dEnd + 2]
3477
3478        if sep == Lexeme.zoneSeparator:
3479            if isinstance(after, Lexeme):
3480                return (
3481                    base.MechanismSpecifier(
3482                        domain=mDomain,
3483                        zone=None,
3484                        decision=dSpec.zone,
3485                        name=dSpec.name
3486                    ),
3487                    dEnd
3488                )
3489            else:
3490                return (
3491                    base.MechanismSpecifier(
3492                        domain=mDomain,
3493                        zone=dSpec.zone,
3494                        decision=dSpec.name,
3495                        name=after
3496                    ),
3497                    dEnd + 2
3498                )
3499        else:
3500            return (
3501                base.MechanismSpecifier(
3502                    domain=mDomain,
3503                    zone=None,
3504                    decision=dSpec.zone,
3505                    name=dSpec.name
3506                ),
3507                dEnd
3508            )
3509
3510    def groupReqTokens(
3511        self,
3512        tokens: LexedTokens,
3513        start: int = 0,
3514        end: int = -1
3515    ) -> GroupedTokens:
3516        """
3517        Groups tokens for a requirement, stripping out all parentheses
3518        but replacing parenthesized expressions with sub-lists of tokens.
3519
3520        For example:
3521
3522        >>> pf = ParseFormat()
3523        >>> pf.groupReqTokens(['jump'])
3524        ['jump']
3525        >>> pf.groupReqTokens([Lexeme.openParen, 'jump'])
3526        Traceback (most recent call last):
3527        ...
3528        exploration.parsing.ParseError...
3529        >>> pf.groupReqTokens([Lexeme.closeParen, 'jump'])
3530        Traceback (most recent call last):
3531        ...
3532        exploration.parsing.ParseError...
3533        >>> pf.groupReqTokens(['jump', Lexeme.closeParen])
3534        Traceback (most recent call last):
3535        ...
3536        exploration.parsing.ParseError...
3537        >>> pf.groupReqTokens([Lexeme.openParen, 'jump', Lexeme.closeParen])
3538        [['jump']]
3539        >>> pf.groupReqTokens(
3540        ...     [
3541        ...         Lexeme.openParen,
3542        ...         'jump',
3543        ...         Lexeme.orBar,
3544        ...         'climb',
3545        ...         Lexeme.closeParen,
3546        ...         Lexeme.ampersand,
3547        ...         'crawl',
3548        ...     ]
3549        ... )
3550        [['jump', <Lexeme.orBar: ...>, 'climb'], <Lexeme.ampersand: ...>,\
3551 'crawl']
3552        """
3553        start, end, nTokens = normalizeEnds(tokens, start, end)
3554        if nTokens == 0:
3555            raise ParseError("Ran out of tokens.")
3556
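        # A stack of token groups: the bottom entry is the top-level
        # result, and each open paren pushes a fresh sub-group which is
        # folded into the group below it at the matching close paren.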
3557        resultsStack: List[GroupedTokens] = [[]]
3558        here = start
3559        while here <= end:
3560            token = tokens[here]
3561            here += 1
3562            if token == Lexeme.closeParen:
3563                if len(resultsStack) == 1:
3564                    raise ParseError(
3565                        f"Too many closing parens at index {here - 1}"
3566                        f" in:\n{tokens[start:end + 1]}"
3567                    )
3568                else:
3569                    closed = resultsStack.pop()
3570                    resultsStack[-1].append(closed)
3571            elif token == Lexeme.openParen:
3572                resultsStack.append([])
3573            else:
3574                resultsStack[-1].append(token)
3575        if len(resultsStack) != 1:
3576            raise ParseError(
3577                f"Mismatched parentheses in tokens:"
3578                f"\n{tokens[start:end + 1]}"
3579            )
3580        return resultsStack[0]
3581
3582    def groupReqTokensByPrecedence(
3583        self,
3584        tokenGroups: GroupedTokens
3585    ) -> GroupedRequirementParts:
3586        """
3587        Re-groups requirement tokens that have been grouped using
3588        `groupReqTokens` according to operator precedence, effectively
3589        creating the result that `groupReqTokens` would have produced
3590        if all possible non-redundant explicit parentheses had been
3591        included.
3592
3593        Also turns each leaf part into a `Requirement`.
3594
3595        TODO: Make this actually reasonably efficient T_T
3596
3597        Examples:
3598
3599        >>> pf = ParseFormat()
3600        >>> r = pf.parseRequirement('capability&roomB::switch:on')
3601        >>> pf.groupReqTokensByPrecedence(
3602        ...     [
3603        ...         ['jump', Lexeme.orBar, 'climb'],
3604        ...         Lexeme.ampersand,
3605        ...         Lexeme.notMarker,
3606        ...         'coin',
3607        ...         Lexeme.tokenCount,
3608        ...         '3'
3609        ...     ]
3610        ... )
3611        [\
3612[\
3613[[ReqCapability('jump'), <Lexeme.orBar: ...>, ReqCapability('climb')]],\
3614 <Lexeme.ampersand: ...>,\
3615 [<Lexeme.notMarker: ...>, ReqTokens('coin', 3)]\
3616]\
3617]
3618        """
3619        subgrouped: List[Union[Lexeme, str, GroupedRequirementParts]] = []
3620        # First recursively group all parenthesized expressions
3621        for i, item in enumerate(tokenGroups):
3622            if isinstance(item, list):
3623                subgrouped.append(self.groupReqTokensByPrecedence(item))
3624            else:
3625                subgrouped.append(item)
3626
3627        # Now process all leaf requirements
3628        leavesConverted: GroupedRequirementParts = []
3629        i = 0
3630        while i < len(subgrouped):
3631            gItem = subgrouped[i]
3632
3633            if isinstance(gItem, list):
3634                leavesConverted.append(gItem)
3635            elif isinstance(gItem, Lexeme):
3636                leavesConverted.append(gItem)
3637            elif i == len(subgrouped) - 1:
3638                if isinstance(gItem, Lexeme):
3639                    raise ParseError(
3640                        f"Lexeme at end of requirement. Grouped tokens:"
3641                        f"\n{tokenGroups}"
3642                    )
3643                else:
3644                    assert isinstance(gItem, str)
3645                    if gItem == 'X':
3646                        leavesConverted.append(base.ReqImpossible())
3647                    elif gItem == 'O':
3648                        leavesConverted.append(base.ReqNothing())
3649                    else:
3650                        leavesConverted.append(base.ReqCapability(gItem))
3651            else:
3652                assert isinstance(gItem, str)
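                # First try to parse a (possibly multi-token) mechanism
                # specifier starting here; if that fails, fall back to
                # token/mechanism/tag/skill suffixes or a bare capability.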
3653                try:
3654                    # TODO: Avoid list copy here...
3655                    couldBeMechanismSpecifier: LexedTokens = []
3656                    for ii in range(i, len(subgrouped)):
3657                        lexemeOrStr = subgrouped[ii]
3658                        if isinstance(lexemeOrStr, (Lexeme, str)):
3659                            couldBeMechanismSpecifier.append(lexemeOrStr)
3660                        else:
3661                            break
3662                    mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
3663                        couldBeMechanismSpecifier
3664                    )
3665                    mEnd += i
3666                    if (
3667                        mEnd >= len(subgrouped) - 2
3668                     or subgrouped[mEnd + 1] != Lexeme.mechanismSeparator
3669                    ):
3670                        raise ParseError("Not a mechanism requirement.")
3671
3672                    mState = subgrouped[mEnd + 2]
3673                    if not isinstance(mState, base.MechanismState):
3674                        raise ParseError("Not a mechanism requirement.")
3675                    leavesConverted.append(base.ReqMechanism(mSpec, mState))
3676                    i = mEnd + 2  # + 1 will happen automatically below
3677                except ParseError:
3678                    following = subgrouped[i + 1]
3679                    if following in (
3680                        Lexeme.tokenCount,
3681                        Lexeme.mechanismSeparator,
3682                        Lexeme.wigglyLine,
3683                        Lexeme.skillLevel
3684                    ):
3685                        if (
3686                            i == len(subgrouped) - 2
3687                         or isinstance(subgrouped[i + 2], Lexeme)
3688                        ):
3689                            if following == Lexeme.wigglyLine:
3690                                # Default tag value is 1
3691                                leavesConverted.append(base.ReqTag(gItem, 1))
3692                                i += 1  # another +1 automatic below
3693                            else:
3694                                raise ParseError(
3695                                    f"Lexeme at end of requirement. Grouped"
3696                                    f" tokens:\n{tokenGroups}"
3697                                )
3698                        else:
3699                            afterwards = subgrouped[i + 2]
3700                            if not isinstance(afterwards, str):
3701                                raise ParseError(
3702                                    f"Lexeme after token/mechanism/tag/skill"
3703                                    f" separator at index {i}."
3704                                    f" Grouped tokens:\n{tokenGroups}"
3705                                )
3706                            i += 2  # another +1 automatic below
3707                            if following == Lexeme.tokenCount:
3708                                try:
3709                                    tCount = int(afterwards)
3710                                except ValueError:
3711                                    raise ParseError(
3712                                        f"Token count could not be"
3713                                        f" parsed as an integer:"
3714                                        f" {afterwards!r}. Grouped"
3715                                        f" tokens:\n{tokenGroups}"
3716                                    )
3717                                leavesConverted.append(
3718                                    base.ReqTokens(gItem, tCount)
3719                                )
3720                            elif following == Lexeme.mechanismSeparator:
3721                                leavesConverted.append(
3722                                    base.ReqMechanism(gItem, afterwards)
3723                                )
3724                            elif following == Lexeme.wigglyLine:
3725                                tVal = self.parseTagValue(afterwards)
3726                                leavesConverted.append(
3727                                    base.ReqTag(gItem, tVal)
3728                                )
3729                            else:
3730                                assert following == Lexeme.skillLevel
3731                                try:
3732                                    sLevel = int(afterwards)
3733                                except ValueError:
3734                                    raise ParseError(
3735                                        f"Skill level could not be"
3736                                        f" parsed as an integer:"
3737                                        f" {afterwards!r}. Grouped"
3738                                        f" tokens:\n{tokenGroups}"
3739                                    )
3740                                leavesConverted.append(
3741                                    base.ReqLevel(gItem, sLevel)
3742                                )
3743                    else:
3744                        if gItem == 'X':
3745                            leavesConverted.append(base.ReqImpossible())
3746                        elif gItem == 'O':
3747                            leavesConverted.append(base.ReqNothing())
3748                        else:
3749                            leavesConverted.append(
3750                                base.ReqCapability(gItem)
3751                            )
3752
3753            # Finally, increment our index:
3754            i += 1
3755
3756        # Now group all NOT operators
3757        i = 0
3758        notsGrouped: GroupedRequirementParts = []
3759        while i < len(leavesConverted):
3760            leafItem = leavesConverted[i]
3761            group = []
3762            while leafItem == Lexeme.notMarker:
3763                group.append(leafItem)
3764                i += 1
3765                if i >= len(leavesConverted):
3766                    raise ParseError(
3767                        f"NOT at end of tokens:\n{leavesConverted}"
3768                    )
3769                leafItem = leavesConverted[i]
3770            if group == []:
3771                notsGrouped.append(leafItem)
3772                i += 1
3773            else:
3774                group.append(leafItem)
3775                i += 1
3776                notsGrouped.append(group)
3777
3778        # Next group all AND operators
3779        i = 0
3780        andsGrouped: GroupedRequirementParts = []
3781        while i < len(notsGrouped):
3782            notGroupItem = notsGrouped[i]
3783            if notGroupItem == Lexeme.ampersand:
3784                if i == len(notsGrouped) - 1:
3785                    raise ParseError(
3786                        f"AND at end of group in tokens:"
3787                        f"\n{tokenGroups}"
3788                        f"\nWhich had been grouped into:"
3789                        f"\n{notsGrouped}"
3790                    )
3791                itemAfter = notsGrouped[i + 1]
3792                if isinstance(itemAfter, Lexeme):
3793                    raise ParseError(
3794                        f"Lexeme after AND in group in tokens:"
3795                        f"\n{tokenGroups}"
3796                        f"\nWhich had been grouped into:"
3797                        f"\n{notsGrouped}"
3798                    )
3799                assert isinstance(itemAfter, (base.Requirement, list))
3800                prev = andsGrouped[-1]
3801                if (
3802                    isinstance(prev, list)
3803                and len(prev) > 2
3804                and prev[1] == Lexeme.ampersand
3805                ):
3806                    prev.extend(notsGrouped[i:i + 2])
3807                    i += 1  # with an extra +1 below
3808                else:
3809                    andsGrouped.append(
3810                        [andsGrouped.pop()] + notsGrouped[i:i + 2]
3811                    )
3812                    i += 1 # extra +1 below
3813            else:
3814                andsGrouped.append(notGroupItem)
3815            i += 1
3816
3817        # Finally check that we only have OR operators left over
3818        i = 0
3819        finalResult: GroupedRequirementParts = []
3820        while i < len(andsGrouped):
3821            andGroupItem = andsGrouped[i]
3822            if andGroupItem == Lexeme.orBar:
3823                if i == len(andsGrouped) - 1:
3824                    raise ParseError(
3825                        f"OR at end of group in tokens:"
3826                        f"\n{tokenGroups}"
3827                        f"\nWhich had been grouped into:"
3828                        f"\n{andsGrouped}"
3829                    )
3830                itemAfter = andsGrouped[i + 1]
3831                if isinstance(itemAfter, Lexeme):
3832                    raise ParseError(
3833                        f"Lexeme after OR in group in tokens:"
3834                        f"\n{tokenGroups}"
3835                        f"\nWhich had been grouped into:"
3836                        f"\n{andsGrouped}"
3837                    )
3838                assert isinstance(itemAfter, (base.Requirement, list))
3839                prev = finalResult[-1]
3840                if (
3841                    isinstance(prev, list)
3842                and len(prev) > 2
3843                and prev[1] == Lexeme.orBar
3844                ):
3845                    prev.extend(andsGrouped[i:i + 2])
3846                    i += 1  # with an extra +1 below
3847                else:
3848                    finalResult.append(
3849                        [finalResult.pop()] + andsGrouped[i:i + 2]
3850                    )
3851                    i += 1 # extra +1 below
3852            elif isinstance(andGroupItem, Lexeme):
3853                raise ParseError(
3854                    f"Leftover lexeme when grouping ORs at index {i}"
3855                    f" in grouped tokens:\n{andsGrouped}"
3856                    f"\nOriginal tokens were:\n{tokenGroups}"
3857                )
3858            else:
3859                finalResult.append(andGroupItem)
3860            i += 1
3861
3862        return finalResult
3863
3864    def parseRequirementFromRegroupedTokens(
3865        self,
3866        reqGroups: GroupedRequirementParts
3867    ) -> base.Requirement:
3868        """
3869        Recursive parser that works once tokens have been turned into
3870        requirements at the leaves and otherwise grouped by operator
3871        precedence (see `groupReqTokensByPrecedence`).
3872
3873        TODO: Simplify by just doing this while grouping...?
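
        For example (a minimal sketch where the leaf tokens have already
        been converted into `Requirement` objects):

        >>> pf = ParseFormat()
        >>> pf.parseRequirementFromRegroupedTokens([
        ...     base.ReqCapability('one'),
        ...     Lexeme.orBar,
        ...     base.ReqCapability('two')
        ... ])
        ReqAny([ReqCapability('one'), ReqCapability('two')])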
3874        """
3875        if len(reqGroups) == 0:
3876            raise ParseError("Ran out of tokens.")
3877
3878        elif len(reqGroups) == 1:
3879            only = reqGroups[0]
3880            if isinstance(only, list):
3881                return self.parseRequirementFromRegroupedTokens(only)
3882            elif isinstance(only, base.Requirement):
3883                return only
3884            else:
3885                raise ParseError(f"Invalid singleton group:\n{only}")
3886        elif reqGroups[0] == Lexeme.notMarker:
3887            if (
3888                not all(x == Lexeme.notMarker for x in reqGroups[:-1])
3889             or not isinstance(reqGroups[-1], (list, base.Requirement))
3890            ):
3891                raise ParseError(f"Invalid negation group:\n{reqGroups}")
3892            result = reqGroups[-1]
3893            if isinstance(result, list):
3894                result = self.parseRequirementFromRegroupedTokens(result)
3895            assert isinstance(result, base.Requirement)
3896            for i in range(len(reqGroups) - 1):
3897                result = base.ReqNot(result)
3898            return result
3899        elif len(reqGroups) % 2 == 0:
3900            raise ParseError(f"Even-length non-negation group:\n{reqGroups}")
3901        else:
3902            if (
3903                reqGroups[1] not in (Lexeme.ampersand, Lexeme.orBar)
3904             or not all(
3905                    reqGroups[i] == reqGroups[1]
3906                    for i in range(1, len(reqGroups), 2)
3907                )
3908            ):
3909                raise ParseError(
3910                    f"Inconsistent operator(s) in group:\n{reqGroups}"
3911                )
3912            op = reqGroups[1]
3913            operands = [
3914                (
3915                    self.parseRequirementFromRegroupedTokens(x)
3916                    if isinstance(x, list)
3917                    else x
3918                )
3919                for x in reqGroups[::2]
3920            ]
3921            if not all(isinstance(x, base.Requirement) for x in operands):
3922                raise ParseError(
3923                    f"Item not reducible to Requirement in operator group:"
3924                    f"\n{reqGroups}"
3925                )
3926            reqSequence = cast(Sequence[base.Requirement], operands)
3927            if op == Lexeme.ampersand:
3928                return base.ReqAll(reqSequence).flatten()
3929            else:
3930                assert op == Lexeme.orBar
3931                return base.ReqAny(reqSequence).flatten()
3932
3933    def parseRequirementFromGroupedTokens(
3934        self,
3935        tokenGroups: GroupedTokens
3936    ) -> base.Requirement:
3937        """
3938        Parses a `base.Requirement` from a pre-grouped tokens list (see
3939        `groupReqTokens`). Uses the 'orBar', 'ampersand', 'notMarker',
3940        'tokenCount', and 'mechanismSeparator' `Lexeme`s to provide
3941        'or', 'and', and 'not' operators along with distinguishing
3942        between capabilities, tokens, and mechanisms.
3943
3944        Precedence ordering is NOT, then AND, then OR, but you are
3945        encouraged to use parentheses for explicit grouping (the
3946        'openParen' and 'closeParen' `Lexeme`s, although these must be
3947        handled by `groupReqTokens` so this function won't see them
3948        directly).
3949
3950        You can also use 'X' (without quotes) for a never-satisfied
3951        requirement, and 'O' (without quotes) for an always-satisfied
3952        requirement.
3953
3954        Note that when '!' is applied to a token requirement it flips
3955        the sense of the integer from 'must have at least this many' to
3956        'must have strictly less than this many'.
3957
3958        Raises a `ParseError` if the grouped tokens it is given cannot
3959        be parsed as a `Requirement`.
3960
3961        Examples:
3962
3963        >>> pf = ParseFormat()
3964        >>> pf.parseRequirementFromGroupedTokens(['capability'])
3965        ReqCapability('capability')
3966        >>> pf.parseRequirementFromGroupedTokens(
3967        ...     ['token', Lexeme.tokenCount, '3']
3968        ... )
3969        ReqTokens('token', 3)
3970        >>> pf.parseRequirementFromGroupedTokens(
3971        ...     ['mechanism', Lexeme.mechanismSeparator, 'state']
3972        ... )
3973        ReqMechanism('mechanism', 'state')
3974        >>> pf.parseRequirementFromGroupedTokens(
3975        ...     ['capability', Lexeme.orBar, 'token',
3976        ...      Lexeme.tokenCount, '3']
3977        ... )
3978        ReqAny([ReqCapability('capability'), ReqTokens('token', 3)])
3979        >>> pf.parseRequirementFromGroupedTokens(
3980        ...     ['one', Lexeme.ampersand, 'two', Lexeme.orBar, 'three']
3981        ... )
3982        ReqAny([ReqAll([ReqCapability('one'), ReqCapability('two')]),\
3983 ReqCapability('three')])
3984        >>> pf.parseRequirementFromGroupedTokens(
3985        ...     [
3986        ...         'one',
3987        ...         Lexeme.ampersand,
3988        ...         [
3989        ...              'two',
3990        ...              Lexeme.orBar,
3991        ...              'three'
3992        ...         ]
3993        ...     ]
3994        ... )
3995        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
3996 ReqCapability('three')])])
3997        >>> pf.parseRequirementFromTokens(['X'])
3998        ReqImpossible()
3999        >>> pf.parseRequirementFromTokens(['O'])
4000        ReqNothing()
4001        >>> pf.parseRequirementFromTokens(
4002        ...     [Lexeme.openParen, 'O', Lexeme.closeParen]
4003        ... )
4004        ReqNothing()
4005        """
4006        if len(tokenGroups) == 0:
4007            raise ParseError("Ran out of tokens.")
4008
4009        reGrouped = self.groupReqTokensByPrecedence(tokenGroups)
4010
4011        return self.parseRequirementFromRegroupedTokens(reGrouped)
4012
4013    def parseRequirementFromTokens(
4014        self,
4015        tokens: LexedTokens,
4016        start: int = 0,
4017        end: int = -1
4018    ) -> base.Requirement:
4019        """
4020        Parses a requirement from `LexedTokens` by grouping them first
4021        and then using `parseRequirementFromGroupedTokens`.
4022
4023        For example:
4024
4025        >>> pf = ParseFormat()
4026        >>> pf.parseRequirementFromTokens(
4027        ...     [
4028        ...         'one',
4029        ...         Lexeme.ampersand,
4030        ...         Lexeme.openParen,
4031        ...         'two',
4032        ...         Lexeme.orBar,
4033        ...         'three',
4034        ...         Lexeme.closeParen
4035        ...     ]
4036        ... )
4037        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
4038 ReqCapability('three')])])
4039        """
4040        grouped = self.groupReqTokens(tokens, start, end)
4041        return self.parseRequirementFromGroupedTokens(grouped)
4042
4043    def parseRequirement(self, encoded: str) -> base.Requirement:
4044        """
4045        Parses a `base.Requirement` from a string by calling `lex` and
4046        then feeding it into `ParseFormat.parseRequirementFromTokens`.
4047        As stated in `parseRequirementFromTokens`, the precedence
4048        binding order is NOT, then AND, then OR.
4049
4050        For example:
4051
4052        >>> pf = ParseFormat()
4053        >>> pf.parseRequirement('! coin * 3')
4054        ReqNot(ReqTokens('coin', 3))
4055        >>> pf.parseRequirement(
4056        ...     '  oneWord | "two words"|"three  words words" '
4057        ... )
4058        ReqAny([ReqCapability('oneWord'), ReqCapability('"two words"'),\
4059 ReqCapability('"three  words words"')])
4060        >>> pf.parseRequirement('words-with-dashes')
4061        ReqCapability('words-with-dashes')
4062        >>> r = pf.parseRequirement('capability&roomB::switch:on')
4063        >>> r
4064        ReqAll([ReqCapability('capability'),\
4065 ReqMechanism(MechanismSpecifier(domain=None, zone=None, decision='roomB',\
4066 name='switch'), 'on')])
4067        >>> r.unparse()
4068        '(capability&roomB::switch:on)'
4069        >>> pf.parseRequirement('!!!one')
4070        ReqNot(ReqNot(ReqNot(ReqCapability('one'))))
4071        >>> pf.parseRequirement('domain//zone::where::mechanism:state')
4072        ReqMechanism(MechanismSpecifier(domain='domain', zone='zone',\
4073 decision='where', name='mechanism'), 'state')
4074        >>> pf.parseRequirement('domain//mechanism:state')
4075        ReqMechanism(MechanismSpecifier(domain='domain', zone=None,\
4076 decision=None, name='mechanism'), 'state')
4077        >>> pf.parseRequirement('where::mechanism:state')
4078        ReqMechanism(MechanismSpecifier(domain=None, zone=None,\
4079 decision='where', name='mechanism'), 'state')
4080        >>> pf.parseRequirement('zone::where::mechanism:state')
4081        ReqMechanism(MechanismSpecifier(domain=None, zone='zone',\
4082 decision='where', name='mechanism'), 'state')
4083        >>> pf.parseRequirement('tag~')
4084        ReqTag('tag', 1)
4085        >>> pf.parseRequirement('tag~&tag2~')
4086        ReqAll([ReqTag('tag', 1), ReqTag('tag2', 1)])
4087        >>> pf.parseRequirement('tag~value|tag~3|tag~3.5|skill^3')
4088        ReqAny([ReqTag('tag', 'value'), ReqTag('tag', 3),\
4089 ReqTag('tag', 3.5), ReqLevel('skill', 3)])
4090        >>> pf.parseRequirement('tag~True|tag~False|tag~None')
4091        ReqAny([ReqTag('tag', True), ReqTag('tag', False), ReqTag('tag', None)])
4092
4093        Precedence examples:
4094
4095        >>> pf.parseRequirement('A|B&C')
4096        ReqAny([ReqCapability('A'), ReqAll([ReqCapability('B'),\
4097 ReqCapability('C')])])
4098        >>> pf.parseRequirement('A&B|C')
4099        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4100 ReqCapability('C')])
4101        >>> pf.parseRequirement('(A&B)|C')
4102        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4103 ReqCapability('C')])
4104        >>> pf.parseRequirement('(A&B|C)&D')
4105        ReqAll([ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4106 ReqCapability('C')]), ReqCapability('D')])
4107
4108        Error examples:
4109
4110        >>> pf.parseRequirement('one ! Word')
4111        Traceback (most recent call last):
4112        ...
4113        exploration.parsing.ParseError...
4114        >>> pf.parseRequirement('a|')
4115        Traceback (most recent call last):
4116        ...
4117        exploration.parsing.ParseError...
4118        >>> pf.parseRequirement('b!')
4119        Traceback (most recent call last):
4120        ...
4121        exploration.parsing.ParseError...
4122        >>> pf.parseRequirement('*emph*')
4123        Traceback (most recent call last):
4124        ...
4125        exploration.parsing.ParseError...
4126        >>> pf.parseRequirement('one&&two')
4127        Traceback (most recent call last):
4128        ...
4129        exploration.parsing.ParseError...
4130        >>> pf.parseRequirement('one!|two')
4131        Traceback (most recent call last):
4132        ...
4133        exploration.parsing.ParseError...
4134        >>> pf.parseRequirement('one*two')
4135        Traceback (most recent call last):
4136        ...
4137        exploration.parsing.ParseError...
4138        >>> pf.parseRequirement('one*')
4139        Traceback (most recent call last):
4140        ...
4141        exploration.parsing.ParseError...
4142        >>> pf.parseRequirement('()')
4143        Traceback (most recent call last):
4144        ...
4145        exploration.parsing.ParseError...
4146        >>> pf.parseRequirement('(one)*3')
4147        Traceback (most recent call last):
4148        ...
4149        exploration.parsing.ParseError...
4150        >>> pf.parseRequirement('a:')
4151        Traceback (most recent call last):
4152        ...
4153        exploration.parsing.ParseError...
4154        >>> pf.parseRequirement('a:b:c')
4155        Traceback (most recent call last):
4156        ...
4157        exploration.parsing.ParseError...
4158        >>> pf.parseRequirement('where::capability')
4159        Traceback (most recent call last):
4160        ...
4161        exploration.parsing.ParseError...
4162        """
4163        return self.parseRequirementFromTokens(
4164            lex(encoded, self.reverseFormat)
4165        )
4166
4167    def parseSkillCombinationFromTokens(
4168        self,
4169        tokens: LexedTokens,
4170        start: int = 0,
4171        end: int = -1
4172    ) -> Union[base.Skill, base.SkillCombination]:
4173        """
4174        Parses a skill combination from the specified range within the
4175        given tokens list. If just a single string token is selected, it
4176        will be returned as a `base.BestSkill` with just that skill
4177        inside.
4178
4179        For example:
4180
4181        >>> pf = ParseFormat()
4182        >>> pf.parseSkillCombinationFromTokens(['climbing'])
4183        BestSkill('climbing')
4184        >>> tokens = [
4185        ...     'best',
4186        ...     Lexeme.openParen,
4187        ...     'brains',
4188        ...     Lexeme.sepOrDelay,
4189        ...     'brawn',
4190        ...     Lexeme.closeParen,
4191        ... ]
4192        >>> pf.parseSkillCombinationFromTokens(tokens)
4193        BestSkill('brains', 'brawn')
4194        >>> tokens[2] = '3'  # not a lexeme so it's a string
4195        >>> pf.parseSkillCombinationFromTokens(tokens)
4196        BestSkill(3, 'brawn')
4197        >>> tokens = [
4198        ...     Lexeme.wigglyLine,
4199        ...     Lexeme.wigglyLine,
4200        ...     'yes',
4201        ... ]
4202        >>> pf.parseSkillCombinationFromTokens(tokens)
4203        InverseSkill(InverseSkill('yes'))
4204        """
4205        start, end, nTokens = normalizeEnds(tokens, start, end)
4206
4207        first = tokens[start]
4208        if nTokens == 1:
4209            if isinstance(first, base.Skill):
4210                try:
4211                    level = int(first)
4212                    return base.BestSkill(level)
4213                except ValueError:
4214                    return base.BestSkill(first)
4215            else:
4216                raise ParseError(
4217                    f"Invalid SkillCombination:\n{tokens[start:end + 1]}"
4218                )
4219
4220        if first == Lexeme.wigglyLine:
4221            inv = self.parseSkillCombinationFromTokens(
4222                tokens,
4223                start + 1,
4224                end
4225            )
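            # Single skills come back wrapped in a BestSkill; unwrap so
            # that we invert the skill itself rather than the wrapper: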
4226            if isinstance(inv, base.BestSkill) and len(inv.skills) == 1:
4227                return base.InverseSkill(inv.skills[0])
4228            else:
4229                return base.InverseSkill(inv)
4230
4231        second = tokens[start + 1]
4232        if second != Lexeme.openParen:
4233            raise ParseError(
4234                f"Invalid SkillCombination (missing paren):"
4235                f"\n{tokens[start:end + 1]}"
4236            )
4237
4238        parenEnd = self.matchingBrace(
4239            tokens,
4240            start + 1,
4241            Lexeme.openParen,
4242            Lexeme.closeParen
4243        )
4244        if parenEnd != end:
4245            raise ParseError(
4246                f"Extra junk after SkillCombination:"
4247                f"\n{tokens[parenEnd + 1:end + 1]}"
4248            )
4249
4250        if first == 'if':
4251            parts = list(
4252                findSeparatedParts(
4253                    tokens,
4254                    Lexeme.sepOrDelay,
4255                    start + 2,
4256                    end - 1,
4257                    Lexeme.openParen,
4258                    Lexeme.closeParen
4259                )
4260            )
4261            if len(parts) != 3:
4262                raise ParseError(
4263                    f"Wrong number of parts for ConditionalSkill (needs"
4264                    f" 3, got {len(parts)}):"
4265                    f"\n{tokens[start + 2:end]}"
4266                )
4267            reqStart, reqEnd = parts[0]
4268            ifStart, ifEnd = parts[1]
4269            elseStart, elseEnd = parts[2]
4270            return base.ConditionalSkill(
4271                self.parseRequirementFromTokens(tokens, reqStart, reqEnd),
4272                self.parseSkillCombinationFromTokens(tokens, ifStart, ifEnd),
4273                self.parseSkillCombinationFromTokens(
4274                    tokens,
4275                    elseStart,
4276                    elseEnd
4277                ),
4278            )
4279        elif first in ('sum', 'best', 'worst'):
4280            make: type[base.SkillCombination]
4281            if first == 'sum':
4282                make = base.CombinedSkill
4283            elif first == 'best':
4284                make = base.BestSkill
4285            else:
4286                make = base.WorstSkill
4287
4288            subs = []
4289            for partStart, partEnd in findSeparatedParts(
4290                tokens,
4291                Lexeme.sepOrDelay,
4292                start + 2,
4293                end - 1,
4294                Lexeme.openParen,
4295                Lexeme.closeParen
4296            ):
4297                sub = self.parseSkillCombinationFromTokens(
4298                    tokens,
4299                    partStart,
4300                    partEnd
4301                )
4302                if (
4303                    isinstance(sub, base.BestSkill)
4304                and len(sub.skills) == 1
4305                ):
4306                    subs.append(sub.skills[0])
4307                else:
4308                    subs.append(sub)
4309
4310            return make(*subs)
4311        else:
4312            raise ParseError(
4313                f"Invalid SkillCombination:\n{tokens[start:end + 1]}"
4314            )
4315
4316    def parseSkillCombination(
4317        self,
4318        encoded: str
4319    ) -> base.SkillCombination:
4320        """
4321        Parses a `SkillCombination` from a string. Calls `lex` and then
4322        `parseSkillCombinationFromTokens`.
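
        For example (using the default format; compare the token-based
        examples in `parseSkillCombinationFromTokens`):

        >>> pf = ParseFormat()
        >>> pf.parseSkillCombination('best(brains, brawn)')
        BestSkill('brains', 'brawn')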
4323        """
4324        result = self.parseSkillCombinationFromTokens(
4325            lex(encoded, self.reverseFormat)
4326        )
4327        if not isinstance(result, base.SkillCombination):
4328            return base.BestSkill(result)
4329        else:
4330            return result
4331
4332    def parseConditionFromTokens(
4333        self,
4334        tokens: LexedTokens,
4335        start: int = 0,
4336        end: int = -1
4337    ) -> base.Condition:
4338        """
4339        Parses a `base.Condition` from a lexed tokens list. For example:
4340
4341        >>> pf = ParseFormat()
4342        >>> tokens = [
4343        ...     Lexeme.doubleQuestionmark,
4344        ...     Lexeme.openParen,
4345        ...     "fire",
4346        ...     Lexeme.ampersand,
4347        ...     "water",
4348        ...     Lexeme.closeParen,
4349        ...     Lexeme.openCurly,
4350        ...     "gain",
4351        ...     "wind",
4352        ...     Lexeme.closeCurly,
4353        ...     Lexeme.openCurly,
4354        ...     Lexeme.closeCurly,
4355        ... ]
4356        >>> pf.parseConditionFromTokens(tokens) == base.condition(
4357        ...     condition=base.ReqAll([
4358        ...         base.ReqCapability('fire'),
4359        ...         base.ReqCapability('water')
4360        ...     ]),
4361        ...     consequence=[base.effect(gain='wind')]
4362        ... )
4363        True
4364        """
4365        start, end, nTokens = normalizeEnds(tokens, start, end)
4366        if nTokens < 8:
4367            raise ParseError(
4368                f"A Condition requires at least 8 tokens (got {nTokens})."
4369            )
4370        if tokens[start] != Lexeme.doubleQuestionmark:
4371            raise ParseError(
4372                f"A Condition must start with"
4373                f" {repr(self.formatDict[Lexeme.doubleQuestionmark])}"
4374            )
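        # The requirement (with its parentheses) runs from just after the
        # '??' up to the first '{'; the two brace-delimited blocks that
        # follow are the consequence and the alternative.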
4375        try:
4376            consequenceStart = tokens.index(Lexeme.openCurly, start)
4377        except ValueError:
4378            raise ParseError("A condition must include a consequence block.")
4379        consequenceEnd = self.matchingBrace(tokens, consequenceStart)
4380        altStart = consequenceEnd + 1
4381        altEnd = self.matchingBrace(tokens, altStart)
4382
4383        if altEnd != end:
4384            raise ParseError(
4385                f"Junk after condition:\n{tokens[altEnd + 1: end + 1]}"
4386            )
4387
4388        return base.condition(
4389            condition=self.parseRequirementFromTokens(
4390                tokens,
4391                start + 1,
4392                consequenceStart - 1
4393            ),
4394            consequence=self.parseConsequenceFromTokens(
4395                tokens,
4396                consequenceStart,
4397                consequenceEnd
4398            ),
4399            alternative=self.parseConsequenceFromTokens(
4400                tokens,
4401                altStart,
4402                altEnd
4403            )
4404        )
4405
4406    def parseCondition(
4407        self,
4408        encoded: str
4409    ) -> base.Condition:
4410        """
4411        Lexes the given string and then calls `parseConditionFromTokens`
4412        to return a `base.Condition`.
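
        For example (a round trip using the string from
        `unparseCondition`'s example):

        >>> pf = ParseFormat()
        >>> c = pf.parseCondition('??((brawny|!(weights*3))){gain power}{}')
        >>> c == base.condition(
        ...     condition=base.ReqAny([
        ...         base.ReqCapability('brawny'),
        ...         base.ReqNot(base.ReqTokens('weights', 3))
        ...     ]),
        ...     consequence=[base.effect(gain='power')]
        ... )
        True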
4413        """
4414        return self.parseConditionFromTokens(
4415            lex(encoded, self.reverseFormat)
4416        )
4417
4418    def parseChallengeFromTokens(
4419        self,
4420        tokens: LexedTokens,
4421        start: int = 0,
4422        end: int = -1
4423    ) -> base.Challenge:
4424        """
4425        Parses a `base.Challenge` from a lexed tokens list.
4426
4427        For example:
4428
4429        >>> pf = ParseFormat()
4430        >>> tokens = [
4431        ...     Lexeme.angleLeft,
4432        ...     '2',
4433        ...     Lexeme.angleRight,
4434        ...     'best',
4435        ...     Lexeme.openParen,
4436        ...     "chess",
4437        ...     Lexeme.sepOrDelay,
4438        ...     "checkers",
4439        ...     Lexeme.closeParen,
4440        ...     Lexeme.openCurly,
4441        ...     "gain",
4442        ...     "coin",
4443        ...     Lexeme.tokenCount,
4444        ...     "5",
4445        ...     Lexeme.closeCurly,
4446        ...     Lexeme.angleRight,
4447        ...     Lexeme.openCurly,
4448        ...     "lose",
4449        ...     "coin",
4450        ...     Lexeme.tokenCount,
4451        ...     "5",
4452        ...     Lexeme.closeCurly,
4453        ... ]
4454        >>> c = pf.parseChallengeFromTokens(tokens)
4455        >>> c['skills'] == base.BestSkill('chess', 'checkers')
4456        True
4457        >>> c['level']
4458        2
4459        >>> c['success'] == [base.effect(gain=('coin', 5))]
4460        True
4461        >>> c['failure'] == [base.effect(lose=('coin', 5))]
4462        True
4463        >>> c['outcome']
4464        False
4465        >>> c == base.challenge(
4466        ...     skills=base.BestSkill('chess', 'checkers'),
4467        ...     level=2,
4468        ...     success=[base.effect(gain=('coin', 5))],
4469        ...     failure=[base.effect(lose=('coin', 5))],
4470        ...     outcome=False
4471        ... )
4472        True
4473        >>> t2 = ['hi'] + tokens + ['bye']  # parsing only part of the list
4474        >>> c == pf.parseChallengeFromTokens(t2, 1, -2)
4475        True
4476        """
4477        start, end, nTokens = normalizeEnds(tokens, start, end)
4478        if nTokens < 8:
4479            raise ParseError(
4480                f"Not enough tokens for a challenge: {nTokens}"
4481            )
4482        if tokens[start] != Lexeme.angleLeft:
4483            raise ParseError(
4484                f"Challenge must start with"
4485                f" {repr(self.formatDict[Lexeme.angleLeft])}"
4486            )
4487        levelStr = tokens[start + 1]
4488        if isinstance(levelStr, Lexeme):
4489            raise ParseError(
4490                f"Challenge must start with a level in angle brackets"
4491                f" (got {repr(self.formatDict[levelStr])})."
4492            )
4493        if tokens[start + 2] != Lexeme.angleRight:
4494            raise ParseError(
4495                f"Challenge must include"
4496                f" {repr(self.formatDict[Lexeme.angleRight])} after"
4497                f" the level."
4498            )
4499        try:
4500            level = int(levelStr)
4501        except ValueError:
4502            raise ParseError(
4503                f"Challenge level must be an integer (got"
4504                f" {repr(tokens[start + 1])})."
4505            )
4506        try:
4507            successStart = tokens.index(Lexeme.openCurly, start)
4508            skillsEnd = successStart - 1
4509        except ValueError:
4510            raise ParseError("A challenge must include a consequence block.")
4511
4512        outcome: Optional[bool] = None
4513        if tokens[skillsEnd] == Lexeme.angleRight:
4514            skillsEnd -= 1
4515            outcome = True
4516        successEnd = self.matchingBrace(tokens, successStart)
4517        failStart = successEnd + 1
4518        if tokens[failStart] == Lexeme.angleRight:
4519            failStart += 1
4520            if outcome is not None:
4521                raise ParseError(
4522                    "Cannot indicate both success and failure as"
4523                    " outcomes in a challenge."
4524                )
4525            outcome = False
4526        failEnd = self.matchingBrace(tokens, failStart)
4527
4528        if failEnd != end:
4529            raise ParseError(
4530                f"Junk after challenge:\n{tokens[failEnd + 1:end + 1]}"
4531            )
4532
4533        skills = self.parseSkillCombinationFromTokens(
4534            tokens,
4535            start + 3,
4536            skillsEnd
4537        )
4538        if isinstance(skills, base.Skill):
4539            skills = base.BestSkill(skills)
4540
4541        return base.challenge(
4542            level=level,
4543            outcome=outcome,
4544            skills=skills,
4545            success=self.parseConsequenceFromTokens(
4546                tokens[successStart:successEnd + 1]
4547            ),
4548            failure=self.parseConsequenceFromTokens(
4549                tokens[failStart:failEnd + 1]
4550            )
4551        )
4552
4553    def parseChallenge(
4554        self,
4555        encoded: str
4556    ) -> base.Challenge:
4557        """
4558        Lexes the given string and then calls `parseChallengeFromTokens`
4559        to return a `base.Challenge`.
4560        """
4561        return self.parseChallengeFromTokens(
4562            lex(encoded, self.reverseFormat)
4563        )
4564
4565    def parseConsequenceFromTokens(
4566        self,
4567        tokens: LexedTokens,
4568        start: int = 0,
4569        end: int = -1
4570    ) -> base.Consequence:
4571        """
4572        Parses a consequence from a lexed token list. If start and/or end
4573        are specified, only processes the part of the list between those
4574        two indices (inclusive). Use `lex` to turn a string into a
4575        `LexedTokens` list (or use `ParseFormat.parseConsequence` which
4576        does that for you).
4577
4578        An example:
4579
4580        >>> pf = ParseFormat()
4581        >>> tokens = [
4582        ...     Lexeme.openCurly,
4583        ...     'gain',
4584        ...     'power',
4585        ...     Lexeme.closeCurly
4586        ... ]
4587        >>> c = pf.parseConsequenceFromTokens(tokens)
4588        >>> c == [base.effect(gain='power')]
4589        True
4590        >>> tokens.append('hi')
4591        >>> c == pf.parseConsequenceFromTokens(tokens, end=-2)
4592        True
4593        >>> c == pf.parseConsequenceFromTokens(tokens, end=3)
4594        True
4595        """
4596        start, end, nTokens = normalizeEnds(tokens, start, end)
4597
4598        if nTokens < 2:
4599            raise ParseError("Consequence must have at least two tokens.")
4600
4601        if tokens[start] != Lexeme.openCurly:
4602            raise ParseError(
4603                f"Consequence must start with an open curly brace:"
4604                f" {repr(self.formatDict[Lexeme.openCurly])}."
4605            )
4606
4607        if tokens[end] != Lexeme.closeCurly:
4608            raise ParseError(
4609                f"Consequence must end with a closing curly brace:"
4610                f" {repr(self.formatDict[Lexeme.closeCurly])}."
4611            )
4612
4613        if nTokens == 2:
4614            return []
4615
4616        result: base.Consequence = []
4617        for partStart, partEnd in findSeparatedParts(
4618            tokens,
4619            Lexeme.consequenceSeparator,
4620            start + 1,
4621            end - 1,
4622            Lexeme.openCurly,
4623            Lexeme.closeCurly
4624        ):
4625            if partEnd - partStart < 0:
4626                raise ParseError("Empty consequence part.")
4627            if tokens[partStart] == Lexeme.angleLeft:  # a challenge
4628                result.append(
4629                    self.parseChallengeFromTokens(
4630                        tokens,
4631                        partStart,
4632                        partEnd
4633                    )
4634                )
4635            elif tokens[partStart] == Lexeme.doubleQuestionmark:  # condition
4636                result.append(
4637                    self.parseConditionFromTokens(
4638                        tokens,
4639                        partStart,
4640                        partEnd
4641                    )
4642                )
4643            else:  # Must be an effect
4644                result.append(
4645                    self.parseEffectFromTokens(
4646                        tokens,
4647                        partStart,
4648                        partEnd
4649                    )
4650                )
4651
4652        return result
4653
4654    def parseConsequence(self, encoded: str) -> base.Consequence:
4655        """
4656        Parses a consequence from a string. Uses `lex` and
4657        `ParseFormat.parseConsequenceFromTokens`. For example:
4658
4659        >>> pf = ParseFormat()
4660        >>> c = pf.parseConsequence(
4661        ...   '{gain power}'
4662        ... )
4663        >>> c == [base.effect(gain='power')]
4664        True
4665        >>> pf.unparseConsequence(c)
4666        '{gain power}'
4667        >>> c = pf.parseConsequence(
4668        ...     '{\\n'
4669        ...     '    ??(brawny|!weights*3){\\n'
4670        ...     '        <3>sum(brains, brawn){goto home}>{bounce}\\n'
4671        ...     '    }{};\\n'
4672        ...     '    lose coin*1\\n'
4673        ...     '}'
4674        ... )
4675        >>> len(c)
4676        2
4677        >>> c[0]['condition'] == base.ReqAny([
4678        ...     base.ReqCapability('brawny'),
4679        ...     base.ReqNot(base.ReqTokens('weights', 3))
4680        ... ])
4681        True
4682        >>> len(c[0]['consequence'])
4683        1
4684        >>> len(c[0]['alternative'])
4685        0
4686        >>> cons = c[0]['consequence'][0]
4687        >>> cons['skills'] == base.CombinedSkill('brains', 'brawn')
4688        True
4689        >>> cons['level']
4690        3
4691        >>> len(cons['success'])
4692        1
4693        >>> len(cons['failure'])
4694        1
4695        >>> cons['success'][0] == base.effect(goto='home')
4696        True
4697        >>> cons['failure'][0] == base.effect(bounce=True)
4698        True
4699        >>> cons['outcome'] = False
4700        >>> c[0] == base.condition(
4701        ...     condition=base.ReqAny([
4702        ...         base.ReqCapability('brawny'),
4703        ...         base.ReqNot(base.ReqTokens('weights', 3))
4704        ...     ]),
4705        ...     consequence=[
4706        ...         base.challenge(
4707        ...             skills=base.CombinedSkill('brains', 'brawn'),
4708        ...             level=3,
4709        ...             success=[base.effect(goto='home')],
4710        ...             failure=[base.effect(bounce=True)],
4711        ...             outcome=False
4712        ...         )
4713        ...     ]
4714        ... )
4715        True
4716        >>> c[1] == base.effect(lose=('coin', 1))
4717        True
4718        """
4719        return self.parseConsequenceFromTokens(
4720            lex(encoded, self.reverseFormat)
4721        )
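
For illustration, here are string-level versions of the token-based examples above, assuming the default format (a sketch; the expected results follow from the parseChallengeFromTokens and parseConsequence doctests):

pf = ParseFormat()

# The same challenge as the token list in the parseChallengeFromTokens
# doctest: level 2, a best-of skill combination, and '>' before the
# second block marking a failure outcome.
c = pf.parseChallenge('<2>best(chess, checkers){gain coin*5}>{lose coin*5}')
# expected: c['level'] == 2 and c['outcome'] is False

# A bare condition: a requirement in parentheses, then consequence and
# (empty) alternative blocks.
cnd = pf.parseCondition('??(brawny|!weights*3){gain coin*1}{}')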

A ParseFormat manages the mapping from markers to entry types and vice versa.

ParseFormat(
    formatDict: Dict[Lexeme, str] = DEFAULT_FORMAT,
    effectNames: Dict[str, base.EffectType] = DEFAULT_EFFECT_NAMES,
    focalizationNames: Dict[str, base.DomainFocalization] = DEFAULT_FOCALIZATION_NAMES,
    successFailureIndicators: Tuple[str, str] = ('s', 'f')
)
590    def __init__(
591        self,
592        formatDict: Format = DEFAULT_FORMAT,
593        effectNames: Dict[str, base.EffectType] = DEFAULT_EFFECT_NAMES,
594        focalizationNames: Dict[
595            str,
596            base.DomainFocalization
597        ] = DEFAULT_FOCALIZATION_NAMES,
598        successFailureIndicators: Tuple[str, str] = DEFAULT_SF_INDICATORS
599    ):
600        """
601        Sets up the parsing format. Requires a `Format` dictionary to
602        define the specifics. Raises a `ValueError` unless the keys of
603        the `Format` dictionary exactly match the `Lexeme` values.
604        """
605        self.formatDict = formatDict
606        self.effectNames = effectNames
607        self.focalizationNames = focalizationNames
608        if (
609            len(successFailureIndicators) != 2
610        or any(len(i) != 1 for i in successFailureIndicators)
611        ):
612            raise ValueError(
613                f"Invalid success/failure indicators: must be a pair of"
614                f" length-1 strings. Got: {successFailureIndicators!r}"
615            )
616        self.successIndicator, self.failureIndicator = (
617            successFailureIndicators
618        )
619
620        # Check completeness for each dictionary
621        checkCompleteness('formatDict', self.formatDict, set(Lexeme))
622        checkCompleteness(
623            'effectNames',
624            self.effectNames,
625            valuesSet=set(get_args(base.EffectType))
626        )
627        checkCompleteness(
628            'focalizationNames',
629            self.focalizationNames,
630            valuesSet=set(get_args(base.DomainFocalization))
631        )
632
633        # Build some reverse lookup dictionaries for specific
634        # circumstances:
635        self.reverseFormat = {y: x for (x, y) in self.formatDict.items()}
636
637        self.effectModMap = {
638            self.formatDict[x]: x
639            for x in [
640                Lexeme.effectCharges,
641                Lexeme.sepOrDelay,
642                Lexeme.inCommon,
643                Lexeme.isHidden
644            ]
645        }

Sets up the parsing format. Requires a Format dictionary to define the specifics. Raises a ValueError unless the keys of the Format dictionary exactly match the Lexeme values.

formatDict
effectNames
focalizationNames
reverseFormat
effectModMap
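
For example, a format with different success/failure indicator letters (a sketch; any pair of length-1 strings is accepted):

pf = ParseFormat(successFailureIndicators=('y', 'n'))
pf.parseTransitionWithOutcomes('advance%yn')
# expected: ('advance', [True, False])
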
def lex(self, content: str) -> List[Union[Lexeme, str]]:
647    def lex(self, content: str) -> LexedTokens:
648        """
649        Applies `lex` using this format's lexeme mapping.
650        """
651        return lex(content, self.reverseFormat)

Applies lex using this format's lexeme mapping.
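
For instance, with the default format, where '*' is the tokenCount marker (a sketch):

pf = ParseFormat()
tokens = pf.lex('gold*10')
# expected: ['gold', Lexeme.tokenCount, '10'], ready for functions
# like parseItem or parseOneEffectArg below.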

def onOff(self, word: str) -> Optional[bool]:
653    def onOff(self, word: str) -> Optional[bool]:
654        """
655        Parses an on/off indicator and returns a boolean (`True` for on
656        and `False` for off). Returns `None` if the word isn't either
657        the 'on' or the 'off' word. Generates a `ParseWarning`
658        (and still returns `None`) if the word is a case-swapped version
659        of the 'on' or 'off' word and is not equal to either of them.
660        """
661        onWord = self.formatDict[Lexeme.stateOn]
662        offWord = self.formatDict[Lexeme.stateOff]
663
664        # Generate warning if we suspect a case error
665        if (
666            word.casefold() in (onWord, offWord)
667        and word not in (onWord, offWord)
668        ):
669            warnings.warn(
670                (
671                    f"Word '{word}' cannot be interpreted as an on/off"
672                    f" value, although it is almost one (the correct"
673                    f" values are '{onWord}' and '{offWord}')."
674                ),
675                ParseWarning
676            )
677
678        # return the appropriate value
679        if word == onWord:
680            return True
681        elif word == offWord:
682            return False
683        else:
684            return None

Parses an on/off indicator and returns a boolean (True for on and False for off). Returns None if the word isn't either the 'on' or the 'off' word. Generates a ParseWarning (and still returns None) if the word is a case-swapped version of the 'on' or 'off' word and is not equal to either of them.
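
For example, with the default '=on' and '=off' words:

pf = ParseFormat()
pf.onOff('=on')    # True
pf.onOff('=off')   # False
pf.onOff('maybe')  # None (not an on/off word)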

def matchingBrace( self, tokens: List[Union[Lexeme, str]], where: int, opener: int = <Lexeme.openCurly: 17>, closer: int = <Lexeme.closeCurly: 18>) -> int:
686    def matchingBrace(
687        self,
688        tokens: LexedTokens,
689        where: int,
690        opener: int = Lexeme.openCurly,
691        closer: int = Lexeme.closeCurly
692    ) -> int:
693        """
694        Returns the index within the given tokens list of the closing
695        curly brace which matches the open brace at the specified index.
696        You can specify custom `opener` and/or `closer` lexemes to find
697        matching pairs of other things. Raises a `ParseError` if there
698        is no opening brace at the specified index, or if there isn't a
699        matching closing brace. Handles nested braces of the specified
700        type.
701
702        Examples:
703        >>> pf = ParseFormat()
704        >>> ob = Lexeme.openCurly
705        >>> cb = Lexeme.closeCurly
706        >>> pf.matchingBrace([ob, cb], 0)
707        1
708        >>> pf.matchingBrace([ob, cb], 1)
709        Traceback (most recent call last):
710          ...
711        exploration.parsing.ParseError: ...
712        >>> pf.matchingBrace(['hi', ob, cb], 0)
713        Traceback (most recent call last):
714          ...
715        exploration.parsing.ParseError: ...
716        >>> pf.matchingBrace(['hi', ob, cb], 1)
717        2
718        >>> pf.matchingBrace(['hi', ob, 'lo', cb], 1)
719        3
720        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 1)
721        Traceback (most recent call last):
722          ...
723        exploration.parsing.ParseError: ...
724        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 0)
725        3
726        >>> pf.matchingBrace([ob, ob, cb, cb], 0)
727        3
728        >>> pf.matchingBrace([ob, ob, cb, cb], 1)
729        2
730        >>> pf.matchingBrace([ob, cb, ob, cb], 0)
731        1
732        >>> pf.matchingBrace([ob, cb, ob, cb], 2)
733        3
734        >>> pf.matchingBrace([ob, cb, cb, cb], 0)
735        1
736        >>> pf.matchingBrace([ob, ob, ob, cb], 0)
737        Traceback (most recent call last):
738          ...
739        exploration.parsing.ParseError: ...
740        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 0)
741        7
742        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 1)
743        6
744        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 2)
745        Traceback (most recent call last):
746          ...
747        exploration.parsing.ParseError: ...
748        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 3)
749        4
750        >>> op = Lexeme.openParen
751        >>> cp = Lexeme.closeParen
752        >>> pf.matchingBrace([ob, op, ob, cp], 1, op, cp)
753        3
754        """
755        if where >= len(tokens):
756            raise ParseError(
757                f"Out-of-bounds brace start: index {where} with"
758                f" {len(tokens)} tokens."
759            )
760        if tokens[where] != opener:
761            raise ParseError(
762                f"Can't find matching brace for token"
763                f" {repr(tokens[where])} at index {where} because it's"
764                f" not an open brace."
765            )
766
767        level = 1
768        for i in range(where + 1, len(tokens)):
769            token = tokens[i]
770            if token == opener:
771                level += 1
772            elif token == closer:
773                level -= 1
774                if level == 0:
775                    return i
776
777        raise ParseError(
778            f"Failed to find matching curly brace from index {where}."
779        )

Returns the index within the given tokens list of the closing curly brace which matches the open brace at the specified index. You can specify custom opener and/or closer lexemes to find matching pairs of other things. Raises a ParseError if there is no opening brace at the specified index, or if there isn't a matching closing brace. Handles nested braces of the specified type.

Examples:

>>> pf = ParseFormat()
>>> ob = Lexeme.openCurly
>>> cb = Lexeme.closeCurly
>>> pf.matchingBrace([ob, cb], 0)
1
>>> pf.matchingBrace([ob, cb], 1)
Traceback (most recent call last):
  ...
ParseError: ...
>>> pf.matchingBrace(['hi', ob, cb], 0)
Traceback (most recent call last):
  ...
ParseError: ...
>>> pf.matchingBrace(['hi', ob, cb], 1)
2
>>> pf.matchingBrace(['hi', ob, 'lo', cb], 1)
3
>>> pf.matchingBrace([ob, 'hi', 'lo', cb], 1)
Traceback (most recent call last):
  ...
ParseError: ...
>>> pf.matchingBrace([ob, 'hi', 'lo', cb], 0)
3
>>> pf.matchingBrace([ob, ob, cb, cb], 0)
3
>>> pf.matchingBrace([ob, ob, cb, cb], 1)
2
>>> pf.matchingBrace([ob, cb, ob, cb], 0)
1
>>> pf.matchingBrace([ob, cb, ob, cb], 2)
3
>>> pf.matchingBrace([ob, cb, cb, cb], 0)
1
>>> pf.matchingBrace([ob, ob, ob, cb], 0)
Traceback (most recent call last):
  ...
ParseError: ...
>>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 0)
7
>>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 1)
6
>>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 2)
Traceback (most recent call last):
  ...
ParseError: ...
>>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 3)
4
>>> op = Lexeme.openParen
>>> cp = Lexeme.closeParen
>>> pf.matchingBrace([ob, op, ob, cp], 1, op, cp)
3
def parseFocalization(self, word: str) -> Literal['singular', 'plural', 'spreading']:
781    def parseFocalization(self, word: str) -> base.DomainFocalization:
782        """
783        Parses a focalization type for a domain, recognizing the
784        names in `focalizationNames` (by default 'singular',
785        'plural', and 'spreading').
786        """
787        try:
788            return self.focalizationNames[word]
789        except KeyError:
790            raise ParseError(
791                f"Invalid domain focalization name {repr(word)}. Valid"
792                f" names are: {repr(list(self.focalizationNames))}."
793            )

Parses a focalization type for a domain, recognizing the names in focalizationNames (by default 'singular', 'plural', and 'spreading').
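
For example, with the default focalization names:

pf = ParseFormat()
pf.parseFocalization('plural')  # 'plural'
# Unrecognized names raise a ParseError.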

def parseTagValue( self, value: str) -> Union[bool, int, float, str, list, dict, NoneType, exploration.base.Requirement, List[Union[exploration.base.Challenge, exploration.base.Effect, exploration.base.Condition]]]:
795    def parseTagValue(self, value: str) -> base.TagValue:
796        """
797        Converts a string to a tag value, following these rules:
798
799        1. If the string is exactly one of 'None', 'True', or 'False', we
800            convert it to the corresponding Python value.
801        2. If the string can be converted to an integer without raising a
802            ValueError, we use that integer.
803        3. If the string can be converted to a float without raising a
804            ValueError, we use that float.
805        4. Otherwise, it remains a string.
806
807        Note that there is currently no syntax for using list, dictionary,
808        Requirement, or Consequence tag values.
809        TODO: Support those types?
810
811        Examples:
812
813        >>> pf = ParseFormat()
814        >>> pf.parseTagValue('hi')
815        'hi'
816        >>> pf.parseTagValue('3')
817        3
818        >>> pf.parseTagValue('3.0')
819        3.0
820        >>> pf.parseTagValue('True')
821        True
822        >>> pf.parseTagValue('False')
823        False
824        >>> pf.parseTagValue('None') is None
825        True
826        >>> pf.parseTagValue('none')
827        'none'
828        """
829        # TODO: Allow these keywords to be redefined?
830        if value == 'True':
831            return True
832        elif value == 'False':
833            return False
834        elif value == 'None':
835            return None
836        else:
837            try:
838                return int(value)
839            except ValueError:
840                try:
841                    return float(value)
842                except ValueError:
843                    return value

Converts a string to a tag value, following these rules:

  1. If the string is exactly one of 'None', 'True', or 'False', we convert it to the corresponding Python value.
  2. If the string can be converted to an integer without raising a ValueError, we use that integer.
  3. If the string can be converted to a float without raising a ValueError, we use that float.
  4. Otherwise, it remains a string.

Note that there is currently no syntax for using list, dictionary, Requirement, or Consequence tag values. TODO: Support those types?

Examples:

>>> pf = ParseFormat()
>>> pf.parseTagValue('hi')
'hi'
>>> pf.parseTagValue('3')
3
>>> pf.parseTagValue('3.0')
3.0
>>> pf.parseTagValue('True')
True
>>> pf.parseTagValue('False')
False
>>> pf.parseTagValue('None') is None
True
>>> pf.parseTagValue('none')
'none'
def unparseTagValue( self, value: Union[bool, int, float, str, list, dict, NoneType, exploration.base.Requirement, List[Union[exploration.base.Challenge, exploration.base.Effect, exploration.base.Condition]]]) -> str:
845    def unparseTagValue(self, value: base.TagValue) -> str:
846        """
847        Converts a tag value into a string that would be parsed back into a
848        tag value via `parseTagValue`. Currently does not work for list,
849        dictionary, Requirement, or Consequence values.
850        TODO: Those
851        """
852        return str(value)

Converts a tag value into a string that would be parsed back into a tag value via parseTagValue. Currently does not work for list, dictionary, Requirement, or Consequence values. TODO: Those
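
Since this is just str, simple values round-trip through parseTagValue:

pf = ParseFormat()
pf.unparseTagValue(3.5)                    # '3.5'
pf.parseTagValue(pf.unparseTagValue(3.5))  # 3.5
pf.unparseTagValue(None)                   # 'None'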

def hasZoneParts(self, name: str) -> bool:
854    def hasZoneParts(self, name: str) -> bool:
855        """
856        Returns true if the specified name contains zone parts (using
857        the `zoneSeparator`).
858        """
859        return self.formatDict[Lexeme.zoneSeparator] in name

Returns true if the specified name contains zone parts (using the zoneSeparator).
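
For example, with the default '::' zone separator:

pf = ParseFormat()
pf.hasZoneParts('fort::gate')  # True
pf.hasZoneParts('gate')        # False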

def splitZone(self, name: str) -> Tuple[List[str], str]:
861    def splitZone(
862        self,
863        name: str
864    ) -> Tuple[List[base.Zone], base.DecisionName]:
865        """
866        Splits a decision name that includes zone information into the
867        list-of-zones part and the decision part. If there is no zone
868        information in the name, the list-of-zones will be an empty
869        list.
870        """
871        sep = self.formatDict[Lexeme.zoneSeparator]
872        parts = name.split(sep)
873        return (list(parts[:-1]), parts[-1])

Splits a decision name that includes zone information into the list-of-zones part and the decision part. If there is no zone information in the name, the list-of-zones will be an empty list.
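
For example, with the default zone separator:

pf = ParseFormat()
pf.splitZone('world::fort::gate')  # (['world', 'fort'], 'gate')
pf.splitZone('gate')               # ([], 'gate')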

def prefixWithZone(self, name: str, zone: str) -> str:
875    def prefixWithZone(
876        self,
877        name: base.DecisionName,
878        zone: base.Zone
879    ) -> base.DecisionName:
880        """
881        Returns the given decision name, prefixed with the given zone
882        name. Does NOT check whether the decision name already includes
883        a prefix or not.
884        """
885        return zone + self.formatDict[Lexeme.zoneSeparator] + name

Returns the given decision name, prefixed with the given zone name. Does NOT check whether the decision name already includes a prefix or not.
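
For example (the inverse of one level of splitZone):

pf = ParseFormat()
pf.prefixWithZone('gate', 'fort')  # 'fort::gate'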

def parseAnyTransitionFromTokens( self, tokens: List[Union[Lexeme, str]], start: int = 0) -> Tuple[Tuple[str, List[bool]], int]:
887    def parseAnyTransitionFromTokens(
888        self,
889        tokens: LexedTokens,
890        start: int = 0
891    ) -> Tuple[base.TransitionWithOutcomes, int]:
892        """
893        Parses a `base.TransitionWithOutcomes` from a tokens list,
894        accepting either a transition name or a transition name followed
895        by a `Lexeme.withDetails` followed by a string of success and
896        failure indicator characters. Returns a tuple containing a
897        `base.TransitionWithOutcomes` and an integer indicating the end
898        index of the parsed item within the tokens.
899        """
900        # Normalize start index so we can do index math
901        if start < 0:
902            useIndex = len(tokens) + start
903        else:
904            useIndex = start
905
906        try:
907            first = tokens[useIndex]
908        except IndexError:
909            raise ParseError(
910                f"Invalid token index: {start!r} among {len(tokens)}"
911                f" tokens."
912            )
913
914        if isinstance(first, Lexeme):
915            raise ParseError(
916                f"Expecting a transition name (possibly with a"
917                f" success/failure indicator string) but first token is"
918                f" {first!r}."
919            )
920
921        try:
922            second = tokens[useIndex + 1]
923            third = tokens[useIndex + 2]
924        except IndexError:
925            return ((first, []), useIndex)
926
927        if second != Lexeme.withDetails or isinstance(third, Lexeme):
928            return ((first, []), useIndex)
929
930        outcomes = []
931        for char in third:
932            if char == self.successIndicator:
933                outcomes.append(True)
934            elif char == self.failureIndicator:
935                outcomes.append(False)
936            else:
937                return ((first, []), useIndex)
938
939        return ((first, outcomes), useIndex + 2)

Parses a base.TransitionWithOutcomes from a tokens list, accepting either a transition name or a transition name followed by a Lexeme.withDetails followed by a string of success and failure indicator characters. Returns a tuple containing a base.TransitionWithOutcomes and an integer indicating the end index of the parsed item within the tokens.
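
For example, with the default 's'/'f' indicators (a sketch following the logic above):

pf = ParseFormat()
pf.parseAnyTransitionFromTokens(['advance'])
# expected: (('advance', []), 0)
pf.parseAnyTransitionFromTokens(['advance', Lexeme.withDetails, 'sf'])
# expected: (('advance', [True, False]), 2)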

def parseTransitionWithOutcomes(self, content: str) -> Tuple[str, List[bool]]:
941    def parseTransitionWithOutcomes(
942        self,
943        content: str
944    ) -> base.TransitionWithOutcomes:
945        """
946        Takes a transition that may have outcomes listed as a series of
947        s/f strings after a colon and returns the corresponding
948        `TransitionWithOutcomes` tuple. Calls `lex` and then
949        `parseAnyTransitionFromTokens`.
950        """
951        return self.parseAnyTransitionFromTokens(self.lex(content))[0]

Takes a transition that may have outcomes listed as a series of s/f characters after the withDetails marker ('%' by default) and returns the corresponding TransitionWithOutcomes tuple. Calls lex and then parseAnyTransitionFromTokens.
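
For example, with the default format:

pf = ParseFormat()
pf.parseTransitionWithOutcomes('advance%sf')
# expected: ('advance', [True, False])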

def unparseTransitionWithOutocmes(self, transition: Union[str, Tuple[str, List[bool]]]) -> str:
953    def unparseTransitionWithOutocmes(
954        self,
955        transition: base.AnyTransition
956    ) -> str:
957        """
958        Turns a `base.AnyTransition` back into a string that would parse
959        to an equivalent `base.TransitionWithOutcomes` via
960        `parseTransitionWithOutcomes`. If a bare `base.Transition` is
961        given, returns a string that would result in a
962        `base.TransitionWithOutcomes` that has an empty outcomes
963        sequence.
964        """
965        if isinstance(transition, base.Transition):
966            return transition
967        elif (
968            isinstance(transition, tuple)
969        and len(transition) == 2
970        and isinstance(transition[0], base.Transition)
971        and isinstance(transition[1], list)
972        and all(isinstance(sfi, bool) for sfi in transition[1])
973        ):
974            if len(transition[1]) == 0:
975                return transition[0]
976            else:
977                result = transition[0] + self.formatDict[Lexeme.withDetails]
978                for outcome in transition[1]:
979                    if outcome:
980                        result += self.successIndicator
981                    else:
982                        result += self.failureIndicator
983                return result
984        else:
985            raise TypeError(
986                f"Invalid AnyTransition: neither a string, nor a"
987                f" length-2 tuple consisting of a string followed by a"
988                f" list of booleans. Got: {transition!r}"
989            )

Turns a base.AnyTransition back into a string that would parse to an equivalent base.TransitionWithOutcomes via parseTransitionWithOutcomes. If a bare base.Transition is given, returns a string that would result in a base.TransitionWithOutcomes that has an empty outcomes sequence.
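
For example, inverting the previous example:

pf = ParseFormat()
pf.unparseTransitionWithOutocmes(('advance', [True, False]))  # 'advance%sf'
pf.unparseTransitionWithOutocmes('advance')                   # 'advance'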

def parseSpecificTransition(self, content: str) -> Tuple[str, str]:
 991    def parseSpecificTransition(
 992        self,
 993        content: str
 994    ) -> Tuple[base.DecisionName, base.Transition]:
 995        """
 996        Splits a decision:transition pair to the decision and transition
 997        part, using a custom separator if one is defined.
 998        """
 999        sep = self.formatDict[Lexeme.withDetails]
1000        n = content.count(sep)
1001        if n == 0:
1002            raise ParseError(
1003                f"Cannot split '{content}' into a decision name and a"
1004                f" transition name (no separator '{sep}' found)."
1005            )
1006        elif n > 1:
1007            raise ParseError(
1008                f"Cannot split '{content}' into a decision name and a"
1009                f" transition name (too many ({n}) '{sep}' separators"
1010                f" found)."
1011            )
1012        else:
1013            return cast(
1014                Tuple[base.DecisionName, base.Transition],
1015                tuple(content.split(sep))
1016            )

Splits a decision:transition pair to the decision and transition part, using a custom separator if one is defined.
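
For example, with the default '%' separator:

pf = ParseFormat()
pf.parseSpecificTransition('fort%go')  # ('fort', 'go')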

def splitDirections(self, content: str) -> Tuple[Optional[str], Optional[str]]:
1018    def splitDirections(
1019        self,
1020        content: str
1021    ) -> Tuple[Optional[str], Optional[str]]:
1022        """
1023        Splits a piece of text using the 'Lexeme.reciprocalSeparator'
1024        into two pieces. If there is no separator, the second piece will
1025        be `None`; if either side of the separator is blank, that side
1026        will be `None`, and if there is more than one separator, a
1027        `ParseError` will be raised. Whitespace will be stripped from
1028        both sides of each result.
1029
1030        Examples:
1031
1032        >>> pf = ParseFormat()
1033        >>> pf.splitDirections('abc / def')
1034        ('abc', 'def')
1035        >>> pf.splitDirections('abc def ')
1036        ('abc def', None)
1037        >>> pf.splitDirections('abc def /')
1038        ('abc def', None)
1039        >>> pf.splitDirections('/abc def')
1040        (None, 'abc def')
1041        >>> pf.splitDirections('a/b/c') # doctest: +IGNORE_EXCEPTION_DETAIL
1042        Traceback (most recent call last):
1043          ...
1044        ParseError: ...
1045        """
1046        sep = self.formatDict[Lexeme.reciprocalSeparator]
1047        count = content.count(sep)
1048        if count > 1:
1049            raise ParseError(
1050                f"Too many split points ('{sep}') in content:"
1051                f" '{content}' (only one is allowed)."
1052            )
1053
1054        elif count == 1:
1055            before, after = content.split(sep)
1056            before = before.strip()
1057            after = after.strip()
1058            return (before or None, after or None)
1059
1060        else: # no split points
1061            stripped = content.strip()
1062            if stripped:
1063                return stripped, None
1064            else:
1065                return None, None

Splits a piece of text using the 'Lexeme.reciprocalSeparator' into two pieces. If there is no separator, the second piece will be None; if either side of the separator is blank, that side will be None, and if there is more than one separator, a ParseError will be raised. Whitespace will be stripped from both sides of each result.

Examples:

>>> pf = ParseFormat()
>>> pf.splitDirections('abc / def')
('abc', 'def')
>>> pf.splitDirections('abc def ')
('abc def', None)
>>> pf.splitDirections('abc def /')
('abc def', None)
>>> pf.splitDirections('/abc def')
(None, 'abc def')
>>> pf.splitDirections('a/b/c') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
  ...
ParseError: ...
def parseItem(self, item: str) -> Union[str, Tuple[str, int], Tuple[str, str]]:
1067    def parseItem(
1068        self,
1069        item: str
1070    ) -> Union[
1071        base.Capability,
1072        Tuple[base.Token, int],
1073        Tuple[base.MechanismName, base.MechanismState]
1074    ]:
1075        """
1076        Parses an item, which is a capability (just a string), a
1077        token-type*number pair (returned as a tuple with the number
1078        converted to an integer), or a mechanism-name:state pair
1079        (returned as a tuple with the state as a string). The
1080        'Lexeme.tokenCount' and `Lexeme.mechanismSeparator` format
1081        values determine the separators that this looks for.
1082        """
1083        tsep = self.formatDict[Lexeme.tokenCount]
1084        msep = self.formatDict[Lexeme.mechanismSeparator]
1085        if tsep in item:
1086            # It's a token w/ an associated count
1087            parts = item.split(tsep)
1088            if len(parts) != 2:
1089                raise ParseError(
1090                    f"Item '{item}' has a '{tsep}' but doesn't separate"
1091                    f" into a token type and a count."
1092                )
1093            typ, count = parts
1094            try:
1095                num = int(count)
1096            except ValueError:
1097                raise ParseError(
1098                    f"Item '{item}' has invalid token count '{count}'."
1099                )
1100
1101            return (typ, num)
1102        elif msep in item:
1103            parts = item.split(msep)
1104            mechanism = msep.join(parts[:-1])
1105            state = parts[-1]
1106            if mechanism.endswith(msep):
1107                # Just a zone-qualified name...
1108                return item
1109            else:
1110                return (mechanism, state)
1111        else:
1112            # It's just a capability
1113            return item

Parses an item, which is a capability (just a string), a token-type*number pair (returned as a tuple with the number converted to an integer), or a mechanism-name:state pair (returned as a tuple with the state as a string). The 'Lexeme.tokenCount' and Lexeme.mechanismSeparator format values determine the separators that this looks for.
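
For example, with the default '*' and ':' separators:

pf = ParseFormat()
pf.parseItem('sword')       # 'sword' (a capability)
pf.parseItem('gold*10')     # ('gold', 10)
pf.parseItem('gate:open')   # ('gate', 'open')
pf.parseItem('fort::gate')  # 'fort::gate' (zone-qualified name, left as-is)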

def unparseDecisionSpecifier(self, spec: exploration.base.DecisionSpecifier) -> str:
1115    def unparseDecisionSpecifier(self, spec: base.DecisionSpecifier) -> str:
1116        """
1117        Turns a decision specifier back into a string, which would be
1118        parsed as a decision specifier as part of various different
1119        things.
1120
1121        For example:
1122
1123        >>> pf = ParseFormat()
1124        >>> pf.unparseDecisionSpecifier(
1125        ...     base.DecisionSpecifier(None, None, 'where')
1126        ... )
1127        'where'
1128        >>> pf.unparseDecisionSpecifier(
1129        ...     base.DecisionSpecifier(None, 'zone', 'where')
1130        ... )
1131        'zone::where'
1132        >>> pf.unparseDecisionSpecifier(
1133        ...     base.DecisionSpecifier('domain', 'zone', 'where')
1134        ... )
1135        'domain//zone::where'
1136        >>> pf.unparseDecisionSpecifier(
1137        ...     base.DecisionSpecifier('domain', None, 'where')
1138        ... )
1139        'domain//where'
1140        """
1141        result = spec.name
1142        if spec.zone is not None:
1143            result = (
1144                spec.zone
1145              + self.formatDict[Lexeme.zoneSeparator]
1146              + result
1147            )
1148        if spec.domain is not None:
1149            result = (
1150                spec.domain
1151              + self.formatDict[Lexeme.domainSeparator]
1152              + result
1153            )
1154        return result

Turns a decision specifier back into a string, which would be parsed as a decision specifier as part of various different things.

For example:

>>> pf = ParseFormat()
>>> pf.unparseDecisionSpecifier(
...     base.DecisionSpecifier(None, None, 'where')
... )
'where'
>>> pf.unparseDecisionSpecifier(
...     base.DecisionSpecifier(None, 'zone', 'where')
... )
'zone::where'
>>> pf.unparseDecisionSpecifier(
...     base.DecisionSpecifier('domain', 'zone', 'where')
... )
'domain//zone::where'
>>> pf.unparseDecisionSpecifier(
...     base.DecisionSpecifier('domain', None, 'where')
... )
'domain//where'
def unparseMechanismSpecifier(self, spec: exploration.base.MechanismSpecifier) -> str:
1156    def unparseMechanismSpecifier(
1157        self,
1158        spec: base.MechanismSpecifier
1159    ) -> str:
1160        """
1161        Turns a mechanism specifier back into a string, which would be
1162        parsed as a mechanism specifier as part of various different
1163        things. Note that a mechanism specifier with a zone part but no
1164        decision part is not valid, since it would parse as a decision
1165        part instead.
1166
1167        For example:
1168
1169        >>> pf = ParseFormat()
1170        >>> pf.unparseMechanismSpecifier(
1171        ...     base.MechanismSpecifier(None, None, None, 'lever')
1172        ... )
1173        'lever'
1174        >>> pf.unparseMechanismSpecifier(
1175        ...     base.MechanismSpecifier('domain', 'zone', 'decision', 'door')
1176        ... )
1177        'domain//zone::decision::door'
1178        >>> pf.unparseMechanismSpecifier(
1179        ...     base.MechanismSpecifier('domain', None, None, 'door')
1180        ... )
1181        'domain//door'
1182        >>> pf.unparseMechanismSpecifier(
1183        ...     base.MechanismSpecifier(None, 'a', 'b', 'door')
1184        ... )
1185        'a::b::door'
1186        >>> pf.unparseMechanismSpecifier(
1187        ...     base.MechanismSpecifier(None, 'a', None, 'door')
1188        ... )
1189        Traceback (most recent call last):
1190        ...
1191        exploration.base.InvalidMechanismSpecifierError...
1192        >>> pf.unparseMechanismSpecifier(
1193        ...     base.MechanismSpecifier(None, None, 'a', 'door')
1194        ... )
1195        'a::door'
1196        """
1197        if spec.decision is None and spec.zone is not None:
1198            raise base.InvalidMechanismSpecifierError(
1199                f"Mechanism specifier has a zone part but no decision"
1200                f" part; it cannot be unparsed since it would parse"
1201                f" differently:\n{spec}"
1202            )
1203        result = spec.name
1204        if spec.decision is not None:
1205            result = (
1206                spec.decision
1207              + self.formatDict[Lexeme.zoneSeparator]
1208              + result
1209            )
1210        if spec.zone is not None:
1211            result = (
1212                spec.zone
1213              + self.formatDict[Lexeme.zoneSeparator]
1214              + result
1215            )
1216        if spec.domain is not None:
1217            result = (
1218                spec.domain
1219              + self.formatDict[Lexeme.domainSeparator]
1220              + result
1221            )
1222        return result

Turns a mechanism specifier back into a string, which would be parsed as a mechanism specifier as part of various different things. Note that a mechanism specifier with a zone part but no decision part is not valid, since it would parse as a decision part instead.

For example:

>>> pf = ParseFormat()
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier(None, None, None, 'lever')
... )
'lever'
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier('domain', 'zone', 'decision', 'door')
... )
'domain//zone::decision::door'
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier('domain', None, None, 'door')
... )
'domain//door'
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier(None, 'a', 'b', 'door')
... )
'a::b::door'
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier(None, 'a', None, 'door')
... )
Traceback (most recent call last):
...
exploration.base.InvalidMechanismSpecifierError...
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier(None, None, 'a', 'door')
... )
'a::door'
def effectType( self, effectMarker: str) -> Optional[Literal['gain', 'lose', 'set', 'toggle', 'deactivate', 'edit', 'goto', 'bounce', 'follow', 'save']]:
1224    def effectType(self, effectMarker: str) -> Optional[base.EffectType]:
1225        """
1226        Returns the `base.EffectType` string corresponding to the
1227        given effect marker string. Returns `None` for an unrecognized
1228        marker.
1229        """
1230        return self.effectNames.get(effectMarker)

Returns the base.EffectType string corresponding to the given effect marker string. Returns None for an unrecognized marker.
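
For example, with the default effect names:

pf = ParseFormat()
pf.effectType('gain')     # 'gain'
pf.effectType('unknown')  # None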

1232    def parseCommandFromTokens(
1233        self,
1234        tokens: LexedTokens,
1235        start: int = 0,
1236        end: int = -1
1237    ) -> commands.Command:
1238        """
1239        Given tokens that specify a `commands.Command`, parses that
1240        command and returns it. Really just turns the tokens back into
1241        strings and calls `commands.command`.
1242
1243        For example:
1244
1245        >>> pf = ParseFormat()
1246        >>> t = ['val', '5']
1247        >>> c = commands.command(*t)
1248        >>> pf.parseCommandFromTokens(t) == c
1249        True
1250        >>> t = ['op', Lexeme.tokenCount, '$val', '$val']
1251        >>> c = commands.command('op', '*', '$val', '$val')
1252        >>> pf.parseCommandFromTokens(t) == c
1253        True
1254        """
1255        start, end, nTokens = normalizeEnds(tokens, start, end)
1256        args: List[str] = []
1257        for token in tokens[start:end + 1]:
1258            if isinstance(token, Lexeme):
1259                args.append(self.formatDict[token])
1260            else:
1261                args.append(token)
1262
1263        if len(args) == 0:
1264            raise ParseError(
1265                f"No arguments for command:\n{tokens[start:end + 1]}"
1266            )
1267        return commands.command(*args)

Given tokens that specify a commands.Command, parses that command and returns it. Really just turns the tokens back into strings and calls commands.command.

For example:

>>> pf = ParseFormat()
>>> t = ['val', '5']
>>> c = commands.command(*t)
>>> pf.parseCommandFromTokens(t) == c
True
>>> t = ['op', Lexeme.tokenCount, '$val', '$val']
>>> c = commands.command('op', '*', '$val', '$val')
>>> pf.parseCommandFromTokens(t) == c
True
1269    def unparseCommand(self, command: commands.Command) -> str:
1270        """
1271        Turns a `Command` back into the string that would produce that
1272        command when parsed using `parseCommandList`.
1273
1274        Note that the results will be more explicit in some cases than what
1275        `parseCommandList` would accept as input.
1276
1277        For example:
1278
1279        >>> pf = ParseFormat()
1280        >>> pf.unparseCommand(
1281        ...     commands.LiteralValue(command='val', value='5')
1282        ... )
1283        'val 5'
1284        >>> pf.unparseCommand(
1285        ...     commands.LiteralValue(command='val', value='"5"')
1286        ... )
1287        'val "5"'
1288        >>> pf.unparseCommand(
1289        ...     commands.EstablishCollection(
1290        ...         command='empty',
1291        ...         collection='list'
1292        ...     )
1293        ... )
1294        'empty list'
1295        >>> pf.unparseCommand(
1296        ...     commands.AppendValue(command='append', value='$_')
1297        ... )
1298        'append $_'
1299        """
1300        candidate = None
1301        for k, v in commands.COMMAND_SETUP.items():
1302            if v[0] == type(command):
1303                if candidate is None:
1304                    candidate = k
1305                else:
1306                    raise ValueError(
1307                        f"COMMAND_SETUP includes multiple keys with"
1308                        f" {type(command)} as their value type:"
1309                        f" '{candidate}' and '{k}'."
1310                    )
1311
1312        if candidate is None:
1313            raise ValueError(
1314                f"COMMAND_SETUP has no key with {type(command)} as its"
1315                f" value type."
1316            )
1317
1318        result = candidate
1319        for x in command[1:]:
1320            # TODO: Is this hack good enough?
1321            result += ' ' + str(x)
1322        return result

Turns a Command back into the string that would produce that command when parsed using parseCommandList.

Note that the results will be more explicit in some cases than what parseCommandList would accept as input.

For example:

>>> pf = ParseFormat()
>>> pf.unparseCommand(
...     commands.LiteralValue(command='val', value='5')
... )
'val 5'
>>> pf.unparseCommand(
...     commands.LiteralValue(command='val', value='"5"')
... )
'val "5"'
>>> pf.unparseCommand(
...     commands.EstablishCollection(
...         command='empty',
...         collection='list'
...     )
... )
'empty list'
>>> pf.unparseCommand(
...     commands.AppendValue(command='append', value='$_')
... )
'append $_'
1324    def unparseCommandList(self, commands: List[commands.Command]) -> str:
1325        """
1326        Takes a list of commands and returns a string that would parse
1327        into them using `parseOneEffectArg`. The result contains
1328        newlines and indentation to make it easier to read.
1329
1330        For example:
1331
1332        >>> pf = ParseFormat()
1333        >>> pf.unparseCommandList(
1334        ...     [commands.command('val', '5'), commands.command('pop')]
1335        ... )
1336        '{\\n  val 5;\\n  pop;\\n}'
1337        """
1338        result = self.formatDict[Lexeme.openCurly]
1339        for cmd in commands:
1340            result += f'\n  {self.unparseCommand(cmd)};'
1341        if len(commands) > 0:
1342            result += '\n'
1343        return result + self.formatDict[Lexeme.closeCurly]

Takes a list of commands and returns a string that would parse into them using parseOneEffectArg. The result contains newlines and indentation to make it easier to read.

For example:

>>> pf = ParseFormat()
>>> pf.unparseCommandList(
...     [commands.command('val', '5'), commands.command('pop')]
... )
'{\n  val 5;\n  pop;\n}'
1345    def parseCommandListFromTokens(
1346        self,
1347        tokens: LexedTokens,
1348        start: int = 0
1349    ) -> Tuple[List[commands.Command], int]:
1350        """
1351        Parses a command list from a list of lexed tokens, which must
1352        start with `Lexeme.openCurly`. Returns the parsed command list
1353        as a list of `commands.Command` objects, along with the end
1354        index of that command list (which will be the matching curly
1355        brace).
1356        """
1357        end = self.matchingBrace(
1358            tokens,
1359            start,
1360            Lexeme.openCurly,
1361            Lexeme.closeCurly
1362        )
1363        parts = list(
1364            findSeparatedParts(
1365                tokens,
1366                Lexeme.consequenceSeparator,
1367                start + 1,
1368                end - 1,
1369                Lexeme.openCurly,
1370                Lexeme.closeCurly,
1371            )
1372        )
1373        return (
1374            [
1375                self.parseCommandFromTokens(tokens, fromIndex, toIndex)
1376                for fromIndex, toIndex in parts
1377                if fromIndex <= toIndex  # ignore empty parts
1378            ],
1379            end
1380        )

Parses a command list from a list of lexed tokens, which must start with Lexeme.openCurly. Returns the parsed command list as a list of commands.Command objects, along with the end index of that command list (which will be the matching curly brace).
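
For example, matching the parseOneEffectArg doctest below, which delegates command lists to this method:

pf = ParseFormat()
tokens = [Lexeme.openCurly, 'val', '5', Lexeme.closeCurly]
pf.parseCommandListFromTokens(tokens)
# expected: ([commands.command('val', '5')], 3)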

1382    def parseOneEffectArg(
1383        self,
1384        tokens: LexedTokens,
1385        start: int = 0,
1386        limit: Optional[int] = None
1387    ) -> Tuple[
1388        Union[
1389            base.Capability,  # covers 'str' possibility
1390            Tuple[base.Token, base.TokenCount],
1391            Tuple[Literal['skill'], base.Skill, base.Level],
1392            Tuple[base.MechanismSpecifier, base.MechanismState],
1393            base.DecisionSpecifier,
1394            base.DecisionID,
1395            Literal[Lexeme.inCommon, Lexeme.isHidden],
1396            Tuple[Literal[Lexeme.sepOrDelay, Lexeme.effectCharges], int],
1397            List[commands.Command]
1398        ],
1399        int
1400    ]:
1401        """
1402        Looks at tokens starting at the specified position and parses
1403        one or more of them as an effect argument (an argument that
1404        could be given to `base.effect`). Looks at various key `Lexeme`s
1405        to determine which type to use.
1406
1407        Items in the tokens list beyond the specified limit will not be
1408        considered, even when they in theory could be grouped with items
1409        up to the limit into a more complex argument.
1410
1411        For example:
1412
1413        >>> pf = ParseFormat()
1414        >>> pf.parseOneEffectArg(['hi'])
1415        ('hi', 0)
1416        >>> pf.parseOneEffectArg(['hi'], 1)
1417        Traceback (most recent call last):
1418        ...
1419        IndexError...
1420        >>> pf.parseOneEffectArg(['hi', 'bye'])
1421        ('hi', 0)
1422        >>> pf.parseOneEffectArg(['hi', 'bye'], 1)
1423        ('bye', 1)
1424        >>> pf.parseOneEffectArg(
1425        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1426        ...     0
1427        ... )
1428        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1429 name='gate'), 'open'), 2)
1430        >>> pf.parseOneEffectArg(
1431        ...     ['set', 'gate', Lexeme.mechanismSeparator, 'open'],
1432        ...     1
1433        ... )
1434        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1435 name='gate'), 'open'), 3)
1436        >>> pf.parseOneEffectArg(
1437        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1438        ...     1
1439        ... )
1440        Traceback (most recent call last):
1441        ...
1442        exploration.parsing.ParseError...
1443        >>> pf.parseOneEffectArg(
1444        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1445        ...     2
1446        ... )
1447        ('open', 2)
1448        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, '10'], 0)
1449        (('gold', 10), 2)
1450        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, 'ten'], 0)
1451        Traceback (most recent call last):
1452        ...
1453        exploration.parsing.ParseError...
1454        >>> pf.parseOneEffectArg([Lexeme.inCommon], 0)
1455        (<Lexeme.inCommon: ...>, 0)
1456        >>> pf.parseOneEffectArg([Lexeme.isHidden], 0)
1457        (<Lexeme.isHidden: ...>, 0)
1458        >>> pf.parseOneEffectArg([Lexeme.tokenCount, '3'], 0)
1459        Traceback (most recent call last):
1460        ...
1461        exploration.parsing.ParseError...
1462        >>> pf.parseOneEffectArg([Lexeme.effectCharges, '3'], 0)
1463        ((<Lexeme.effectCharges: ...>, 3), 1)
1464        >>> pf.parseOneEffectArg([Lexeme.tokenCount, 3], 0)  # int is a lexeme
1465        Traceback (most recent call last):
1466        ...
1467        exploration.parsing.ParseError...
1468        >>> pf.parseOneEffectArg([Lexeme.sepOrDelay, '-2'], 0)
1469        ((<Lexeme.sepOrDelay: ...>, -2), 1)
1470        >>> pf.parseOneEffectArg(['agility', Lexeme.skillLevel, '3'], 0)
1471        (('skill', 'agility', 3), 2)
1472        >>> pf.parseOneEffectArg(
1473        ...     [
1474        ...         'main',
1475        ...         Lexeme.domainSeparator,
1476        ...         'zone',
1477        ...         Lexeme.zoneSeparator,
1478        ...         'decision',
1479        ...         Lexeme.zoneSeparator,
1480        ...         'compass',
1481        ...         Lexeme.mechanismSeparator,
1482        ...         'north',
1483        ...         'south',
1484        ...         'east',
1485        ...         'west'
1486        ...     ],
1487        ...     0
1488        ... )
1489        ((MechanismSpecifier(domain='main', zone='zone',\
1490 decision='decision', name='compass'), 'north'), 8)
1491        >>> pf.parseOneEffectArg(
1492        ...     [
1493        ...         'before',
1494        ...         'main',
1495        ...         Lexeme.domainSeparator,
1496        ...         'zone',
1497        ...         Lexeme.zoneSeparator,
1498        ...         'decision',
1499        ...         Lexeme.zoneSeparator,
1500        ...         'compass',
1501        ...         'north',
1502        ...         'south',
1503        ...         'east',
1504        ...         'west'
1505        ...     ],
1506        ...     1
1507        ... )  # a mechanism specifier without a state will become a
1508        ...    # decision specifier
1509        (DecisionSpecifier(domain='main', zone='zone',\
1510 name='decision'), 5)
1511        >>> tokens = [
1512        ...     'set',
1513        ...     'main',
1514        ...     Lexeme.domainSeparator,
1515        ...     'zone',
1516        ...     Lexeme.zoneSeparator,
1517        ...     'compass',
1518        ...     'north',
1519        ...     'bounce',
1520        ... ]
1521        >>> pf.parseOneEffectArg(tokens, 0)
1522        ('set', 0)
1523        >>> pf.parseDecisionSpecifierFromTokens(tokens, 1)
1524        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1525        >>> pf.parseOneEffectArg(tokens, 1)
1526        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1527        >>> pf.parseOneEffectArg(tokens, 6)
1528        ('north', 6)
1529        >>> pf.parseOneEffectArg(tokens, 7)
1530        ('bounce', 7)
1531        >>> pf.parseOneEffectArg(
1532        ...     [
1533        ...         "fort", Lexeme.zoneSeparator, "gate",
1534        ...             Lexeme.mechanismSeparator, "open",
1535        ...     ],
1536        ...     0
1537        ... )
1538        ((MechanismSpecifier(domain=None, zone=None, decision='fort',\
1539 name='gate'), 'open'), 4)
1540        >>> pf.parseOneEffectArg(
1541        ...     [Lexeme.openCurly, 'val', '5', Lexeme.closeCurly],
1542        ...     0
1543        ... ) == ([commands.command('val', '5')], 3)
1544        True
1545        >>> a = [
1546        ...     Lexeme.openCurly, 'val', '5', Lexeme.closeCurly,
1547        ...     Lexeme.openCurly, 'append', Lexeme.consequenceSeparator,
1548        ...     'pop', Lexeme.closeCurly
1549        ... ]
1550        >>> cl = [
1551        ...     [commands.command('val', '5')],
1552        ...     [commands.command('append'), commands.command('pop')]
1553        ... ]
1554        >>> pf.parseOneEffectArg(a, 0) == (cl[0], 3)
1555        True
1556        >>> pf.parseOneEffectArg(a, 4) == (cl[1], 8)
1557        True
1558        >>> pf.parseOneEffectArg(a, 1)
1559        ('val', 1)
1560        >>> pf.parseOneEffectArg(a, 2)
1561        ('5', 2)
1562        >>> pf.parseOneEffectArg(a, 3)
1563        Traceback (most recent call last):
1564        ...
1565        exploration.parsing.ParseError...
1566        """
1567        start, limit, nTokens = normalizeEnds(
1568            tokens,
1569            start,
1570            limit if limit is not None else -1
1571        )
1572        if nTokens == 0:
1573            raise ParseError("No effect arguments available.")
1574
1575        first = tokens[start]
1576
1577        if nTokens == 1:
1578            if first in (Lexeme.inCommon, Lexeme.isHidden):
1579                return (first, start)
1580            elif not isinstance(first, str):
1581                raise ParseError(
1582                    f"Only one token and it's a special character"
1583                    f" ({first} = {repr(self.formatDict[first])})"
1584                )
1585            else:
1586                return (cast(base.Capability, first), start)
1587
1588        assert (nTokens > 1)
1589
1590        second = tokens[start + 1]
1591
1592        # Command lists start with an open curly brace and effect
1593        # modifiers start with a Lexeme, but nothing else does
1594        if first == Lexeme.openCurly:
1595            return self.parseCommandListFromTokens(tokens, start)
1596        elif first in (Lexeme.inCommon, Lexeme.isHidden):
1597            return (first, start)
1598        elif first in (Lexeme.sepOrDelay, Lexeme.effectCharges):
1599            if not isinstance(second, str):
1600                raise ParseError(
1601                    f"Token following a modifier that needs a count"
1602                    f" must be a string in tokens:"
1603                    f"\n{tokens[start:limit or len(tokens)]}"
1604                )
1605            try:
1606                val = int(second)
1607            except ValueError:
1608                raise ParseError(
1609                    f"Token following a modifier that needs a count"
1610                    f" must be convertible to an int:"
1611                    f"\n{tokens[start:limit or len(tokens)]}"
1612                )
1613
1614            first = cast(
1615                Literal[Lexeme.sepOrDelay, Lexeme.effectCharges],
1616                first
1617            )
1618            return ((first, val), start + 1)
1619        elif not isinstance(first, str):
1620            raise ParseError(
1621                f"First token must be a string unless it's a modifier"
1622                f" lexeme or command/reversion-set opener. Got:"
1623                f"\n{tokens[start:limit or len(tokens)]}"
1624            )
1625
1626        # If we have two strings in a row, then the first is our parsed
1627        # value alone and we'll parse the second separately.
1628        if isinstance(second, str):
1629            return (first, start)
1630        elif second in (Lexeme.inCommon, Lexeme.isHidden):
1631            return (first, start)
1632
1633        # At this point the second token is some other lexeme, so we
1634        # need at least 3 tokens to parse a longer combination.
1635        if nTokens < 3:
1636            return (first, start)
1637
1638        third = tokens[start + 2]
1639        if not isinstance(third, str):
1640            return (first, start)
1641
1642        second = cast(Lexeme, second)
1643        third = cast(str, third)
1644
1645        if second in (Lexeme.tokenCount, Lexeme.skillLevel):
1646            try:
1647                num = int(third)
1648            except ValueError:
1649                raise ParseError(
1650                    f"Invalid effect tokens: count for Tokens or level"
1651                    f" for Skill must be convertible to an integer."
1652                    f"\n{tokens[start:limit + 1]}"
1653                )
1654            if second == Lexeme.tokenCount:
1655                return ((first, num), start + 2)  # token/count pair
1656            else:
1657                return (('skill', first, num), start + 2)  # skill/level triple
1658
1659        elif second == Lexeme.mechanismSeparator:  # bare mechanism
1660            return (
1661                (
1662                    base.MechanismSpecifier(
1663                        domain=None,
1664                        zone=None,
1665                        decision=None,
1666                        name=first
1667                    ),
1668                    third
1669                ),
1670                start + 2
1671            )
1672
1673        elif second in (Lexeme.domainSeparator, Lexeme.zoneSeparator):
1674            try:
1675                mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
1676                    tokens,
1677                    start
1678                )  # works whether it's a mechanism or decision specifier...
1679            except ParseError:
1680                return self.parseDecisionSpecifierFromTokens(tokens, start)
1681            if mEnd + 2 > limit:
1682                # No room for following mechanism separator + state
1683                return self.parseDecisionSpecifierFromTokens(tokens, start)
1684            sep = tokens[mEnd + 1]
1685            after = tokens[mEnd + 2]
1686            if sep == Lexeme.mechanismSeparator:
1687                if not isinstance(after, str):
1688                    raise ParseError(
1689                        f"Mechanism separator not followed by state:"
1690                        f"\n{tokens[start]}"
1691                    )
1692                return ((mSpec, after), mEnd + 2)
1693            else:
1694                # No mechanism separator afterwards
1695                return self.parseDecisionSpecifierFromTokens(tokens, start)
1696
1697        else:  # unrecognized as a longer combo
1698            return (first, start)
1699
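A minimal usage sketch (not from the source itself): because the return value includes the index of the last token consumed, a caller can walk an entire token list by repeatedly calling parseOneEffectArg and resuming one position past the returned index, which is exactly what coalesceEffectArgs below does:

    # Hypothetical driver loop; assumes ParseFormat and Lexeme are
    # imported from exploration.parsing.
    pf = ParseFormat()
    tokens = ['coins', Lexeme.tokenCount, '3', 'fly']
    where = 0
    parsed = []
    while where < len(tokens):
        arg, argEnd = pf.parseOneEffectArg(tokens, where)
        parsed.append(arg)
        where = argEnd + 1  # resume after the last token consumed
    # parsed should now be [('coins', 3), 'fly']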
1700    def coalesceEffectArgs(
1701        self,
1702        tokens: LexedTokens,
1703        start: int = 0,
1704        end: int = -1
1705    ) -> Tuple[
1706        List[  # List of effect args
1707            Union[
1708                base.Capability,  # covers 'str' possibility
1709                Tuple[base.Token, base.TokenCount],
1710                Tuple[Literal['skill'], base.Skill, base.Level],
1711                Tuple[base.MechanismSpecifier, base.MechanismState],
1712                base.DecisionSpecifier,
1713                List[commands.Command],
1714                Set[str]
1715            ]
1716        ],
1717        Tuple[  # Slots for modifiers: common/hidden/charges/delay
1718            Optional[bool],
1719            Optional[bool],
1720            Optional[int],
1721            Optional[int],
1722        ]
1723    ]:
1724        """
1725        Given a region of a lexed tokens list which contains one or more
1726        effect arguments, combines token sequences representing things
1727        like capabilities, mechanism states, token counts, and skill
1728        levels into the tuples that would be passed to `base.effect`.
1729        Returns a tuple with two elements:
1730
1731        - First, a list that contains several different kinds of
1732            objects, each of which is distinguishable by its type or
1733            part of its value.
1734        - Next, a tuple with four entries for common, hidden, charges,
1735            and/or delay values based on the presence of modifier
1736            sequences. Any or all of these may be `None` if the relevant
1737            modifier was not present (the usual case).
1738
1739        For example:
1740
1741        >>> pf = ParseFormat()
1742        >>> pf.coalesceEffectArgs(["jump"])
1743        (['jump'], (None, None, None, None))
1744        >>> pf.coalesceEffectArgs(["coin", Lexeme.tokenCount, "3", "fly"])
1745        ([('coin', 3), 'fly'], (None, None, None, None))
1746        >>> pf.coalesceEffectArgs(
1747        ...     [
1748        ...         "fort", Lexeme.zoneSeparator, "gate",
1749        ...             Lexeme.mechanismSeparator, "open"
1750        ...     ]
1751        ... )
1752        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1753 name='gate'), 'open')], (None, None, None, None))
1754        >>> pf.coalesceEffectArgs(
1755        ...     [
1756        ...         "main", Lexeme.domainSeparator, "cliff"
1757        ...     ]
1758        ... )
1759        ([DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1760 (None, None, None, None))
1761        >>> pf.coalesceEffectArgs(
1762        ...     [
1763        ...         "door", Lexeme.mechanismSeparator, "open"
1764        ...     ]
1765        ... )
1766        ([(MechanismSpecifier(domain=None, zone=None, decision=None,\
1767 name='door'), 'open')], (None, None, None, None))
1768        >>> pf.coalesceEffectArgs(
1769        ...     [
1770        ...         "fort", Lexeme.zoneSeparator, "gate",
1771        ...             Lexeme.mechanismSeparator, "open",
1772        ...         "canJump",
1773        ...         "coins", Lexeme.tokenCount, "3",
1774        ...         Lexeme.inCommon,
1775        ...         "agility", Lexeme.skillLevel, "-1",
1776        ...         Lexeme.sepOrDelay, "0",
1777        ...         "main", Lexeme.domainSeparator, "cliff"
1778        ...     ]
1779        ... )
1780        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1781 name='gate'), 'open'), 'canJump', ('coins', 3), ('skill', 'agility', -1),\
1782 DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1783 (True, None, None, 0))
1784        >>> pf.coalesceEffectArgs(["bounce", Lexeme.isHidden])
1785        (['bounce'], (None, True, None, None))
1786        >>> pf.coalesceEffectArgs(
1787        ...     ["goto", "3", Lexeme.inCommon, Lexeme.isHidden]
1788        ... )
1789        (['goto', '3'], (True, True, None, None))
1790        """
1791        start, end, nTokens = normalizeEnds(tokens, start, end)
1792        where = start
1793        result: List[  # List of effect args
1794            Union[
1795                base.Capability,  # covers 'str' possibility
1796                Tuple[base.Token, base.TokenCount],
1797                Tuple[Literal['skill'], base.Skill, base.Level],
1798                Tuple[base.MechanismSpecifier, base.MechanismState],
1799                base.DecisionSpecifier,
1800                List[commands.Command],
1801                Set[str]
1802            ]
1803        ] = []
1804        inCommon: Optional[bool] = None
1805        isHidden: Optional[bool] = None
1806        charges: Optional[int] = None
1807        delay: Optional[int] = None
1808        while where <= end:
1809            following, thisEnd = self.parseOneEffectArg(tokens, where, end)
1810            if following == Lexeme.inCommon:
1811                if inCommon is not None:
1812                    raise ParseError(
1813                        f"In-common effect modifier specified more than"
1814                        f" once in effect args:"
1815                        f"\n{tokens[start:end + 1]}"
1816                    )
1817                inCommon = True
1818            elif following == Lexeme.isHidden:
1819                if isHidden is not None:
1820                    raise ParseError(
1821                        f"Is-hidden effect modifier specified more than"
1822                        f" once in effect args:"
1823                        f"\n{tokens[start:end + 1]}"
1824                    )
1825                isHidden = True
1826            elif (
1827                isinstance(following, tuple)
1828            and len(following) == 2
1829            and following[0] in (Lexeme.effectCharges, Lexeme.sepOrDelay)
1830            and isinstance(following[1], int)
1831            ):
1832                if following[0] == Lexeme.effectCharges:
1833                    if charges is not None:
1834                        raise ParseError(
1835                            f"Charges effect modifier specified more than"
1836                            f" once in effect args:"
1837                            f"\n{tokens[start:end + 1]}"
1838                        )
1839                    charges = following[1]
1840                else:
1841                    if delay is not None:
1842                        raise ParseError(
1843                            f"Delay effect modifier specified more than"
1844                            f" once in effect args:"
1845                            f"\n{tokens[start:end + 1]}"
1846                        )
1847                    delay = following[1]
1848            elif (
1849                    isinstance(following, base.Capability)
1850                 or (
1851                    isinstance(following, tuple)
1852                and len(following) == 2
1853                and isinstance(following[0], base.Token)
1854                and isinstance(following[1], base.TokenCount)
1855                ) or (
1856                    isinstance(following, tuple)
1857                and len(following) == 3
1858                and following[0] == 'skill'
1859                and isinstance(following[1], base.Skill)
1860                and isinstance(following[2], base.Level)
1861                ) or (
1862                    isinstance(following, tuple)
1863                and len(following) == 2
1864                and isinstance(following[0], base.MechanismSpecifier)
1865                and isinstance(following[1], base.MechanismState)
1866                ) or (
1867                    isinstance(following, base.DecisionSpecifier)
1868                ) or (
1869                    isinstance(following, list)
1870                and all(isinstance(item, tuple) for item in following)
1871                    # TODO: Stricter command list check here?
1872                ) or (
1873                    isinstance(following, set)
1874                and all(isinstance(item, str) for item in following)
1875                )
1876            ):
1877                result.append(following)
1878            else:
1879                raise ParseError(f"Invalid coalesced argument: {following}")
1880            where = thisEnd + 1
1881
1882        return (result, (inCommon, isHidden, charges, delay))
1883
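A hedged example of unpacking both return values of coalesceEffectArgs (assuming the default format, and that lex turns "coins*3 +c" into the kind of token list shown in the doctests above):

    pf = ParseFormat()
    args, (inCommon, isHidden, charges, delay) = pf.coalesceEffectArgs(
        pf.lex("coins*3 +c")
    )
    # args should be [('coins', 3)] and inCommon should be True; the
    # other three modifier slots remain None.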
1884    def parseEffectFromTokens(
1885        self,
1886        tokens: LexedTokens,
1887        start: int = 0,
1888        end: int = -1
1889    ) -> base.Effect:
1890        """
1891        Given a region of a list of lexed tokens specifying an effect,
1892        returns the `Effect` object that those tokens specify.
1893        """
1894        start, end, nTokens = normalizeEnds(tokens, start, end)
1895
1896        # Check for empty list
1897        if nTokens == 0:
1898            raise ParseError(
1899                "Effect must include at least a type."
1900            )
1901
1902        firstPart = tokens[start]
1903
1904        if isinstance(firstPart, Lexeme):
1905            raise ParseError(
1906                f"First part of effect must be an effect type. Got"
1907                f" {firstPart} ({repr(self.formatDict[firstPart])})."
1908            )
1909
1910        firstPart = cast(str, firstPart)
1911
1912        # Get the effect type
1913        fType = self.effectType(firstPart)
1914
1915        if fType is None:
1916            raise ParseError(
1917                f"Unrecognized effect type {firstPart!r}. Check the"
1918                f" EffectType entries in the effect names dictionary."
1919            )
1920
1921        if start + 1 > end:  # No tokens left: set empty args
1922            groupedArgs: List[
1923                Union[
1924                    base.Capability,  # covers 'str' possibility
1925                    Tuple[base.Token, base.TokenCount],
1926                    Tuple[Literal['skill'], base.Skill, base.Level],
1927                    Tuple[base.MechanismSpecifier, base.MechanismState],
1928                    base.DecisionSpecifier,
1929                    List[commands.Command],
1930                    Set[str]
1931                ]
1932            ] = []
1933            modifiers: Tuple[
1934                Optional[bool],
1935                Optional[bool],
1936                Optional[int],
1937                Optional[int]
1938            ] = (None, None, None, None)
1939        else:  # Coalesce remaining tokens if there are any
1940            groupedArgs, modifiers = self.coalesceEffectArgs(
1941                tokens,
1942                start + 1,
1943                end
1944            )
1945
1946        # Set up arguments for base.effect and handle modifiers first
1947        args: Dict[
1948            str,
1949            Union[
1950                None,
1951                base.ContextSpecifier,
1952                base.Capability,
1953                Tuple[base.Token, base.TokenCount],
1954                Tuple[Literal['skill'], base.Skill, base.Level],
1955                Tuple[base.MechanismSpecifier, base.MechanismState],
1956                Tuple[base.MechanismSpecifier, List[base.MechanismState]],
1957                List[base.Capability],
1958                base.AnyDecisionSpecifier,
1959                Tuple[base.AnyDecisionSpecifier, base.FocalPointName],
1960                bool,
1961                int,
1962                base.SaveSlot,
1963                Tuple[base.SaveSlot, Set[str]]
1964            ]
1965        ] = {}
1966        if modifiers[0]:
1967            args['applyTo'] = 'common'
1968        if modifiers[1]:
1969            args['hidden'] = True
1970        else:
1971            args['hidden'] = False
1972        if modifiers[2] is not None:
1973            args['charges'] = modifiers[2]
1974        if modifiers[3] is not None:
1975            args['delay'] = modifiers[3]
1976
1977        # Now handle the main effect-type-based argument
1978        if fType in ("gain", "lose"):
1979            if len(groupedArgs) != 1:
1980                raise ParseError(
1981                    f"'{fType}' effect must have exactly one grouped"
1982                    f" argument (got {len(groupedArgs)}):\n{groupedArgs}"
1983                )
1984            thing = groupedArgs[0]
1985            if isinstance(thing, tuple):
1986                if len(thing) == 2:
1987                    if (
1988                        not isinstance(thing[0], base.Token)
1989                     or not isinstance(thing[1], base.TokenCount)
1990                    ):
1991                        raise ParseError(
1992                            f"'{fType}' effect grouped arg pair must be a"
1993                            f" (token, amount) pair. Got:\n{thing}"
1994                        )
1995                elif len(thing) == 3:
1996                    if (
1997                        thing[0] != 'skill'
1998                     or not isinstance(thing[1], base.Skill)
1999                     or not isinstance(thing[2], base.Level)
2000                    ):
2001                        raise ParseError(
2002                            f"'{fType}' effect grouped arg triple must be"
2003                            f" a ('skill', name, level) triple. Got:\n{thing}"
2004                        )
2005                else:
2006                    raise ParseError(
2007                        f"'{fType}' effect grouped arg tuple must have"
2008                        f" length 2 or 3. Got (length {len(thing)}):\n{thing}"
2009                    )
2010            elif not isinstance(thing, base.Capability):
2011                raise ParseError(
2012                    f"'{fType}' effect grouped arg must be a capability"
2013                    f" or a (token, amount) tuple. Got:\n{thing}"
2014                )
2015            args[fType] = thing
2016            return base.effect(**args)  # type:ignore
2017
2018        elif fType == "set":
2019            if len(groupedArgs) != 1:
2020                raise ParseError(
2021                    f"'{fType}' effect must have exactly one grouped"
2022                    f" argument (got {len(groupedArgs)}):\n{groupedArgs}"
2023                )
2024            setVal = groupedArgs[0]
2025            if not isinstance(
2026                setVal,
2027                tuple
2028            ):
2029                raise ParseError(
2030                    f"'{fType}' effect grouped arg must be a tuple. Got:"
2031                    f"\n{setVal}"
2032                )
2033            if len(setVal) == 2:
2034                setWhat, setTo = setVal
2035                if (
2036                    isinstance(setWhat, base.Token)
2037                and isinstance(setTo, base.TokenCount)
2038                ) or (
2039                    isinstance(setWhat, base.MechanismSpecifier)
2040                and isinstance(setTo, base.MechanismState)
2041                ):
2042                    args[fType] = setVal
2043                    return base.effect(**args)  # type:ignore
2044                else:
2045                    raise ParseError(
2046                        f"Invalid '{fType}' effect grouped args:"
2047                        f"\n{groupedArgs}"
2048                    )
2049            elif len(setVal) == 3:
2050                indicator, whichSkill, setTo = setVal
2051                if (
2052                    indicator == 'skill'
2053                and isinstance(whichSkill, base.Skill)
2054                and isinstance(setTo, base.Level)
2055                ):
2056                    args[fType] = setVal
2057                    return base.effect(**args)  # type:ignore
2058                else:
2059                    raise ParseError(
2060                        f"Invalid '{fType}' effect grouped args (not a"
2061                        f" skill):\n{groupedArgs}"
2062                    )
2063            else:
2064                raise ParseError(
2065                    f"Invalid '{fType}' effect grouped args (wrong"
2066                    f" length tuple):\n{groupedArgs}"
2067                )
2068
2069        elif fType == "toggle":
2070            if len(groupedArgs) == 0:
2071                raise ParseError(
2072                    f"'{fType}' effect must have at least one grouped"
2073                    f" argument. Got:\n{groupedArgs}"
2074                )
2075            if (
2076                isinstance(groupedArgs[0], tuple)
2077            and len(groupedArgs[0]) == 2
2078            and isinstance(groupedArgs[0][0], base.MechanismSpecifier)
2079            and isinstance(groupedArgs[0][1], base.MechanismState)
2080            and all(
2081                    isinstance(a, base.MechanismState)
2082                    for a in groupedArgs[1:]
2083                )
2084            ):  # a mechanism toggle
2085                args[fType] = (
2086                    groupedArgs[0][0],
2087                    cast(
2088                        List[base.MechanismState],
2089                        [groupedArgs[0][1]] + groupedArgs[1:]
2090                    )
2091                )
2092                return base.effect(**args)  # type:ignore
2093            elif all(isinstance(a, base.Capability) for a in groupedArgs):
2094                # a capability toggle
2095                args[fType] = cast(List[base.Capability], groupedArgs)
2096                return base.effect(**args)  # type:ignore
2097            else:
2098                raise ParseError(
2099                    f"Invalid arguments for '{fType}' effect. Got:"
2100                    f"\n{groupedArgs}"
2101                )
2102
2103        elif fType in ("bounce", "deactivate"):
2104            if len(groupedArgs) != 0:
2105                raise ParseError(
2106                    f"'{fType}' effect may not include any"
2107                    f" arguments. Got {len(groupedArgs)}:"
2108                    f"\n{groupedArgs}"
2109                )
2110            args[fType] = True
2111            return base.effect(**args)  # type:ignore
2112
2113        elif fType == "follow":
2114            if len(groupedArgs) != 1:
2115                raise ParseError(
2116                    f"'{fType}' effect must include exactly one"
2117                    f" argument. Got {len(groupedArgs)}:"
2118                    f"\n{groupedArgs}"
2119                )
2120
2121            transition = groupedArgs[0]
2122            if not isinstance(transition, base.Transition):
2123                raise ParseError(
2124                    f"Invalid argument for '{fType}' effect. Needed a"
2125                    f" transition but got:\n{groupedArgs}"
2126                )
2127            args[fType] = transition
2128            return base.effect(**args)  # type:ignore
2129
2130        elif fType == "edit":
2131            if len(groupedArgs) == 0:
2132                raise ParseError(
2133                    "An 'edit' effect requires at least one argument."
2134                )
2135            for i, arg in enumerate(groupedArgs):
2136                if not isinstance(arg, list):
2137                    raise ParseError(
2138                        f"'edit' effect argument {i} is not a sub-list:"
2139                        f"\n  {arg!r}"
2140                        f"\nAmong arguments:"
2141                        f"\n  {groupedArgs}"
2142                    )
2143                for j, cmd in enumerate(arg):
2144                    if not isinstance(cmd, tuple):
2145                        raise ParseError(
2146                            f"'edit' effect argument {i} contains"
2147                            f" non-tuple part {j}:"
2148                            f"\n  {cmd!r}"
2149                            f"\nAmong arguments:"
2150                            f"\n  {groupedArgs}"
2151                        )
2152
2153            args[fType] = groupedArgs  # type:ignore
2154            return base.effect(**args)  # type:ignore
2155
2156        elif fType == "goto":
2157            if len(groupedArgs) not in (1, 2):
2158                raise ParseError(
2159                    f"A 'goto' effect must include either one or two"
2160                    f" grouped arguments. Got {len(groupedArgs)}:"
2161                    f"\n{groupedArgs}"
2162                )
2163
2164            first = groupedArgs[0]
2165            if not isinstance(
2166                first,
2167                (base.DecisionName, base.DecisionSpecifier)
2168            ):
2169                raise ParseError(
2170                    f"'{fType}' effect must first specify a destination"
2171                    f" decision. Got:\n{groupedArgs}"
2172                )
2173
2174            # Check if it's really a decision ID
2175            dSpec: base.AnyDecisionSpecifier
2176            if isinstance(first, base.DecisionName):
2177                try:
2178                    dSpec = int(first)
2179                except ValueError:
2180                    dSpec = first
2181            else:
2182                dSpec = first
2183
2184            if len(groupedArgs) == 2:
2185                second = groupedArgs[1]
2186                if not isinstance(second, base.FocalPointName):
2187                    raise ParseError(
2188                        f"'{fType}' effect must have a focal point name"
2189                        f" if it has a second part. Got:\n{groupedArgs}"
2190                    )
2191                args[fType] = (dSpec, second)
2192            else:
2193                args[fType] = dSpec
2194
2195            return base.effect(**args)  # type:ignore
2196
2197        elif fType == "save":
2198            if len(groupedArgs) not in (0, 1):
2199                raise ParseError(
2200                    f"'{fType}' effect must include exactly zero or one"
2201                    f" argument(s). Got {len(groupedArgs)}:"
2202                    f"\n{groupedArgs}"
2203                )
2204
2205            if len(groupedArgs) == 1:
2206                slot = groupedArgs[0]
2207            else:
2208                slot = base.DEFAULT_SAVE_SLOT
2209            if not isinstance(slot, base.SaveSlot):
2210                raise ParseError(
2211                    f"Invalid argument for '{fType}' effect. Needed a"
2212                    f" save slot but got:\n{groupedArgs}"
2213                )
2214            args[fType] = slot
2215            return base.effect(**args)  # type:ignore
2216
2217        else:
2218            raise ParseError(f"Invalid effect type: '{fType}'.")
2219
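An illustrative sketch for parseEffectFromTokens (mirroring the parseEffect doctests below, which delegate here via lex):

    pf = ParseFormat()
    # ['gain', 'jump'] is the lexed form of "gain jump":
    assert pf.parseEffectFromTokens(['gain', 'jump']) == base.effect(gain='jump')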
2220    def parseEffect(self, effectStr: str) -> base.Effect:
2221        """
2222        Works like `parseEffectFromTokens` but starts with a raw string.
2223        For example:
2224
2225        >>> pf = ParseFormat()
2226        >>> pf.parseEffect("gain jump") == base.effect(gain='jump')
2227        True
2228        >>> pf.parseEffect("set door:open") == base.effect(
2229        ...     set=(
2230        ...         base.MechanismSpecifier(None, None, None, 'door'),
2231        ...         'open'
2232        ...     )
2233        ... )
2234        True
2235        >>> pf.parseEffect("set coins*10") == base.effect(set=('coins', 10))
2236        True
2237        >>> pf.parseEffect("set agility^3") == base.effect(
2238        ...     set=('skill', 'agility', 3)
2239        ... )
2240        True
2241        """
2242        return self.parseEffectFromTokens(self.lex(effectStr))
2243
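Modifier suffixes also parse back into the effect dictionary; a hedged example based on the round-trip doctests in unparseEffect below:

    pf = ParseFormat()
    e = pf.parseEffect("gain gold*5 ,1 =2")
    # e should equal base.effect(gain=('gold', 5), delay=1, charges=2),
    # i.e. a delayed, limited-charges token gain:
    assert e['delay'] == 1 and e['charges'] == 2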
2244    def unparseEffect(self, effect: base.Effect) -> str:
2245        """
2246        The opposite of `parseEffect`; turns an effect back into a
2247        string representation.
2248
2249        For example:
2250
2251        >>> pf = ParseFormat()
2252        >>> e = {
2253        ...     "type": "gain",
2254        ...     "applyTo": "active",
2255        ...     "value": "flight",
2256        ...     "delay": None,
2257        ...     "charges": None,
2258        ...     "hidden": False
2259        ... }
2260        >>> pf.unparseEffect(e)
2261        'gain flight'
2262        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2263        True
2264        >>> s = 'gain flight'
2265        >>> pf.unparseEffect(pf.parseEffect(s)) == s
2266        True
2267        >>> s2 = '  gain\\nflight'
2268        >>> pf.unparseEffect(pf.parseEffect(s2)) == s
2269        True
2270        >>> e = {
2271        ...     "type": "gain",
2272        ...     "applyTo": "active",
2273        ...     "value": ("gold", 5),
2274        ...     "delay": 1,
2275        ...     "charges": 2,
2276        ...     "hidden": False
2277        ... }
2278        >>> pf.unparseEffect(e)
2279        'gain gold*5 ,1 =2'
2280        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2281        True
2282        >>> e = {
2283        ...     "type": "set",
2284        ...     "applyTo": "active",
2285        ...     "value": (
2286        ...         base.MechanismSpecifier(None, None, None, "gears"),
2287        ...         "on"
2288        ...     ),
2289        ...     "delay": None,
2290        ...     "charges": 1,
2291        ...     "hidden": False
2292        ... }
2293        >>> pf.unparseEffect(e)
2294        'set gears:on =1'
2295        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2296        True
2297        >>> e = {
2298        ...     "type": "toggle",
2299        ...     "applyTo": "active",
2300        ...     "value": ["red", "blue"],
2301        ...     "delay": None,
2302        ...     "charges": None,
2303        ...     "hidden": False
2304        ... }
2305        >>> pf.unparseEffect(e)
2306        'toggle red blue'
2307        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2308        True
2309        >>> e = {
2310        ...     "type": "toggle",
2311        ...     "applyTo": "active",
2312        ...     "value": (
2313        ...         base.MechanismSpecifier(None, None, None, "switch"),
2314        ...         ["on", "off"]
2315        ...     ),
2316        ...     "delay": None,
2317        ...     "charges": None,
2318        ...     "hidden": False
2319        ... }
2320        >>> pf.unparseEffect(e)
2321        'toggle switch:on off'
2322        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2323        True
2324        >>> e = {
2325        ...     "type": "deactivate",
2326        ...     "applyTo": "active",
2327        ...     "value": None,
2328        ...     "delay": 2,
2329        ...     "charges": None,
2330        ...     "hidden": False
2331        ... }
2332        >>> pf.unparseEffect(e)
2333        'deactivate ,2'
2334        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2335        True
2336        >>> e = {
2337        ...     "type": "goto",
2338        ...     "applyTo": "common",
2339        ...     "value": 3,
2340        ...     "delay": None,
2341        ...     "charges": None,
2342        ...     "hidden": False
2343        ... }
2344        >>> pf.unparseEffect(e)
2345        'goto 3 +c'
2346        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2347        True
2348        >>> e = {
2349        ...     "type": "goto",
2350        ...     "applyTo": "common",
2351        ...     "value": 3,
2352        ...     "delay": None,
2353        ...     "charges": None,
2354        ...     "hidden": True
2355        ... }
2356        >>> pf.unparseEffect(e)
2357        'goto 3 +c +h'
2358        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2359        True
2360        >>> e = {
2361        ...     "type": "goto",
2362        ...     "applyTo": "active",
2363        ...     "value": 'home',
2364        ...     "delay": None,
2365        ...     "charges": None,
2366        ...     "hidden": False
2367        ... }
2368        >>> pf.unparseEffect(e)
2369        'goto home'
2370        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2371        True
2372        >>> e = base.effect(edit=[
2373        ...     [
2374        ...         commands.command('val', '5'),
2375        ...         commands.command('empty', 'list'),
2376        ...         commands.command('append', '$_')
2377        ...     ],
2378        ...     [
2379        ...         commands.command('val', '11'),
2380        ...         commands.command('assign', 'var', '$_'),
2381        ...         commands.command('op', '+', '$var', '$var')
2382        ...     ],
2383        ... ])
2384        >>> pf.unparseEffect(e)
2385        'edit {\\n  val 5;\\n  empty list;\\n  append $_;\\n}\
2386 {\\n  val 11;\\n  assign var $_;\\n  op + $var $var;\\n}'
2387        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2388        True
2389        """
2390        result: List[str] = []
2391
2392        # Reverse the effect type into a marker
2393        eType = effect['type']
2394        for key, val in self.effectNames.items():
2395            if val == eType:
2396                if len(result) != 0:
2397                    raise ParseError(
2398                        f"Effect map contains multiple matching entries"
2399                        f" for effect type '{effect['type']}':"
2400                        f" '{result[0]}' and '{key}'"
2401                    )
2402                result.append(key)
2403                # Don't break 'cause we'd like to check uniqueness
2404
2405        eVal = effect['value']
2406        if eType in ('gain', 'lose'):
2407            eVal = cast(Union[base.Capability, Tuple[base.Token, int]], eVal)
2408            if isinstance(eVal, str):  # a capability
2409                result.append(eVal)
2410            else:  # a token
2411                result.append(
2412                    eVal[0]
2413                  + self.formatDict[Lexeme.tokenCount]
2414                  + str(eVal[1])
2415                )
2416        elif eType == 'set':
2417            eVal = cast(
2418                # TODO: Add skill level setting here & elsewhere
2419                Union[
2420                    Tuple[base.Token, base.TokenCount],
2421                    Tuple[base.MechanismName, base.MechanismState]
2422                ],
2423                eVal
2424            )
2425            if len(eVal) != 2:
2426                raise ValueError(
2427                    f"'set' effect has non-length-2 value:"
2428                    f"\n  {repr(effect)}"
2429                )
2430            if isinstance(eVal[1], int):  # a token count
2431                result.append(eVal[0])
2432                result.append(self.formatDict[Lexeme.tokenCount])
2433                result.append(str(eVal[1]))
2434            else:  # a mechanism
2435                if isinstance(eVal[0], base.MechanismSpecifier):
2436                    mSpec = self.unparseMechanismSpecifier(eVal[0])
2437                else:
2438                    assert isinstance(eVal[0], base.MechanismName)
2439                    mSpec = eVal[0]
2440                result.append(
2441                    mSpec
2442                  + self.formatDict[Lexeme.mechanismSeparator]
2443                  + eVal[1]
2444                )
2445        elif eType == 'toggle':
2446            if isinstance(eVal, tuple):  # mechanism states
2447                tSpec, states = cast(
2448                    Tuple[
2449                        base.AnyMechanismSpecifier,
2450                        List[base.MechanismState]
2451                    ],
2452                    eVal
2453                )
2454                firstState = states[0]
2455                restStates = states[1:]
2456                if isinstance(tSpec, base.MechanismSpecifier):
2457                    mStr = self.unparseMechanismSpecifier(tSpec)
2458                else:
2459                    mStr = str(tSpec)
2460                result.append(
2461                    mStr
2462                  + self.formatDict[Lexeme.mechanismSeparator]
2463                  + firstState
2464                )
2465                result.extend(restStates)
2466            else:  # capabilities
2467                assert isinstance(eVal, list)
2468                eVal = cast(List[base.Capability], eVal)
2469                result.extend(eVal)
2470        elif eType in ('deactivate', 'bounce'):
2471            if eVal is not None:
2472                raise ValueError(
2473                    f"'{eType}' effect has non-None value:"
2474                    f"\n  {repr(effect)}"
2475                )
2476        elif eType == 'follow':
2477            eVal = cast(base.Transition, eVal)
2478            result.append(eVal)
2479        elif eType == 'edit':
2480            eVal = cast(List[List[commands.Command]], eVal)
2481            if len(eVal) == 0:
2482                result.append('{}')  # keep the effect-type marker
2483            else:
2484                for cmdList in eVal:
2485                    result.append(
2486                        self.unparseCommandList(cmdList)
2487                    )
2488        elif eType == 'goto':
2489            if isinstance(eVal, base.DecisionSpecifier):
2490                result.append(self.unparseDecisionSpecifier(eVal))
2491            elif isinstance(eVal, (base.DecisionID, base.DecisionName)):
2492                result.append(str(eVal))
2493            elif (
2494                isinstance(eVal, tuple)
2495            and len(eVal) == 2
2496            and isinstance(eVal[1], base.FocalPointName)
2497            ):
2498                if isinstance(eVal[0], base.DecisionSpecifier):
2499                    result.append(self.unparseDecisionSpecifier(eVal[0]))
2500                else:
2501                    result.append(str(eVal[0]))
2502                result.append(eVal[1])
2503            else:
2504                raise ValueError(
2505                    f"'{eType}' effect has invalid value {eVal}"
2506                )
2507        else:
2508            raise ValueError(
2509                f"Unrecognized effect type '{eType}' in effect:"
2510                f"\n  {repr(effect)}"
2511            )
2512
2513        # Add modifier strings
2514        if effect['applyTo'] == 'common':
2515            result.append(self.formatDict[Lexeme.inCommon])
2516
2517        if effect['hidden']:
2518            result.append(self.formatDict[Lexeme.isHidden])
2519
2520        dVal = effect['delay']
2521        if dVal is not None:
2522            result.append(
2523                self.formatDict[Lexeme.sepOrDelay] + str(dVal)
2524            )
2525
2526        cVal = effect['charges']
2527        if cVal is not None:
2528            result.append(
2529                self.formatDict[Lexeme.effectCharges] + str(cVal)
2530            )
2531
2532        joined = ''
2533        before = False
2534        for r in result:
2535            if (
2536                r.startswith(' ')
2537             or r.startswith('\n')
2538             or r.endswith(' ')
2539             or r.endswith('\n')
2540            ):
2541                joined += r
2542                before = False
2543            else:
2544                joined += (' ' if before else '') + r
2545                before = True
2546        return joined
2547
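The doctests above establish a parse/unparse round trip; as a hedged invariant sketch:

    pf = ParseFormat()
    e = pf.parseEffect('toggle red blue')
    # Per the doctests, unparsing reproduces the original string:
    assert pf.unparseEffect(e) == 'toggle red blue'
    # ...and re-parsing that string reproduces the effect:
    assert pf.parseEffect(pf.unparseEffect(e)) == e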
2548    def parseDecisionSpecifierFromTokens(
2549        self,
2550        tokens: LexedTokens,
2551        start: int = 0
2552    ) -> Tuple[Union[base.DecisionSpecifier, int], int]:
2553        """
2554        Parses a decision specifier starting at the specified position
2555        in the given tokens list. No ending position is specified, but
2556        instead this function returns a tuple containing the parsed
2557        `base.DecisionSpecifier` along with an index in the tokens list
2558        where the end of the specifier was found.
2559
2560        For example:
2561
2562        >>> pf = ParseFormat()
2563        >>> pf.parseDecisionSpecifierFromTokens(['m'])
2564        (DecisionSpecifier(domain=None, zone=None, name='m'), 0)
2565        >>> pf.parseDecisionSpecifierFromTokens(['12'])  # ID specifier
2566        (12, 0)
2567        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'])
2568        (DecisionSpecifier(domain=None, zone=None, name='a'), 0)
2569        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'], 1)
2570        (DecisionSpecifier(domain=None, zone=None, name='m'), 1)
2571        >>> pf.parseDecisionSpecifierFromTokens(
2572        ...     ['a', Lexeme.domainSeparator, 'm']
2573        ... )
2574        (DecisionSpecifier(domain='a', zone=None, name='m'), 2)
2575        >>> pf.parseDecisionSpecifierFromTokens(
2576        ...     ['a', Lexeme.zoneSeparator, 'm']
2577        ... )
2578        (DecisionSpecifier(domain=None, zone='a', name='m'), 2)
2579        >>> pf.parseDecisionSpecifierFromTokens(
2580        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
2581        ... )
2582        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2583        >>> pf.parseDecisionSpecifierFromTokens(
2584        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
2585        ... )
2586        (DecisionSpecifier(domain='a', zone='b', name='m'), 4)
2587        >>> pf.parseDecisionSpecifierFromTokens(
2588        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
2589        ... )
2590        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2591        >>> pf.parseDecisionSpecifierFromTokens(  # ID-style name w/ zone
2592        ...     ['a', Lexeme.zoneSeparator, '5'],
2593        ... )
2594        Traceback (most recent call last):
2595        ...
2596        exploration.base.InvalidDecisionSpecifierError...
2597        >>> pf.parseDecisionSpecifierFromTokens(
2598        ...     ['d', Lexeme.domainSeparator, '123']
2599        ... )
2600        Traceback (most recent call last):
2601        ...
2602        exploration.base.InvalidDecisionSpecifierError...
2603        >>> pf.parseDecisionSpecifierFromTokens(
2604        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2605        ...     1
2606        ... )
2607        Traceback (most recent call last):
2608        ...
2609        exploration.parsing.ParseError...
2610        >>> pf.parseDecisionSpecifierFromTokens(
2611        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2612        ...     2
2613        ... )
2614        (DecisionSpecifier(domain='b', zone=None, name='m'), 4)
2615        >>> pf.parseDecisionSpecifierFromTokens(
2616        ...     [
2617        ...         'a',
2618        ...         Lexeme.domainSeparator,
2619        ...         'b',
2620        ...         Lexeme.zoneSeparator,
2621        ...         'c',
2622        ...         Lexeme.zoneSeparator,
2623        ...         'm'
2624        ...     ]
2625        ... )
2626        (DecisionSpecifier(domain='a', zone='b', name='c'), 4)
2627        >>> pf.parseDecisionSpecifierFromTokens(
2628        ...     [
2629        ...         'a',
2630        ...         Lexeme.domainSeparator,
2631        ...         'b',
2632        ...         Lexeme.zoneSeparator,
2633        ...         'c',
2634        ...         Lexeme.zoneSeparator,
2635        ...         'm'
2636        ...     ],
2637        ...     2
2638        ... )
2639        (DecisionSpecifier(domain=None, zone='b', name='c'), 4)
2640        >>> pf.parseDecisionSpecifierFromTokens(
2641        ...     [
2642        ...         'a',
2643        ...         Lexeme.domainSeparator,
2644        ...         'b',
2645        ...         Lexeme.zoneSeparator,
2646        ...         'c',
2647        ...         Lexeme.zoneSeparator,
2648        ...         'm'
2649        ...     ],
2650        ...     4
2651        ... )
2652        (DecisionSpecifier(domain=None, zone='c', name='m'), 6)
2653        >>> pf.parseDecisionSpecifierFromTokens(
2654        ...     [
2655        ...         'set',
2656        ...         'main',
2657        ...         Lexeme.domainSeparator,
2658        ...         'zone',
2659        ...         Lexeme.zoneSeparator,
2660        ...         'compass',
2661        ...         'north',
2662        ...         'bounce',
2663        ...     ],
2664        ...     1
2665        ... )
2666        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
2667        """
2668        # Check bounds & normalize start index
2669        nTokens = len(tokens)
2670        if start < -nTokens:
2671            raise IndexError(
2672                f"Invalid start index {start} for {nTokens} tokens (too"
2673                f" negative)."
2674            )
2675        elif start >= nTokens:
2676            raise IndexError(
2677                f"Invalid start index {start} for {nTokens} tokens (too"
2678                f" big)."
2679            )
2680        elif start < 0:
2681            start = nTokens + start
2682
2683        assert (start < nTokens)
2684
2685        first = tokens[start]
2686        if not isinstance(first, str):
2687            raise ParseError(
2688                f"Invalid decision specifier (must start with a name"
2689                f" or id; got: {first} = {self.formatDict[first]})."
2690            )
2691
2692        ds = base.DecisionSpecifier(None, None, first)
2693        result = (base.idOrDecisionSpecifier(ds), start)
2694
2695        domain = None
2696        zoneOrDecision = None
2697
2698        if start + 1 >= nTokens:  # at end of tokens
2699            return result
2700
2701        firstSep = tokens[start + 1]
2702        if firstSep == Lexeme.domainSeparator:
2703            domain = first
2704        elif firstSep == Lexeme.zoneSeparator:
2705            zoneOrDecision = first
2706        else:
2707            return result
2708
2709        if start + 2 >= nTokens:
2710            return result
2711
2712        second = tokens[start + 2]
2713        if isinstance(second, Lexeme):
2714            return result
2715
2716        ds = base.DecisionSpecifier(domain, zoneOrDecision, second)
2717        result = (base.idOrDecisionSpecifier(ds), start + 2)
2718
2719        if start + 3 >= nTokens:
2720            return result
2721
2722        secondSep = tokens[start + 3]
2723        if start + 4 >= nTokens:
2724            return result
2725
2726        third = tokens[start + 4]
2727        if secondSep == Lexeme.zoneSeparator:
2728            if zoneOrDecision is not None:  # two in a row
2729                return result
2730            else:
2731                if not isinstance(third, base.DecisionName):
2732                    return result
2733                else:
2734                    zoneOrDecision = second
2735        else:
2736            return result
2737
2738        if isinstance(third, Lexeme):
2739            return result
2740
2741        ds = base.DecisionSpecifier(domain, zoneOrDecision, third)
2742        return (base.idOrDecisionSpecifier(ds), start + 4)
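
Since `parseDecisionSpecifier` (below) simply lexes its input and then delegates to this method, the two compose directly. A usage sketch under the default format (not one of the module's own doctests):

>>> pf = ParseFormat()
>>> tokens = pf.lex('main//zone::compass')
>>> pf.parseDecisionSpecifierFromTokens(tokens)
(DecisionSpecifier(domain='main', zone='zone', name='compass'), 4)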

def parseDecisionSpecifier(self, specString: str) -> Union[int, exploration.base.DecisionSpecifier]:
2744    def parseDecisionSpecifier(
2745        self,
2746        specString: str
2747    ) -> Union[base.DecisionID, base.DecisionSpecifier]:
2748        """
2749        Parses a full `DecisionSpecifier` from a single string. Can
2750        parse integer decision IDs in string form, and returns a
2751        `DecisionID` in that case, otherwise returns a
2752        `DecisionSpecifier`. Assumes that all int-convertible strings
2753        are decision IDs, so it cannot deal with feature names which are
2754        just numbers.
2755
2756        For example:
2757
2758        >>> pf = ParseFormat()
2759        >>> pf.parseDecisionSpecifier('example')
2760        DecisionSpecifier(domain=None, zone=None, name='example')
2761        >>> pf.parseDecisionSpecifier('outer::example')
2762        DecisionSpecifier(domain=None, zone='outer', name='example')
2763        >>> pf.parseDecisionSpecifier('domain//region::feature')
2764        DecisionSpecifier(domain='domain', zone='region', name='feature')
2765        >>> pf.parseDecisionSpecifier('123')
2766        123
2767        >>> pf.parseDecisionSpecifier('region::domain//feature')
2768        Traceback (most recent call last):
2769        ...
2770        exploration.base.InvalidDecisionSpecifierError...
2771        >>> pf.parseDecisionSpecifier('domain1//domain2//feature')
2772        Traceback (most recent call last):
2773        ...
2774        exploration.base.InvalidDecisionSpecifierError...
2775        >>> pf.parseDecisionSpecifier('domain//123')
2776        Traceback (most recent call last):
2777        ...
2778        exploration.base.InvalidDecisionSpecifierError...
2779        >>> pf.parseDecisionSpecifier('region::123')
2780        Traceback (most recent call last):
2781        ...
2782        exploration.base.InvalidDecisionSpecifierError...
2783        """
2784        try:
2785            return int(specString)
2786        except ValueError:
2787            tokens = self.lex(specString)
2788            result, end = self.parseDecisionSpecifierFromTokens(tokens)
2789            if end != len(tokens) - 1:
2790                raise base.InvalidDecisionSpecifierError(
2791                    f"Junk after end of decision specifier:"
2792                    f"\n{tokens[end + 1:]}"
2793                )
2794            return result
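
Because the result may be either a `DecisionID` (an `int`) or a `DecisionSpecifier`, callers typically branch on the return type. A sketch (not one of the module's own doctests):

>>> pf = ParseFormat()
>>> isinstance(pf.parseDecisionSpecifier('123'), int)  # ID case
True
>>> isinstance(pf.parseDecisionSpecifier('outer::example'), base.DecisionSpecifier)
True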

def parseFeatureSpecifierFromTokens(self, tokens: List[Union[Lexeme, str]], start: int = 0, limit: int = -1) -> Tuple[exploration.base.FeatureSpecifier, int]:
2796    def parseFeatureSpecifierFromTokens(
2797        self,
2798        tokens: LexedTokens,
2799        start: int = 0,
2800        limit: int = -1
2801    ) -> Tuple[base.FeatureSpecifier, int]:
2802        """
2803        Parses a `FeatureSpecifier` starting from the specified part of
2804        a tokens list. Returns a tuple containing the feature specifier
2805        and the end position of the end of the feature specifier.
2806
2807        Can parse integer feature IDs in string form, as well as nested
2808        feature specifiers and plain feature specifiers. Assumes that
2809        all int-convertible strings are feature IDs, so it cannot deal
2810        with feature names which are just numbers.
2811
2812        For example:
2813
2814        >>> pf = ParseFormat()
2815        >>> pf.parseFeatureSpecifierFromTokens(['example'])
2816        (FeatureSpecifier(domain=None, within=[], feature='example',\
2817 part=None), 0)
2818        >>> pf.parseFeatureSpecifierFromTokens(['example1', 'example2'], 1)
2819        (FeatureSpecifier(domain=None, within=[], feature='example2',\
2820 part=None), 1)
2821        >>> pf.parseFeatureSpecifierFromTokens(
2822        ...     [
2823        ...         'domain',
2824        ...         Lexeme.domainSeparator,
2825        ...         'region',
2826        ...         Lexeme.zoneSeparator,
2827        ...         'feature',
2828        ...         Lexeme.partSeparator,
2829        ...         'part'
2830        ...     ]
2831        ... )
2832        (FeatureSpecifier(domain='domain', within=['region'],\
2833 feature='feature', part='part'), 6)
2834        >>> pf.parseFeatureSpecifierFromTokens(
2835        ...     [
2836        ...         'outerRegion',
2837        ...         Lexeme.zoneSeparator,
2838        ...         'midRegion',
2839        ...         Lexeme.zoneSeparator,
2840        ...         'innerRegion',
2841        ...         Lexeme.zoneSeparator,
2842        ...         'feature'
2843        ...     ]
2844        ... )
2845        (FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
2846 'innerRegion'], feature='feature', part=None), 6)
2847        >>> pf.parseFeatureSpecifierFromTokens(
2848        ...     [
2849        ...         'outerRegion',
2850        ...         Lexeme.zoneSeparator,
2851        ...         'midRegion',
2852        ...         Lexeme.zoneSeparator,
2853        ...         'innerRegion',
2854        ...         Lexeme.zoneSeparator,
2855        ...         'feature'
2856        ...     ],
2857        ...     1
2858        ... )
2859        Traceback (most recent call last):
2860        ...
2861        exploration.parsing.InvalidFeatureSpecifierError...
2862        >>> pf.parseFeatureSpecifierFromTokens(
2863        ...     [
2864        ...         'outerRegion',
2865        ...         Lexeme.zoneSeparator,
2866        ...         'midRegion',
2867        ...         Lexeme.zoneSeparator,
2868        ...         'innerRegion',
2869        ...         Lexeme.zoneSeparator,
2870        ...         'feature'
2871        ...     ],
2872        ...     2
2873        ... )
2874        (FeatureSpecifier(domain=None, within=['midRegion', 'innerRegion'],\
2875 feature='feature', part=None), 6)
2876        >>> pf.parseFeatureSpecifierFromTokens(
2877        ...     [
2878        ...         'outerRegion',
2879        ...         Lexeme.zoneSeparator,
2880        ...         'feature',
2881        ...         Lexeme.domainSeparator,
2882        ...         'after',
2883        ...     ]
2884        ... )
2885        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2886 feature='feature', part=None), 2)
2887        >>> pf.parseFeatureSpecifierFromTokens(
2888        ...     [
2889        ...         'outerRegion',
2890        ...         Lexeme.zoneSeparator,
2891        ...         'feature',
2892        ...         Lexeme.domainSeparator,
2893        ...         'after',
2894        ...     ],
2895        ...     2
2896        ... )
2897        (FeatureSpecifier(domain='feature', within=[], feature='after',\
2898 part=None), 4)
2899        >>> # Including a limit:
2900        >>> pf.parseFeatureSpecifierFromTokens(
2901        ...     [
2902        ...         'outerRegion',
2903        ...         Lexeme.zoneSeparator,
2904        ...         'midRegion',
2905        ...         Lexeme.zoneSeparator,
2906        ...         'feature',
2907        ...     ],
2908        ...     0,
2909        ...     2
2910        ... )
2911        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2912 feature='midRegion', part=None), 2)
2913        >>> pf.parseFeatureSpecifierFromTokens(
2914        ...     [
2915        ...         'outerRegion',
2916        ...         Lexeme.zoneSeparator,
2917        ...         'midRegion',
2918        ...         Lexeme.zoneSeparator,
2919        ...         'feature',
2920        ...     ],
2921        ...     0,
2922        ...     0
2923        ... )
2924        (FeatureSpecifier(domain=None, within=[], feature='outerRegion',\
2925 part=None), 0)
2926        >>> pf.parseFeatureSpecifierFromTokens(
2927        ...     [
2928        ...         'region',
2929        ...         Lexeme.zoneSeparator,
2930        ...         Lexeme.zoneSeparator,
2931        ...         'feature',
2932        ...     ]
2933        ... )
2934        (FeatureSpecifier(domain=None, within=[], feature='region',\
2935 part=None), 0)
2936        """
2937        start, limit, nTokens = normalizeEnds(tokens, start, limit)
2938
2939        if nTokens == 0:
2940            raise InvalidFeatureSpecifierError(
2941                "Can't parse a feature specifier from 0 tokens."
2942            )
2943        first = tokens[start]
2944        if isinstance(first, Lexeme):
2945            raise InvalidFeatureSpecifierError(
2946                f"Feature specifier can't begin with a special token."
2947                f" Got:\n{tokens[start:limit + 1]}"
2948            )
2949
2950        if nTokens in (1, 2):
2951            # 2 tokens isn't enough for a second part
2952            fs = base.FeatureSpecifier(
2953                domain=None,
2954                within=[],
2955                feature=first,
2956                part=None
2957            )
2958            return (base.normalizeFeatureSpecifier(fs), start)
2959
2960        firstSep = tokens[start + 1]
2961        secondPart = tokens[start + 2]
2962
2963        if (
2964            firstSep not in (
2965                Lexeme.domainSeparator,
2966                Lexeme.zoneSeparator,
2967                Lexeme.partSeparator
2968            )
2969         or not isinstance(secondPart, str)
2970        ):
2971            # Following tokens won't work out
2972            fs = base.FeatureSpecifier(
2973                domain=None,
2974                within=[],
2975                feature=first,
2976                part=None
2977            )
2978            return (base.normalizeFeatureSpecifier(fs), start)
2979
2980        if firstSep == Lexeme.domainSeparator:
2981            if start + 2 > limit:
2982                return (
2983                    base.FeatureSpecifier(
2984                        domain=first,
2985                        within=[],
2986                        feature=secondPart,
2987                        part=None
2988                    ),
2989                    start + 2
2990                )
2991            else:
2992                rest, restEnd = self.parseFeatureSpecifierFromTokens(
2993                    tokens,
2994                    start + 2,
2995                    limit
2996                )
2997                if rest.domain is not None:  # two domainSeparators in a row
2998                    fs = base.FeatureSpecifier(
2999                        domain=first,
3000                        within=[],
3001                        feature=rest.domain,
3002                        part=None
3003                    )
3004                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3005                else:
3006                    fs = base.FeatureSpecifier(
3007                        domain=first,
3008                        within=rest.within,
3009                        feature=rest.feature,
3010                        part=rest.part
3011                    )
3012                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3013
3014        elif firstSep == Lexeme.zoneSeparator:
3015            if start + 2 > limit:
3016                fs = base.FeatureSpecifier(
3017                    domain=None,
3018                    within=[first],
3019                    feature=secondPart,
3020                    part=None
3021                )
3022                return (base.normalizeFeatureSpecifier(fs), start + 2)
3023            else:
3024                rest, restEnd = self.parseFeatureSpecifierFromTokens(
3025                    tokens,
3026                    start + 2,
3027                    limit
3028                )
3029                if rest.domain is not None:  # domain sep after zone sep
3030                    fs = base.FeatureSpecifier(
3031                        domain=None,
3032                        within=[first],
3033                        feature=rest.domain,
3034                        part=None
3035                    )
3036                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3037                else:
3038                    within = [first]
3039                    within.extend(rest.within)
3040                    fs = base.FeatureSpecifier(
3041                        domain=None,
3042                        within=within,
3043                        feature=rest.feature,
3044                        part=rest.part
3045                    )
3046                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3047
3048        else:  # must be partSeparator
3049            fs = base.FeatureSpecifier(
3050                domain=None,
3051                within=[],
3052                feature=first,
3053                part=secondPart
3054            )
3055            return (base.normalizeFeatureSpecifier(fs), start + 2)
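
As with decision specifiers, lexing composes directly with this method (this is exactly what `parseFeatureSpecifier` below does). A sketch under the default format (not one of the module's own doctests):

>>> pf = ParseFormat()
>>> pf.parseFeatureSpecifierFromTokens(pf.lex('domain//region::feature%%part'))
(FeatureSpecifier(domain='domain', within=['region'], feature='feature', part='part'), 6)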

def parseFeatureSpecifier(self, specString: str) -> exploration.base.FeatureSpecifier:
3057    def parseFeatureSpecifier(self, specString: str) -> base.FeatureSpecifier:
3058        """
3059        Parses a full `FeatureSpecifier` from a single string. See
3060        `parseFeatureSpecifierFromTokens`.
3061
3062        >>> pf = ParseFormat()
3063        >>> pf.parseFeatureSpecifier('example')
3064        FeatureSpecifier(domain=None, within=[], feature='example', part=None)
3065        >>> pf.parseFeatureSpecifier('outer::example')
3066        FeatureSpecifier(domain=None, within=['outer'], feature='example',\
3067 part=None)
3068        >>> pf.parseFeatureSpecifier('example%%middle')
3069        FeatureSpecifier(domain=None, within=[], feature='example',\
3070 part='middle')
3071        >>> pf.parseFeatureSpecifier('domain//region::feature%%part')
3072        FeatureSpecifier(domain='domain', within=['region'],\
3073 feature='feature', part='part')
3074        >>> pf.parseFeatureSpecifier(
3075        ...     'outerRegion::midRegion::innerRegion::feature'
3076        ... )
3077        FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
3078 'innerRegion'], feature='feature', part=None)
3079        >>> pf.parseFeatureSpecifier('region::domain//feature')
3080        Traceback (most recent call last):
3081        ...
3082        exploration.parsing.InvalidFeatureSpecifierError...
3083        >>> pf.parseFeatureSpecifier('feature%%part1%%part2')
3084        Traceback (most recent call last):
3085        ...
3086        exploration.parsing.InvalidFeatureSpecifierError...
3087        >>> pf.parseFeatureSpecifier('domain1//domain2//feature')
3088        Traceback (most recent call last):
3089        ...
3090        exploration.parsing.InvalidFeatureSpecifierError...
3091        >>> # TODO: Issue warnings for these...
3092        >>> pf.parseFeatureSpecifier('domain//123')  # domain discarded
3093        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3094        >>> pf.parseFeatureSpecifier('region::123')  # zone discarded
3095        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3096        >>> pf.parseFeatureSpecifier('123%%part')
3097        FeatureSpecifier(domain=None, within=[], feature=123, part='part')
3098        """
3099        tokens = self.lex(specString)
3100        result, rEnd = self.parseFeatureSpecifierFromTokens(tokens)
3101        if rEnd != len(tokens) - 1:
3102            raise InvalidFeatureSpecifierError(
3103                f"Feature specifier has extra stuff at end:"
3104                f" {tokens[rEnd + 1:]}"
3105            )
3106        else:
3107            return result

def normalizeFeatureSpecifier(self, spec: Union[int, str, exploration.base.FeatureSpecifier]) -> exploration.base.FeatureSpecifier:
3109    def normalizeFeatureSpecifier(
3110        self,
3111        spec: base.AnyFeatureSpecifier
3112    ) -> base.FeatureSpecifier:
3113        """
3114        Normalizes any kind of feature specifier into an official
3115        `FeatureSpecifier` tuple.
3116
3117        For example:
3118
3119        >>> pf = ParseFormat()
3120        >>> pf.normalizeFeatureSpecifier('town')
3121        FeatureSpecifier(domain=None, within=[], feature='town', part=None)
3122        >>> pf.normalizeFeatureSpecifier(5)
3123        FeatureSpecifier(domain=None, within=[], feature=5, part=None)
3124        >>> pf.parseFeatureSpecifierFromTokens(
3125        ...     [
3126        ...         'domain',
3127        ...         Lexeme.domainSeparator,
3128        ...         'region',
3129        ...         Lexeme.zoneSeparator,
3130        ...         'feature',
3131        ...         Lexeme.partSeparator,
3132        ...         'part'
3133        ...     ]
3134        ... )
3135        (FeatureSpecifier(domain='domain', within=['region'],\
3136 feature='feature', part='part'), 6)
3137        >>> pf.normalizeFeatureSpecifier('dom//one::two::three%%middle')
3138        FeatureSpecifier(domain='dom', within=['one', 'two'],\
3139 feature='three', part='middle')
3140        >>> pf.normalizeFeatureSpecifier(
3141        ...   base.FeatureSpecifier(None, ['region'], 'place', None)
3142        ... )
3143        FeatureSpecifier(domain=None, within=['region'], feature='place',\
3144 part=None)
3145        >>> fs = base.FeatureSpecifier(None, [], 'place', None)
3146        >>> ns = pf.normalizeFeatureSpecifier(fs)
3147        >>> ns is fs  # Doesn't create unnecessary clones
3148        True
3149        """
3150        if isinstance(spec, base.FeatureSpecifier):
3151            return spec
3152        elif isinstance(spec, base.FeatureID):
3153            return base.FeatureSpecifier(None, [], spec, None)
3154        elif isinstance(spec, str):
3155            return self.parseFeatureSpecifier(spec)
3156        else:
3157            raise TypeError(f"Invalid feature specifier type: '{type(spec)}'")

def unparseChallenge(self, challenge: exploration.base.Challenge) -> str:
3159    def unparseChallenge(self, challenge: base.Challenge) -> str:
3160        """
3161        Turns a `base.Challenge` into a string that can be turned back
3162        into an equivalent challenge by `parseChallenge`. For example:
3163
3164        >>> pf = ParseFormat()
3165        >>> c = base.challenge(
3166        ...     skills=base.BestSkill('brains', 'brawn'),
3167        ...     level=2,
3168        ...     success=[base.effect(set=('switch', 'on'))],
3169        ...     failure=[
3170        ...         base.effect(deactivate=True, delay=1),
3171        ...         base.effect(bounce=True)
3172        ...     ],
3173        ...     outcome=True
3174        ... )
3175        >>> r = pf.unparseChallenge(c)
3176        >>> r
3177        '<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}'
3178        >>> pf.parseChallenge(r) == c
3179        True
3180        >>> c2 = base.challenge(
3181        ...     skills=base.CombinedSkill(
3182        ...         -2,
3183        ...         base.ConditionalSkill(
3184        ...             base.ReqCapability('tough'),
3185        ...             base.BestSkill(1),
3186        ...             base.BestSkill(-1)
3187        ...         )
3188        ...     ),
3189        ...     level=-2,
3190        ...     success=[base.effect(gain='orb')],
3191        ...     failure=[],
3192        ...     outcome=None
3193        ... )
3194        >>> r2 = pf.unparseChallenge(c2)
3195        >>> r2
3196        '<-2>sum(-2, if(tough, best(1), best(-1))){gain orb}{}'
3197        >>> # TODO: let this parse through without BestSkills...
3198        >>> pf.parseChallenge(r2) == c2
3199        True
3200        """
3201        lt = self.formatDict[Lexeme.angleLeft]
3202        gt = self.formatDict[Lexeme.angleRight]
3203        result = (
3204            lt + str(challenge['level']) + gt
3205          + challenge['skills'].unparse()
3206        )
3207        if challenge['outcome'] is True:
3208            result += gt
3209        result += self.unparseConsequence(challenge['success'])
3210        if challenge['outcome'] is False:
3211            result += gt
3212        result += self.unparseConsequence(challenge['failure'])
3213        return result

def unparseCondition(self, condition: exploration.base.Condition) -> str:
3215    def unparseCondition(self, condition: base.Condition) -> str:
3216        """
3217        Given a `base.Condition` returns a string that would result in
3218        that condition if given to `parseCondition`. For example:
3219
3220        >>> pf = ParseFormat()
3221        >>> c = base.condition(
3222        ...     condition=base.ReqAny([
3223        ...         base.ReqCapability('brawny'),
3224        ...         base.ReqNot(base.ReqTokens('weights', 3))
3225        ...     ]),
3226        ...     consequence=[base.effect(gain='power')]
3227        ... )
3228        >>> r = pf.unparseCondition(c)
3229        >>> r
3230        '??((brawny|!(weights*3))){gain power}{}'
3231        >>> pf.parseCondition(r) == c
3232        True
3233        """
3234        return (
3235            self.formatDict[Lexeme.doubleQuestionmark]
3236          + self.formatDict[Lexeme.openParen]
3237          + condition['condition'].unparse()
3238          + self.formatDict[Lexeme.closeParen]
3239          + self.unparseConsequence(condition['consequence'])
3240          + self.unparseConsequence(condition['alternative'])
3241        )

def unparseConsequence(self, consequence: List[Union[exploration.base.Challenge, exploration.base.Effect, exploration.base.Condition]]) -> str:
3243    def unparseConsequence(self, consequence: base.Consequence) -> str:
3244        """
3245        Given a `base.Consequence`, returns a string encoding of it,
3246        using the same format that `parseConsequence` will parse. Uses
3247        function-call-like syntax and curly braces to denote different
3248        sub-consequences. See also `SkillCombination.unparse` and
3249        `Requirement.unparse`. For example:
3250
3251        >>> pf = ParseFormat()
3252        >>> c = [base.effect(gain='one'), base.effect(lose='one')]
3253        >>> pf.unparseConsequence(c)
3254        '{gain one; lose one}'
3255        >>> c = [
3256        ...     base.challenge(
3257        ...         skills=base.BestSkill('brains', 'brawn'),
3258        ...         level=2,
3259        ...         success=[base.effect(set=('switch', 'on'))],
3260        ...         failure=[
3261        ...             base.effect(deactivate=True, delay=1),
3262        ...             base.effect(bounce=True)
3263        ...         ],
3264        ...         outcome=True
3265        ...     )
3266        ... ]
3267        >>> pf.unparseConsequence(c)
3268        '{<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}}'
3269        >>> c[0]['outcome'] = False
3270        >>> pf.unparseConsequence(c)
3271        '{<2>best(brains, brawn){set switch:on}>{deactivate ,1; bounce}}'
3272        >>> c[0]['outcome'] = None
3273        >>> pf.unparseConsequence(c)
3274        '{<2>best(brains, brawn){set switch:on}{deactivate ,1; bounce}}'
3275        >>> c = [
3276        ...     base.condition(
3277        ...         condition=base.ReqAny([
3278        ...             base.ReqCapability('brawny'),
3279        ...             base.ReqNot(base.ReqTokens('weights', 3))
3280        ...         ]),
3281        ...         consequence=[
3282        ...             base.challenge(
3283        ...                 skills=base.CombinedSkill('brains', 'brawn'),
3284        ...                 level=3,
3285        ...                 success=[base.effect(goto='home')],
3286        ...                 failure=[base.effect(bounce=True)],
3287        ...                 outcome=None
3288        ...             )
3289        ...         ]  # no alternative -> empty list
3290        ...     )
3291        ... ]
3292        >>> pf.unparseConsequence(c)
3293        '{??((brawny|!(weights*3))){\
3294<3>sum(brains, brawn){goto home}{bounce}}{}}'
3295        >>> c = [base.effect(gain='if(power){gain "mimic"}')]
3296        >>> # TODO: Make this work!
3297        >>> # pf.unparseConsequence(c)
3298
3299        '{gain "if(power){gain \\\\"mimic\\\\"}"}'
3300        """
3301        result = self.formatDict[Lexeme.openCurly]
3302        for item in consequence:
3303            if 'skills' in item:  # a Challenge
3304                item = cast(base.Challenge, item)
3305                result += self.unparseChallenge(item)
3306
3307            elif 'value' in item:  # an Effect
3308                item = cast(base.Effect, item)
3309                result += self.unparseEffect(item)
3310
3311            elif 'condition' in item:  # a Condition
3312                item = cast(base.Condition, item)
3313                result += self.unparseCondition(item)
3314
3315            else:  # bad dict
3316                raise TypeError(
3317                    f"Invalid consequence: items in the list must be"
3318                    f" Effects, Challenges, or Conditions (got a dictionary"
3319                    f" without 'skills', 'value', or 'condition' keys)."
3320                    f"\nGot item: {repr(item)}"
3321                )
3322            result += '; '
3323
3324        if result.endswith('; '):
3325            result = result[:-2]
3326
3327        return result + self.formatDict[Lexeme.closeCurly]
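
Assuming `parseConsequence` inverts this encoding (as the `parseChallenge` and `parseCondition` round-trips above suggest), consequences can be round-tripped. A sketch (not one of the module's own doctests):

>>> pf = ParseFormat()
>>> c = [base.effect(gain='one'), base.effect(lose='one')]
>>> pf.parseConsequence(pf.unparseConsequence(c)) == c
True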

def parseMechanismSpecifierFromTokens(self, tokens: List[Union[Lexeme, str]], start: int = 0) -> Tuple[exploration.base.MechanismSpecifier, int]:
3329    def parseMechanismSpecifierFromTokens(
3330        self,
3331        tokens: LexedTokens,
3332        start: int = 0
3333    ) -> Tuple[base.MechanismSpecifier, int]:
3334        """
3335        Parses a mechanism specifier starting at the specified position
3336        in the given tokens list. No ending position is specified, but
3337        instead this function returns a tuple containing the parsed
3338        `base.MechanismSpecifier` along with an index in the tokens list
3339        where the end of the specifier was found.
3340
3341        For example:
3342
3343        >>> pf = ParseFormat()
3344        >>> pf.parseMechanismSpecifierFromTokens(['m'])
3345        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3346 name='m'), 0)
3347        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'])
3348        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3349 name='a'), 0)
3350        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'], 1)
3351        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3352 name='m'), 1)
3353        >>> pf.parseMechanismSpecifierFromTokens(
3354        ...     ['a', Lexeme.domainSeparator, 'm']
3355        ... )
3356        (MechanismSpecifier(domain='a', zone=None, decision=None,\
3357 name='m'), 2)
3358        >>> pf.parseMechanismSpecifierFromTokens(
3359        ...     ['a', Lexeme.zoneSeparator, 'm']
3360        ... )
3361        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3362 name='m'), 2)
3363        >>> pf.parseMechanismSpecifierFromTokens(
3364        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
3365        ... )
3366        (MechanismSpecifier(domain=None, zone='a', decision='b',\
3367 name='m'), 4)
3368        >>> pf.parseMechanismSpecifierFromTokens(
3369        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
3370        ... )
3371        (MechanismSpecifier(domain='a', zone=None, decision='b',\
3372 name='m'), 4)
3373        >>> pf.parseMechanismSpecifierFromTokens(
3374        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
3375        ... )
3376        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3377 name='b'), 2)
3378        >>> pf.parseMechanismSpecifierFromTokens(
3379        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3380        ...     1
3381        ... )
3382        Traceback (most recent call last):
3383        ...
3384        exploration.parsing.ParseError...
3385        >>> pf.parseMechanismSpecifierFromTokens(
3386        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3387        ...     2
3388        ... )
3389        (MechanismSpecifier(domain='b', zone=None, decision=None,\
3390 name='m'), 4)
3391        >>> pf.parseMechanismSpecifierFromTokens(
3392        ...     [
3393        ...         'a',
3394        ...         Lexeme.domainSeparator,
3395        ...         'b',
3396        ...         Lexeme.zoneSeparator,
3397        ...         'c',
3398        ...         Lexeme.zoneSeparator,
3399        ...         'm'
3400        ...     ]
3401        ... )
3402        (MechanismSpecifier(domain='a', zone='b', decision='c', name='m'), 6)
3403        >>> pf.parseMechanismSpecifierFromTokens(
3404        ...     [
3405        ...         'a',
3406        ...         Lexeme.domainSeparator,
3407        ...         'b',
3408        ...         Lexeme.zoneSeparator,
3409        ...         'c',
3410        ...         Lexeme.zoneSeparator,
3411        ...         'm'
3412        ...     ],
3413        ...     2
3414        ... )
3415        (MechanismSpecifier(domain=None, zone='b', decision='c',\
3416 name='m'), 6)
3417        >>> pf.parseMechanismSpecifierFromTokens(
3418        ...     [
3419        ...         'a',
3420        ...         Lexeme.domainSeparator,
3421        ...         'b',
3422        ...         Lexeme.zoneSeparator,
3423        ...         'c',
3424        ...         Lexeme.zoneSeparator,
3425        ...         'm'
3426        ...     ],
3427        ...     4
3428        ... )
3429        (MechanismSpecifier(domain=None, zone=None, decision='c',\
3430 name='m'), 6)
3431        >>> pf.parseMechanismSpecifierFromTokens(
3432        ...     [
3433        ...         'roomB',
3434        ...         Lexeme.zoneSeparator,
3435        ...         'switch',
3436        ...         Lexeme.mechanismSeparator,
3437        ...         'on'
3438        ...     ]
3439        ... )
3440        (MechanismSpecifier(domain=None, zone=None, decision='roomB',\
3441 name='switch'), 2)
3442        """
3443        start, tEnd, nLeft = normalizeEnds(tokens, start, -1)
3444
3445        try:
3446            dSpec, dEnd = self.parseDecisionSpecifierFromTokens(
3447                tokens,
3448                start
3449            )
3450        except ParseError:
3451            raise ParseError(
3452                "Failed to parse mechanism specifier: couldn't parse"
3453                " the initial decision specifier."
3454            )
3455
3456        if isinstance(dSpec, int):
3457            raise ParseError(
3458                f"Invalid mechanism specifier: cannot use a decision ID"
3459                f" as the decision part. Got: {tokens[start:]}"
3460            )
3461            # TODO: Allow that?
3462
3463        mDomain = dSpec.domain
3464        if dEnd == tEnd or dEnd == tEnd - 1:
3465            return (
3466                base.MechanismSpecifier(
3467                    domain=mDomain,
3468                    zone=None,
3469                    decision=dSpec.zone,
3470                    name=dSpec.name
3471                ),
3472                dEnd
3473            )
3474
3475        sep = tokens[dEnd + 1]
3476        after = tokens[dEnd + 2]
3477
3478        if sep == Lexeme.zoneSeparator:
3479            if isinstance(after, Lexeme):
3480                return (
3481                    base.MechanismSpecifier(
3482                        domain=mDomain,
3483                        zone=None,
3484                        decision=dSpec.zone,
3485                        name=dSpec.name
3486                    ),
3487                    dEnd
3488                )
3489            else:
3490                return (
3491                    base.MechanismSpecifier(
3492                        domain=mDomain,
3493                        zone=dSpec.zone,
3494                        decision=dSpec.name,
3495                        name=after
3496                    ),
3497                    dEnd + 2
3498                )
3499        else:
3500            return (
3501                base.MechanismSpecifier(
3502                    domain=mDomain,
3503                    zone=None,
3504                    decision=dSpec.zone,
3505                    name=dSpec.name
3506                ),
3507                dEnd
3508            )
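
Under the default format, '::' lexes as the zone separator and ':' as the mechanism separator, so a full mechanism string can be lexed and handed to this method; leftover tokens (here the ':on' part) are left for the caller, e.g. to parse as a mechanism state. A sketch (not one of the module's own doctests):

>>> pf = ParseFormat()
>>> pf.parseMechanismSpecifierFromTokens(pf.lex('roomB::switch:on'))
(MechanismSpecifier(domain=None, zone=None, decision='roomB', name='switch'), 2)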

def groupReqTokens(self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> List[Union[Lexeme, str, List[Union[Lexeme, str, ForwardRef('GroupedTokens')]]]]:
3510    def groupReqTokens(
3511        self,
3512        tokens: LexedTokens,
3513        start: int = 0,
3514        end: int = -1
3515    ) -> GroupedTokens:
3516        """
3517        Groups tokens for a requirement, stripping out all parentheses
3518        but replacing parenthesized expressions with sub-lists of tokens.
3519
3520        For example:
3521
3522        >>> pf = ParseFormat()
3523        >>> pf.groupReqTokens(['jump'])
3524        ['jump']
3525        >>> pf.groupReqTokens([Lexeme.openParen, 'jump'])
3526        Traceback (most recent call last):
3527        ...
3528        exploration.parsing.ParseError...
3529        >>> pf.groupReqTokens([Lexeme.closeParen, 'jump'])
3530        Traceback (most recent call last):
3531        ...
3532        exploration.parsing.ParseError...
3533        >>> pf.groupReqTokens(['jump', Lexeme.closeParen])
3534        Traceback (most recent call last):
3535        ...
3536        exploration.parsing.ParseError...
3537        >>> pf.groupReqTokens([Lexeme.openParen, 'jump', Lexeme.closeParen])
3538        [['jump']]
3539        >>> pf.groupReqTokens(
3540        ...     [
3541        ...         Lexeme.openParen,
3542        ...         'jump',
3543        ...         Lexeme.orBar,
3544        ...         'climb',
3545        ...         Lexeme.closeParen,
3546        ...         Lexeme.ampersand,
3547        ...         'crawl',
3548        ...     ]
3549        ... )
3550        [['jump', <Lexeme.orBar: ...>, 'climb'], <Lexeme.ampersand: ...>,\
3551 'crawl']
3552        """
3553        start, end, nTokens = normalizeEnds(tokens, start, end)
3554        if nTokens == 0:
3555            raise ParseError("Ran out of tokens.")
3556
3557        resultsStack: List[GroupedTokens] = [[]]
3558        here = start
3559        while here <= end:
3560            token = tokens[here]
3561            here += 1
3562            if token == Lexeme.closeParen:
3563                if len(resultsStack) == 1:
3564                    raise ParseError(
3565                        f"Too many closing parens at index {here - 1}"
3566                        f" in:\n{tokens[start:end + 1]}"
3567                    )
3568                else:
3569                    closed = resultsStack.pop()
3570                    resultsStack[-1].append(closed)
3571            elif token == Lexeme.openParen:
3572                resultsStack.append([])
3573            else:
3574                resultsStack[-1].append(token)
3575        if len(resultsStack) != 1:
3576            raise ParseError(
3577                f"Mismatched parentheses in tokens:"
3578                f"\n{tokens[start:end + 1]}"
3579            )
3580        return resultsStack[0]
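
Nested parentheses produce correspondingly nested sub-lists. A sketch under the default format (not one of the module's own doctests; enum reprs elided as in the doctests above):

>>> pf = ParseFormat()
>>> pf.groupReqTokens(pf.lex('(jump|(climb&crawl))'))
[['jump', <Lexeme.orBar: ...>, ['climb', <Lexeme.ampersand: ...>, 'crawl']]]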

def groupReqTokensByPrecedence(self, tokenGroups: List[Union[Lexeme, str, List[Union[Lexeme, str, ForwardRef('GroupedTokens')]]]]) -> List[Union[Lexeme, exploration.base.Requirement, List[Union[Lexeme, exploration.base.Requirement, ForwardRef('GroupedRequirementParts')]]]]:
3582    def groupReqTokensByPrecedence(
3583        self,
3584        tokenGroups: GroupedTokens
3585    ) -> GroupedRequirementParts:
3586        """
3587        Re-groups requirement tokens that have been grouped using
3588        `groupReqTokens` according to operator precedence, effectively
3589        creating an equivalent result which would have been obtained by
3590        `groupReqTokens` if all possible non-redundant explicit
3591        parentheses had been included.
3592
3593        Also turns each leaf part into a `Requirement`.
3594
3595        TODO: Make this actually reasonably efficient T_T
3596
3597        Examples:
3598
3599        >>> pf = ParseFormat()
3600        >>> r = pf.parseRequirement('capability&roomB::switch:on')
3601        >>> pf.groupReqTokensByPrecedence(
3602        ...     [
3603        ...         ['jump', Lexeme.orBar, 'climb'],
3604        ...         Lexeme.ampersand,
3605        ...         Lexeme.notMarker,
3606        ...         'coin',
3607        ...         Lexeme.tokenCount,
3608        ...         '3'
3609        ...     ]
3610        ... )
3611        [\
3612[\
3613[[ReqCapability('jump'), <Lexeme.orBar: ...>, ReqCapability('climb')]],\
3614 <Lexeme.ampersand: ...>,\
3615 [<Lexeme.notMarker: ...>, ReqTokens('coin', 3)]\
3616]\
3617]
3618        """
3619        subgrouped: List[Union[Lexeme, str, GroupedRequirementParts]] = []
3620        # First recursively group all parenthesized expressions
3621        for i, item in enumerate(tokenGroups):
3622            if isinstance(item, list):
3623                subgrouped.append(self.groupReqTokensByPrecedence(item))
3624            else:
3625                subgrouped.append(item)
3626
3627        # Now process all leaf requirements
3628        leavesConverted: GroupedRequirementParts = []
3629        i = 0
3630        while i < len(subgrouped):
3631            gItem = subgrouped[i]
3632
3633            if isinstance(gItem, list):
3634                leavesConverted.append(gItem)
3635            elif isinstance(gItem, Lexeme):
3636                leavesConverted.append(gItem)
3637            elif i == len(subgrouped) - 1:
3638                if isinstance(gItem, Lexeme):
3639                    raise ParseError(
3640                        f"Lexeme at end of requirement. Grouped tokens:"
3641                        f"\n{tokenGroups}"
3642                    )
3643                else:
3644                    assert isinstance(gItem, str)
3645                    if gItem == 'X':
3646                        leavesConverted.append(base.ReqImpossible())
3647                    elif gItem == 'O':
3648                        leavesConverted.append(base.ReqNothing())
3649                    else:
3650                        leavesConverted.append(base.ReqCapability(gItem))
3651            else:
3652                assert isinstance(gItem, str)
3653                try:
3654                    # TODO: Avoid list copy here...
3655                    couldBeMechanismSpecifier: LexedTokens = []
3656                    for ii in range(i, len(subgrouped)):
3657                        lexemeOrStr = subgrouped[ii]
3658                        if isinstance(lexemeOrStr, (Lexeme, str)):
3659                            couldBeMechanismSpecifier.append(lexemeOrStr)
3660                        else:
3661                            break
3662                    mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
3663                        couldBeMechanismSpecifier
3664                    )
3665                    mEnd += i
3666                    if (
3667                        mEnd >= len(subgrouped) - 2
3668                     or subgrouped[mEnd + 1] != Lexeme.mechanismSeparator
3669                    ):
3670                        raise ParseError("Not a mechanism requirement.")
3671
3672                    mState = subgrouped[mEnd + 2]
3673                    if not isinstance(mState, base.MechanismState):
3674                        raise ParseError("Not a mechanism requirement.")
3675                    leavesConverted.append(base.ReqMechanism(mSpec, mState))
3676                    i = mEnd + 2  # + 1 will happen automatically below
3677                except ParseError:
3678                    following = subgrouped[i + 1]
3679                    if following in (
3680                        Lexeme.tokenCount,
3681                        Lexeme.mechanismSeparator,
3682                        Lexeme.wigglyLine,
3683                        Lexeme.skillLevel
3684                    ):
3685                        if (
3686                            i == len(subgrouped) - 2
3687                         or isinstance(subgrouped[i + 2], Lexeme)
3688                        ):
3689                            if following == Lexeme.wigglyLine:
3690                                # Default tag value is 1
3691                                leavesConverted.append(base.ReqTag(gItem, 1))
3692                                i += 1  # another +1 automatic below
3693                            else:
3694                                raise ParseError(
3695                                    f"Lexeme at end of requirement. Grouped"
3696                                    f" tokens:\n{tokenGroups}"
3697                                )
3698                        else:
3699                            afterwards = subgrouped[i + 2]
3700                            if not isinstance(afterwards, str):
3701                                raise ParseError(
3702                                    f"Lexeme after token/mechanism/tag/skill"
3703                                    f" separator at index {i}."
3704                                    f" Grouped tokens:\n{tokenGroups}"
3705                                )
3706                            i += 2  # another +1 automatic below
3707                            if following == Lexeme.tokenCount:
3708                                try:
3709                                    tCount = int(afterwards)
3710                                except ValueError:
3711                                    raise ParseError(
3712                                        f"Token count could not be"
3713                                        f" parsed as an integer:"
3714                                        f" {afterwards!r}. Grouped"
3715                                        f" tokens:\n{tokenGroups}"
3716                                    )
3717                                leavesConverted.append(
3718                                    base.ReqTokens(gItem, tCount)
3719                                )
3720                            elif following == Lexeme.mechanismSeparator:
3721                                leavesConverted.append(
3722                                    base.ReqMechanism(gItem, afterwards)
3723                                )
3724                            elif following == Lexeme.wigglyLine:
3725                                tVal = self.parseTagValue(afterwards)
3726                                leavesConverted.append(
3727                                    base.ReqTag(gItem, tVal)
3728                                )
3729                            else:
3730                                assert following == Lexeme.skillLevel
3731                                try:
3732                                    sLevel = int(afterwards)
3733                                except ValueError:
3734                                    raise ParseError(
3735                                        f"Skill level could not be"
3736                                        f" parsed as an integer:"
3737                                        f" {afterwards!r}. Grouped"
3738                                        f" tokens:\n{tokenGroups}"
3739                                    )
3740                                leavesConverted.append(
3741                                    base.ReqLevel(gItem, sLevel)
3742                                )
3743                    else:
3744                        if gItem == 'X':
3745                            leavesConverted.append(base.ReqImpossible())
3746                        elif gItem == 'O':
3747                            leavesConverted.append(base.ReqNothing())
3748                        else:
3749                            leavesConverted.append(
3750                                base.ReqCapability(gItem)
3751                            )
3752
3753            # Finally, increment our index:
3754            i += 1
3755
3756        # Now group all NOT operators
3757        i = 0
3758        notsGrouped: GroupedRequirementParts = []
3759        while i < len(leavesConverted):
3760            leafItem = leavesConverted[i]
3761            group = []
3762            while leafItem == Lexeme.notMarker:
3763                group.append(leafItem)
3764                i += 1
3765                if i >= len(leavesConverted):
3766                    raise ParseError(
3767                        f"NOT at end of tokens:\n{leavesConverted}"
3768                    )
3769                leafItem = leavesConverted[i]
3770            if group == []:
3771                notsGrouped.append(leafItem)
3772                i += 1
3773            else:
3774                group.append(leafItem)
3775                i += 1
3776                notsGrouped.append(group)
3777
3778        # Next group all AND operators
3779        i = 0
3780        andsGrouped: GroupedRequirementParts = []
3781        while i < len(notsGrouped):
3782            notGroupItem = notsGrouped[i]
3783            if notGroupItem == Lexeme.ampersand:
3784                if i == len(notsGrouped) - 1:
3785                    raise ParseError(
3786                        f"AND at end of group in tokens:"
3787                        f"\n{tokenGroups}"
3788                        f"\nWhich had been grouped into:"
3789                        f"\n{notsGrouped}"
3790                    )
3791                itemAfter = notsGrouped[i + 1]
3792                if isinstance(itemAfter, Lexeme):
3793                    raise ParseError(
3794                        f"Lexeme after AND in group in tokens:"
3795                        f"\n{tokenGroups}"
3796                        f"\nWhich had been grouped into:"
3797                        f"\n{notsGrouped}"
3798                    )
3799                assert isinstance(itemAfter, (base.Requirement, list))
3800                prev = andsGrouped[-1]
3801                if (
3802                    isinstance(prev, list)
3803                and len(prev) > 2
3804                and prev[1] == Lexeme.ampersand
3805                ):
3806                    prev.extend(notsGrouped[i:i + 2])
3807                    i += 1  # with an extra +1 below
3808                else:
3809                    andsGrouped.append(
3810                        [andsGrouped.pop()] + notsGrouped[i:i + 2]
3811                    )
3812                    i += 1 # extra +1 below
3813            else:
3814                andsGrouped.append(notGroupItem)
3815            i += 1
3816
3817        # Finally check that we only have OR operators left over
3818        i = 0
3819        finalResult: GroupedRequirementParts = []
3820        while i < len(andsGrouped):
3821            andGroupItem = andsGrouped[i]
3822            if andGroupItem == Lexeme.orBar:
3823                if i == len(andsGrouped) - 1:
3824                    raise ParseError(
3825                        f"OR at end of group in tokens:"
3826                        f"\n{tokenGroups}"
3827                        f"\nWhich had been grouped into:"
3828                        f"\n{andsGrouped}"
3829                    )
3830                itemAfter = andsGrouped[i + 1]
3831                if isinstance(itemAfter, Lexeme):
3832                    raise ParseError(
3833                        f"Lexeme after OR in group in tokens:"
3834                        f"\n{tokenGroups}"
3835                        f"\nWhich had been grouped into:"
3836                        f"\n{andsGrouped}"
3837                    )
3838                assert isinstance(itemAfter, (base.Requirement, list))
3839                prev = finalResult[-1]
3840                if (
3841                    isinstance(prev, list)
3842                and len(prev) > 2
3843                and prev[1] == Lexeme.orBar
3844                ):
3845                    prev.extend(andsGrouped[i:i + 2])
3846                    i += 1  # with an extra +1 below
3847                else:
3848                    finalResult.append(
3849                        [finalResult.pop()] + andsGrouped[i:i + 2]
3850                    )
3851                    i += 1 # extra +1 below
3852            elif isinstance(andGroupItem, Lexeme):
3853                raise ParseError(
3854                    f"Leftover lexeme when grouping ORs at index {i}"
3855                    f" in grouped tokens:\n{andsGrouped}"
3856                    f"\nOriginal tokens were:\n{tokenGroups}"
3857                )
3858            else:
3859                finalResult.append(andGroupItem)
3860            i += 1
3861
3862        return finalResult

Re-groups requirement tokens that have been grouped using groupReqTokens according to operator precedence, effectively creating an equivalent result which would have been obtained by groupReqTokens if all possible non-redundant explicit parentheses had been included.

Also turns each leaf part into a Requirement.

TODO: Make this actually reasonably efficient T_T

Examples:

>>> pf = ParseFormat()
>>> r = pf.parseRequirement('capability&roomB::switch:on')
>>> pf.groupReqTokensByPrecedence(
...     [
...         ['jump', Lexeme.orBar, 'climb'],
...         Lexeme.ampersand,
...         Lexeme.notMarker,
...         'coin',
...         Lexeme.tokenCount,
...         '3'
...     ]
... )
[[[[ReqCapability('jump'), <Lexeme.orBar: ...>, ReqCapability('climb')]], <Lexeme.ampersand: ...>, [<Lexeme.notMarker: ...>, ReqTokens('coin', 3)]]]
def parseRequirementFromRegroupedTokens(self, reqGroups: List[Union[Lexeme, exploration.base.Requirement, List[Union[Lexeme, exploration.base.Requirement, ForwardRef('GroupedRequirementParts')]]]]) -> exploration.base.Requirement:
3864    def parseRequirementFromRegroupedTokens(
3865        self,
3866        reqGroups: GroupedRequirementParts
3867    ) -> base.Requirement:
3868        """
3869        Recursive parser that works once tokens have been turned into
3870        requirements at the leaves and grouped by operator precedence
3871        otherwise (see `groupReqTokensByPrecedence`).
3872
3873        TODO: Simplify by just doing this while grouping... ?
3874        """
3875        if len(reqGroups) == 0:
3876            raise ParseError("Ran out of tokens.")
3877
3878        elif len(reqGroups) == 1:
3879            only = reqGroups[0]
3880            if isinstance(only, list):
3881                return self.parseRequirementFromRegroupedTokens(only)
3882            elif isinstance(only, base.Requirement):
3883                return only
3884            else:
3885                raise ParseError(f"Invalid singleton group:\n{only}")
3886        elif reqGroups[0] == Lexeme.notMarker:
3887            if (
3888                not all(x == Lexeme.notMarker for x in reqGroups[:-1])
3889             or not isinstance(reqGroups[-1], (list, base.Requirement))
3890            ):
3891                raise ParseError(f"Invalid negation group:\n{reqGroups}")
3892            result = reqGroups[-1]
3893            if isinstance(result, list):
3894                result = self.parseRequirementFromRegroupedTokens(result)
3895            assert isinstance(result, base.Requirement)
3896            for i in range(len(reqGroups) - 1):
3897                result = base.ReqNot(result)
3898            return result
3899        elif len(reqGroups) % 2 == 0:
3900            raise ParseError(f"Even-length non-negation group:\n{reqGroups}")
3901        else:
3902            if (
3903                reqGroups[1] not in (Lexeme.ampersand, Lexeme.orBar)
3904             or not all(
3905                    reqGroups[i] == reqGroups[1]
3906                    for i in range(1, len(reqGroups), 2)
3907                )
3908            ):
3909                raise ParseError(
3910                    f"Inconsistent operator(s) in group:\n{reqGroups}"
3911                )
3912            op = reqGroups[1]
3913            operands = [
3914                (
3915                    self.parseRequirementFromRegroupedTokens(x)
3916                    if isinstance(x, list)
3917                    else x
3918                )
3919                for x in reqGroups[::2]
3920            ]
3921            if not all(isinstance(x, base.Requirement) for x in operands):
3922                raise ParseError(
3923                    f"Item not reducible to Requirement in AND group:"
3924                    f"\n{reqGroups}"
3925                )
3926            reqSequence = cast(Sequence[base.Requirement], operands)
3927            if op == Lexeme.ampersand:
3928                return base.ReqAll(reqSequence).flatten()
3929            else:
3930                assert op == Lexeme.orBar
3931                return base.ReqAny(reqSequence).flatten()

Recursive parser that works once tokens have been turned into requirements at the leaves and grouped by operator precedence otherwise (see groupReqTokensByPrecedence).

TODO: Simplify by just doing this while grouping... ?
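
An illustrative sketch (an added example, not from the original docstring): when the leaves of the regrouped list are already Requirement objects, they parse directly; this assumes ReqAny.flatten() leaves a flat two-operand disjunction unchanged.

>>> pf = ParseFormat()
>>> pf.parseRequirementFromRegroupedTokens(
...     [base.ReqCapability('jump'), Lexeme.orBar, base.ReqCapability('climb')]
... )
ReqAny([ReqCapability('jump'), ReqCapability('climb')])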

def parseRequirementFromGroupedTokens(self, tokenGroups: List[Union[Lexeme, str, List[Union[Lexeme, str, ForwardRef('GroupedTokens')]]]]) -> exploration.base.Requirement:
3933    def parseRequirementFromGroupedTokens(
3934        self,
3935        tokenGroups: GroupedTokens
3936    ) -> base.Requirement:
3937        """
3938        Parses a `base.Requirement` from a pre-grouped tokens list (see
3939        `groupReqTokens`). Uses the 'orBar', 'ampersand', 'notMarker',
3940        'tokenCount', and 'mechanismSeparator' `Lexeme`s to provide
3941        'or', 'and', and 'not' operators along with distinguishing
3942        between capabilities, tokens, and mechanisms.
3943
3944        Precedence ordering is not, then and, then or, but you are
3945        encouraged to use parentheses for explicit grouping (the
3946        'openParen' and 'closeParen' `Lexeme`s, although these must be
3947        handled by `groupReqTokens` so this function won't see them
3948        directly).
3949
3950        You can also use 'X' (without quotes) for a never-satisfied
3951        requirement, and 'O' (without quotes) for an always-satisfied
3952        requirement.
3953
3954        Note that when '!' is applied to a token requirement it flips
3955        the sense of the integer from 'must have at least this many' to
3956        'must have strictly less than this many'.
3957
3958        Raises a `ParseError` if the grouped tokens it is given cannot
3959        be parsed as a `Requirement`.
3960
3961        Examples:
3962
3963        >>> pf = ParseFormat()
3964        >>> pf.parseRequirementFromGroupedTokens(['capability'])
3965        ReqCapability('capability')
3966        >>> pf.parseRequirementFromGroupedTokens(
3967        ...     ['token', Lexeme.tokenCount, '3']
3968        ... )
3969        ReqTokens('token', 3)
3970        >>> pf.parseRequirementFromGroupedTokens(
3971        ...     ['mechanism', Lexeme.mechanismSeparator, 'state']
3972        ... )
3973        ReqMechanism('mechanism', 'state')
3974        >>> pf.parseRequirementFromGroupedTokens(
3975        ...     ['capability', Lexeme.orBar, 'token',
3976        ...      Lexeme.tokenCount, '3']
3977        ... )
3978        ReqAny([ReqCapability('capability'), ReqTokens('token', 3)])
3979        >>> pf.parseRequirementFromGroupedTokens(
3980        ...     ['one', Lexeme.ampersand, 'two', Lexeme.orBar, 'three']
3981        ... )
3982        ReqAny([ReqAll([ReqCapability('one'), ReqCapability('two')]),\
3983 ReqCapability('three')])
3984        >>> pf.parseRequirementFromGroupedTokens(
3985        ...     [
3986        ...         'one',
3987        ...         Lexeme.ampersand,
3988        ...         [
3989        ...              'two',
3990        ...              Lexeme.orBar,
3991        ...              'three'
3992        ...         ]
3993        ...     ]
3994        ... )
3995        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
3996 ReqCapability('three')])])
3997        >>> pf.parseRequirementFromTokens(['X'])
3998        ReqImpossible()
3999        >>> pf.parseRequirementFromTokens(['O'])
4000        ReqNothing()
4001        >>> pf.parseRequirementFromTokens(
4002        ...     [Lexeme.openParen, 'O', Lexeme.closeParen]
4003        ... )
4004        ReqNothing()
4005        """
4006        if len(tokenGroups) == 0:
4007            raise ParseError("Ran out of tokens.")
4008
4009        reGrouped = self.groupReqTokensByPrecedence(tokenGroups)
4010
4011        return self.parseRequirementFromRegroupedTokens(reGrouped)

Parses a base.Requirement from a pre-grouped tokens list (see groupReqTokens). Uses the 'orBar', 'ampersand', 'notMarker', 'tokenCount', and 'mechanismSeparator' Lexemes to provide 'or', 'and', and 'not' operators along with distinguishing between capabilities, tokens, and mechanisms.

Precedence ordering is not, then and, then or, but you are encouraged to use parentheses for explicit grouping (the 'openParen' and 'closeParen' Lexemes, although these must be handled by groupReqTokens so this function won't see them directly).

You can also use 'X' (without quotes) for a never-satisfied requirement, and 'O' (without quotes) for an always-satisfied requirement.

Note that when '!' is applied to a token requirement it flips the sense of the integer from 'must have at least this many' to 'must have strictly less than this many'.

Raises a ParseError if the grouped tokens it is given cannot be parsed as a Requirement.

Examples:

>>> pf = ParseFormat()
>>> pf.parseRequirementFromGroupedTokens(['capability'])
ReqCapability('capability')
>>> pf.parseRequirementFromGroupedTokens(
...     ['token', Lexeme.tokenCount, '3']
... )
ReqTokens('token', 3)
>>> pf.parseRequirementFromGroupedTokens(
...     ['mechanism', Lexeme.mechanismSeparator, 'state']
... )
ReqMechanism('mechanism', 'state')
>>> pf.parseRequirementFromGroupedTokens(
...     ['capability', Lexeme.orBar, 'token',
...      Lexeme.tokenCount, '3']
... )
ReqAny([ReqCapability('capability'), ReqTokens('token', 3)])
>>> pf.parseRequirementFromGroupedTokens(
...     ['one', Lexeme.ampersand, 'two', Lexeme.orBar, 'three']
... )
ReqAny([ReqAll([ReqCapability('one'), ReqCapability('two')]), ReqCapability('three')])
>>> pf.parseRequirementFromGroupedTokens(
...     [
...         'one',
...         Lexeme.ampersand,
...         [
...              'two',
...              Lexeme.orBar,
...              'three'
...         ]
...     ]
... )
ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'), ReqCapability('three')])])
>>> pf.parseRequirementFromTokens(['X'])
ReqImpossible()
>>> pf.parseRequirementFromTokens(['O'])
ReqNothing()
>>> pf.parseRequirementFromTokens(
...     [Lexeme.openParen, 'O', Lexeme.closeParen]
... )
ReqNothing()
def parseRequirementFromTokens(self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> exploration.base.Requirement:
4013    def parseRequirementFromTokens(
4014        self,
4015        tokens: LexedTokens,
4016        start: int = 0,
4017        end: int = -1
4018    ) -> base.Requirement:
4019        """
4020        Parses a requirement from `LexedTokens` by grouping them first
4021        and then using `parseRequirementFromGroupedTokens`.
4022
4023        For example:
4024
4025        >>> pf = ParseFormat()
4026        >>> pf.parseRequirementFromTokens(
4027        ...     [
4028        ...         'one',
4029        ...         Lexeme.ampersand,
4030        ...         Lexeme.openParen,
4031        ...         'two',
4032        ...         Lexeme.orBar,
4033        ...         'three',
4034        ...         Lexeme.closeParen
4035        ...     ]
4036        ... )
4037        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
4038 ReqCapability('three')])])
4039        """
4040        grouped = self.groupReqTokens(tokens, start, end)
4041        return self.parseRequirementFromGroupedTokens(grouped)

Parses a requirement from LexedTokens by grouping them first and then using parseRequirementFromGroupedTokens.

For example:

>>> pf = ParseFormat()
>>> pf.parseRequirementFromTokens(
...     [
...         'one',
...         Lexeme.ampersand,
...         Lexeme.openParen,
...         'two',
...         Lexeme.orBar,
...         'three',
...         Lexeme.closeParen
...     ]
... )
ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'), ReqCapability('three')])])
def parseRequirement(self, encoded: str) -> exploration.base.Requirement:
4043    def parseRequirement(self, encoded: str) -> base.Requirement:
4044        """
4045        Parses a `base.Requirement` from a string by calling `lex` and
4046        then feeding it into `ParseFormat.parseRequirementFromTokens`.
4047        As stated in `parseRequirementFromTokens`, the precedence
4048        binding order is NOT, then AND, then OR.
4049
4050        For example:
4051
4052        >>> pf = ParseFormat()
4053        >>> pf.parseRequirement('! coin * 3')
4054        ReqNot(ReqTokens('coin', 3))
4055        >>> pf.parseRequirement(
4056        ...     '  oneWord | "two words"|"three  words words" '
4057        ... )
4058        ReqAny([ReqCapability('oneWord'), ReqCapability('"two words"'),\
4059 ReqCapability('"three  words words"')])
4060        >>> pf.parseRequirement('words-with-dashes')
4061        ReqCapability('words-with-dashes')
4062        >>> r = pf.parseRequirement('capability&roomB::switch:on')
4063        >>> r
4064        ReqAll([ReqCapability('capability'),\
4065 ReqMechanism(MechanismSpecifier(domain=None, zone=None, decision='roomB',\
4066 name='switch'), 'on')])
4067        >>> r.unparse()
4068        '(capability&roomB::switch:on)'
4069        >>> pf.parseRequirement('!!!one')
4070        ReqNot(ReqNot(ReqNot(ReqCapability('one'))))
4071        >>> pf.parseRequirement('domain//zone::where::mechanism:state')
4072        ReqMechanism(MechanismSpecifier(domain='domain', zone='zone',\
4073 decision='where', name='mechanism'), 'state')
4074        >>> pf.parseRequirement('domain//mechanism:state')
4075        ReqMechanism(MechanismSpecifier(domain='domain', zone=None,\
4076 decision=None, name='mechanism'), 'state')
4077        >>> pf.parseRequirement('where::mechanism:state')
4078        ReqMechanism(MechanismSpecifier(domain=None, zone=None,\
4079 decision='where', name='mechanism'), 'state')
4080        >>> pf.parseRequirement('zone::where::mechanism:state')
4081        ReqMechanism(MechanismSpecifier(domain=None, zone='zone',\
4082 decision='where', name='mechanism'), 'state')
4083        >>> pf.parseRequirement('tag~')
4084        ReqTag('tag', 1)
4085        >>> pf.parseRequirement('tag~&tag2~')
4086        ReqAll([ReqTag('tag', 1), ReqTag('tag2', 1)])
4087        >>> pf.parseRequirement('tag~value|tag~3|tag~3.5|skill^3')
4088        ReqAny([ReqTag('tag', 'value'), ReqTag('tag', 3),\
4089 ReqTag('tag', 3.5), ReqLevel('skill', 3)])
4090        >>> pf.parseRequirement('tag~True|tag~False|tag~None')
4091        ReqAny([ReqTag('tag', True), ReqTag('tag', False), ReqTag('tag', None)])
4092
4093        Precedence examples:
4094
4095        >>> pf.parseRequirement('A|B&C')
4096        ReqAny([ReqCapability('A'), ReqAll([ReqCapability('B'),\
4097 ReqCapability('C')])])
4098        >>> pf.parseRequirement('A&B|C')
4099        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4100 ReqCapability('C')])
4101        >>> pf.parseRequirement('(A&B)|C')
4102        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4103 ReqCapability('C')])
4104        >>> pf.parseRequirement('(A&B|C)&D')
4105        ReqAll([ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4106 ReqCapability('C')]), ReqCapability('D')])
4107
4108        Error examples:
4109
4110        >>> pf.parseRequirement('one ! Word')
4111        Traceback (most recent call last):
4112        ...
4113        exploration.parsing.ParseError...
4114        >>> pf.parseRequirement('a|')
4115        Traceback (most recent call last):
4116        ...
4117        exploration.parsing.ParseError...
4118        >>> pf.parseRequirement('b!')
4119        Traceback (most recent call last):
4120        ...
4121        exploration.parsing.ParseError...
4122        >>> pf.parseRequirement('*emph*')
4123        Traceback (most recent call last):
4124        ...
4125        exploration.parsing.ParseError...
4126        >>> pf.parseRequirement('one&&two')
4127        Traceback (most recent call last):
4128        ...
4129        exploration.parsing.ParseError...
4130        >>> pf.parseRequirement('one!|two')
4131        Traceback (most recent call last):
4132        ...
4133        exploration.parsing.ParseError...
4134        >>> pf.parseRequirement('one*two')
4135        Traceback (most recent call last):
4136        ...
4137        exploration.parsing.ParseError...
4138        >>> pf.parseRequirement('one*')
4139        Traceback (most recent call last):
4140        ...
4141        exploration.parsing.ParseError...
4142        >>> pf.parseRequirement('()')
4143        Traceback (most recent call last):
4144        ...
4145        exploration.parsing.ParseError...
4146        >>> pf.parseRequirement('(one)*3')
4147        Traceback (most recent call last):
4148        ...
4149        exploration.parsing.ParseError...
4150        >>> pf.parseRequirement('a:')
4151        Traceback (most recent call last):
4152        ...
4153        exploration.parsing.ParseError...
4154        >>> pf.parseRequirement('a:b:c')
4155        Traceback (most recent call last):
4156        ...
4157        exploration.parsing.ParseError...
4158        >>> pf.parseRequirement('where::capability')
4159        Traceback (most recent call last):
4160        ...
4161        exploration.parsing.ParseError...
4162        """
4163        return self.parseRequirementFromTokens(
4164            lex(encoded, self.reverseFormat)
4165        )

Parses a base.Requirement from a string by calling lex and then feeding it into ParseFormat.parseRequirementFromTokens. As stated in parseRequirementFromTokens, the precedence binding order is NOT, then AND, then OR.

For example:

>>> pf = ParseFormat()
>>> pf.parseRequirement('! coin * 3')
ReqNot(ReqTokens('coin', 3))
>>> pf.parseRequirement(
...     '  oneWord | "two words"|"three  words words" '
... )
ReqAny([ReqCapability('oneWord'), ReqCapability('"two words"'), ReqCapability('"three  words words"')])
>>> pf.parseRequirement('words-with-dashes')
ReqCapability('words-with-dashes')
>>> r = pf.parseRequirement('capability&roomB::switch:on')
>>> r
ReqAll([ReqCapability('capability'), ReqMechanism(MechanismSpecifier(domain=None, zone=None, decision='roomB', name='switch'), 'on')])
>>> r.unparse()
'(capability&roomB::switch:on)'
>>> pf.parseRequirement('!!!one')
ReqNot(ReqNot(ReqNot(ReqCapability('one'))))
>>> pf.parseRequirement('domain//zone::where::mechanism:state')
ReqMechanism(MechanismSpecifier(domain='domain', zone='zone', decision='where', name='mechanism'), 'state')
>>> pf.parseRequirement('domain//mechanism:state')
ReqMechanism(MechanismSpecifier(domain='domain', zone=None, decision=None, name='mechanism'), 'state')
>>> pf.parseRequirement('where::mechanism:state')
ReqMechanism(MechanismSpecifier(domain=None, zone=None, decision='where', name='mechanism'), 'state')
>>> pf.parseRequirement('zone::where::mechanism:state')
ReqMechanism(MechanismSpecifier(domain=None, zone='zone', decision='where', name='mechanism'), 'state')
>>> pf.parseRequirement('tag~')
ReqTag('tag', 1)
>>> pf.parseRequirement('tag~&tag2~')
ReqAll([ReqTag('tag', 1), ReqTag('tag2', 1)])
>>> pf.parseRequirement('tag~value|tag~3|tag~3.5|skill^3')
ReqAny([ReqTag('tag', 'value'), ReqTag('tag', 3), ReqTag('tag', 3.5), ReqLevel('skill', 3)])
>>> pf.parseRequirement('tag~True|tag~False|tag~None')
ReqAny([ReqTag('tag', True), ReqTag('tag', False), ReqTag('tag', None)])

Precedence examples:

>>> pf.parseRequirement('A|B&C')
ReqAny([ReqCapability('A'), ReqAll([ReqCapability('B'), ReqCapability('C')])])
>>> pf.parseRequirement('A&B|C')
ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]), ReqCapability('C')])
>>> pf.parseRequirement('(A&B)|C')
ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]), ReqCapability('C')])
>>> pf.parseRequirement('(A&B|C)&D')
ReqAll([ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]), ReqCapability('C')]), ReqCapability('D')])

Error examples:

>>> pf.parseRequirement('one ! Word')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('a|')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('b!')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('*emph*')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('one&&two')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('one!|two')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('one*two')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('one*')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('()')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('(one)*3')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('a:')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('a:b:c')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('where::capability')
Traceback (most recent call last):
...
ParseError...
def parseSkillCombinationFromTokens(self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> Union[str, exploration.base.SkillCombination]:
4167    def parseSkillCombinationFromTokens(
4168        self,
4169        tokens: LexedTokens,
4170        start: int = 0,
4171        end: int = -1
4172    ) -> Union[base.Skill, base.SkillCombination]:
4173        """
4174        Parses a skill combination from the specified range within the
4175        given tokens list. If just a single string token is selected, it
4176        will be returned as a `base.BestSkill` with just that skill
4177        inside.
4178
4179        For example:
4180
4181        >>> pf = ParseFormat()
4182        >>> pf.parseSkillCombinationFromTokens(['climbing'])
4183        BestSkill('climbing')
4184        >>> tokens = [
4185        ...     'best',
4186        ...     Lexeme.openParen,
4187        ...     'brains',
4188        ...     Lexeme.sepOrDelay,
4189        ...     'brawn',
4190        ...     Lexeme.closeParen,
4191        ... ]
4192        >>> pf.parseSkillCombinationFromTokens(tokens)
4193        BestSkill('brains', 'brawn')
4194        >>> tokens[2] = '3'  # not a lexeme so it's a string
4195        >>> pf.parseSkillCombinationFromTokens(tokens)
4196        BestSkill(3, 'brawn')
4197        >>> tokens = [
4198        ...     Lexeme.wigglyLine,
4199        ...     Lexeme.wigglyLine,
4200        ...     'yes',
4201        ... ]
4202        >>> pf.parseSkillCombinationFromTokens(tokens)
4203        InverseSkill(InverseSkill('yes'))
4204        """
4205        start, end, nTokens = normalizeEnds(tokens, start, end)
4206
4207        first = tokens[start]
4208        if nTokens == 1:
4209            if isinstance(first, base.Skill):
4210                try:
4211                    level = int(first)
4212                    return base.BestSkill(level)
4213                except ValueError:
4214                    return base.BestSkill(first)
4215            else:
4216                raise ParseError(
4217                    f"Invalid SkillCombination:\n{tokens[start:end + 1]}"
4218                )
4219
4220        if first == Lexeme.wigglyLine:
4221            inv = self.parseSkillCombinationFromTokens(
4222                tokens,
4223                start + 1,
4224                end
4225            )
4226            if isinstance(inv, base.BestSkill) and len(inv.skills) == 1:
4227                return base.InverseSkill(inv.skills[0])
4228            else:
4229                return base.InverseSkill(inv)
4230
4231        second = tokens[start + 1]
4232        if second != Lexeme.openParen:
4233            raise ParseError(
4234                f"Invalid SkillCombination (missing paren):"
4235                f"\n{tokens[start:end + 1]}"
4236            )
4237
4238        parenEnd = self.matchingBrace(
4239            tokens,
4240            start + 1,
4241            Lexeme.openParen,
4242            Lexeme.closeParen
4243        )
4244        if parenEnd != end:
4245            raise ParseError(
4246                f"Extra junk after SkillCombination:"
4247                f"\n{tokens[parenEnd + 1:end + 1]}"
4248            )
4249
4250        if first == 'if':
4251            parts = list(
4252                findSeparatedParts(
4253                    tokens,
4254                    Lexeme.sepOrDelay,
4255                    start + 2,
4256                    end - 1,
4257                    Lexeme.openParen,
4258                    Lexeme.closeParen
4259                )
4260            )
4261            if len(parts) != 3:
4262                raise ParseError(
4263                    f"Wrong number of parts for ConditionalSkill (needs"
4264                    f" 3, got {len(parts)}):"
4265                    f"\n{tokens[start + 2:end]}"
4266                )
4267            reqStart, reqEnd = parts[0]
4268            ifStart, ifEnd = parts[1]
4269            elseStart, elseEnd = parts[2]
4270            return base.ConditionalSkill(
4271                self.parseRequirementFromTokens(tokens, reqStart, reqEnd),
4272                self.parseSkillCombinationFromTokens(tokens, ifStart, ifEnd),
4273                self.parseSkillCombinationFromTokens(
4274                    tokens,
4275                    elseStart,
4276                    elseEnd
4277                ),
4278            )
4279        elif first in ('sum', 'best', 'worst'):
4280            make: type[base.SkillCombination]
4281            if first == 'sum':
4282                make = base.CombinedSkill
4283            elif first == 'best':
4284                make = base.BestSkill
4285            else:
4286                make = base.WorstSkill
4287
4288            subs = []
4289            for partStart, partEnd in findSeparatedParts(
4290                tokens,
4291                Lexeme.sepOrDelay,
4292                start + 2,
4293                end - 1,
4294                Lexeme.openParen,
4295                Lexeme.closeParen
4296            ):
4297                sub = self.parseSkillCombinationFromTokens(
4298                    tokens,
4299                    partStart,
4300                    partEnd
4301                )
4302                if (
4303                    isinstance(sub, base.BestSkill)
4304                and len(sub.skills) == 1
4305                ):
4306                    subs.append(sub.skills[0])
4307                else:
4308                    subs.append(sub)
4309
4310            return make(*subs)
4311        else:
4312            raise ParseError(
4313                f"Invalid SkillCombination:\n{tokens[start:end + 1]}"
4314            )

Parses a skill combination from the specified range within the given tokens list. If just a single string token is selected, it will be returned as a base.BestSkill with just that skill inside.

For example:

>>> pf = ParseFormat()
>>> pf.parseSkillCombinationFromTokens(['climbing'])
BestSkill('climbing')
>>> tokens = [
...     'best',
...     Lexeme.openParen,
...     'brains',
...     Lexeme.sepOrDelay,
...     'brawn',
...     Lexeme.closeParen,
... ]
>>> pf.parseSkillCombinationFromTokens(tokens)
BestSkill('brains', 'brawn')
>>> tokens[2] = '3'  # not a lexeme so it's a string
>>> pf.parseSkillCombinationFromTokens(tokens)
BestSkill(3, 'brawn')
>>> tokens = [
...     Lexeme.wigglyLine,
...     Lexeme.wigglyLine,
...     'yes',
... ]
>>> pf.parseSkillCombinationFromTokens(tokens)
InverseSkill(InverseSkill('yes'))
def parseSkillCombination(self, encoded: str) -> exploration.base.SkillCombination:
4316    def parseSkillCombination(
4317        self,
4318        encoded: str
4319    ) -> base.SkillCombination:
4320        """
4321        Parses a `SkillCombination` from a string. Calls `lex` and then
4322        `parseSkillCombinationFromTokens`.
4323        """
4324        result = self.parseSkillCombinationFromTokens(
4325            lex(encoded, self.reverseFormat)
4326        )
4327        if not isinstance(result, base.SkillCombination):
4328            return base.BestSkill(result)
4329        else:
4330            return result

Parses a SkillCombination from a string. Calls lex and then parseSkillCombinationFromTokens.
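
For example (an added usage sketch; behavior follows from parseSkillCombinationFromTokens above, assuming the default ',' separator and parentheses):

>>> pf = ParseFormat()
>>> pf.parseSkillCombination('best(brains, brawn)')
BestSkill('brains', 'brawn')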

def parseConditionFromTokens(self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> exploration.base.Condition:
4332    def parseConditionFromTokens(
4333        self,
4334        tokens: LexedTokens,
4335        start: int = 0,
4336        end: int = -1
4337    ) -> base.Condition:
4338        """
4339        Parses a `base.Condition` from a lexed tokens list. For example:
4340
4341        >>> pf = ParseFormat()
4342        >>> tokens = [
4343        ...     Lexeme.doubleQuestionmark,
4344        ...     Lexeme.openParen,
4345        ...     "fire",
4346        ...     Lexeme.ampersand,
4347        ...     "water",
4348        ...     Lexeme.closeParen,
4349        ...     Lexeme.openCurly,
4350        ...     "gain",
4351        ...     "wind",
4352        ...     Lexeme.closeCurly,
4353        ...     Lexeme.openCurly,
4354        ...     Lexeme.closeCurly,
4355        ... ]
4356        >>> pf.parseConditionFromTokens(tokens) == base.condition(
4357        ...     condition=base.ReqAll([
4358        ...         base.ReqCapability('fire'),
4359        ...         base.ReqCapability('water')
4360        ...     ]),
4361        ...     consequence=[base.effect(gain='wind')]
4362        ... )
4363        True
4364        """
4365        start, end, nTokens = normalizeEnds(tokens, start, end)
4366        if nTokens < 8:
4367            raise ParseError(
4368                f"A Condition requires at least 8 tokens (got {nTokens})."
4369            )
4370        if tokens[start] != Lexeme.doubleQuestionmark:
4371            raise ParseError(
4372                f"A Condition must start with"
4373                f" {repr(self.formatDict[Lexeme.doubleQuestionmark])}"
4374            )
4375        try:
4376            consequenceStart = tokens.index(Lexeme.openCurly, start)
4377        except ValueError:
4378            raise ParseError("A condition must include a consequence block.")
4379        consequenceEnd = self.matchingBrace(tokens, consequenceStart)
4380        altStart = consequenceEnd + 1
4381        altEnd = self.matchingBrace(tokens, altStart)
4382
4383        if altEnd != end:
4384            raise ParseError(
4385                f"Junk after condition:\n{tokens[altEnd + 1: end + 1]}"
4386            )
4387
4388        return base.condition(
4389            condition=self.parseRequirementFromTokens(
4390                tokens,
4391                start + 1,
4392                consequenceStart - 1
4393            ),
4394            consequence=self.parseConsequenceFromTokens(
4395                tokens,
4396                consequenceStart,
4397                consequenceEnd
4398            ),
4399            alternative=self.parseConsequenceFromTokens(
4400                tokens,
4401                altStart,
4402                altEnd
4403            )
4404        )

Parses a base.Condition from a lexed tokens list. For example:

>>> pf = ParseFormat()
>>> tokens = [
...     Lexeme.doubleQuestionmark,
...     Lexeme.openParen,
...     "fire",
...     Lexeme.ampersand,
...     "water",
...     Lexeme.closeParen,
...     Lexeme.openCurly,
...     "gain",
...     "wind",
...     Lexeme.closeCurly,
...     Lexeme.openCurly,
...     Lexeme.closeCurly,
... ]
>>> pf.parseConditionFromTokens(tokens) == base.condition(
...     condition=base.ReqAll([
...         base.ReqCapability('fire'),
...         base.ReqCapability('water')
...     ]),
...     consequence=[base.effect(gain='wind')]
... )
True
def parseCondition(self, encoded: str) -> exploration.base.Condition:
4406    def parseCondition(
4407        self,
4408        encoded: str
4409    ) -> base.Condition:
4410        """
4411        Lexes the given string and then calls `parseConditionFromTokens`
4412        to return a `base.Condition`.
4413        """
4414        return self.parseConditionFromTokens(
4415            lex(encoded, self.reverseFormat)
4416        )

Lexes the given string and then calls parseConditionFromTokens to return a base.Condition.
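
For example (an added sketch mirroring the parseConditionFromTokens doctest above, assuming the default '??', '(', '&', ')', '{', and '}' markup):

>>> pf = ParseFormat()
>>> pf.parseCondition('??(fire&water){gain wind}{}') == base.condition(
...     condition=base.ReqAll([
...         base.ReqCapability('fire'),
...         base.ReqCapability('water')
...     ]),
...     consequence=[base.effect(gain='wind')]
... )
True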

def parseChallengeFromTokens(self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> exploration.base.Challenge:
4418    def parseChallengeFromTokens(
4419        self,
4420        tokens: LexedTokens,
4421        start: int = 0,
4422        end: int = -1
4423    ) -> base.Challenge:
4424        """
4425        Parses a `base.Challenge` from a lexed tokens list.
4426
4427        For example:
4428
4429        >>> pf = ParseFormat()
4430        >>> tokens = [
4431        ...     Lexeme.angleLeft,
4432        ...     '2',
4433        ...     Lexeme.angleRight,
4434        ...     'best',
4435        ...     Lexeme.openParen,
4436        ...     "chess",
4437        ...     Lexeme.sepOrDelay,
4438        ...     "checkers",
4439        ...     Lexeme.closeParen,
4440        ...     Lexeme.openCurly,
4441        ...     "gain",
4442        ...     "coin",
4443        ...     Lexeme.tokenCount,
4444        ...     "5",
4445        ...     Lexeme.closeCurly,
4446        ...     Lexeme.angleRight,
4447        ...     Lexeme.openCurly,
4448        ...     "lose",
4449        ...     "coin",
4450        ...     Lexeme.tokenCount,
4451        ...     "5",
4452        ...     Lexeme.closeCurly,
4453        ... ]
4454        >>> c = pf.parseChallengeFromTokens(tokens)
4455        >>> c['skills'] == base.BestSkill('chess', 'checkers')
4456        True
4457        >>> c['level']
4458        2
4459        >>> c['success'] == [base.effect(gain=('coin', 5))]
4460        True
4461        >>> c['failure'] == [base.effect(lose=('coin', 5))]
4462        True
4463        >>> c['outcome']
4464        False
4465        >>> c == base.challenge(
4466        ...     skills=base.BestSkill('chess', 'checkers'),
4467        ...     level=2,
4468        ...     success=[base.effect(gain=('coin', 5))],
4469        ...     failure=[base.effect(lose=('coin', 5))],
4470        ...     outcome=False
4471        ... )
4472        True
4473        >>> t2 = ['hi'] + tokens + ['bye']  # parsing only part of the list
4474        >>> c == pf.parseChallengeFromTokens(t2, 1, -2)
4475        True
4476        """
4477        start, end, nTokens = normalizeEnds(tokens, start, end)
4478        if nTokens < 8:
4479            raise ParseError(
4480                f"Not enough tokens for a challenge: {nTokens}"
4481            )
4482        if tokens[start] != Lexeme.angleLeft:
4483            raise ParseError(
4484                f"Challenge must start with"
4485                f" {repr(self.formatDict[Lexeme.angleLeft])}"
4486            )
4487        levelStr = tokens[start + 1]
4488        if isinstance(levelStr, Lexeme):
4489            raise ParseError(
4490                f"Challenge must start with a level in angle brackets"
4491                f" (got {repr(self.formatDict[levelStr])})."
4492            )
4493        if tokens[start + 2] != Lexeme.angleRight:
4494            raise ParseError(
4495                f"Challenge must include"
4496                f" {repr(self.formatDict[Lexeme.angleRight])} after"
4497                f" the level."
4498            )
4499        try:
4500            level = int(levelStr)
4501        except ValueError:
4502            raise ParseError(
4503                f"Challenge level must be an integer (got"
4504                f" {repr(tokens[start + 1])}."
4505            )
4506        try:
4507            successStart = tokens.index(Lexeme.openCurly, start)
4508            skillsEnd = successStart - 1
4509        except ValueError:
4510            raise ParseError("A challenge must include a consequence block.")
4511
4512        outcome: Optional[bool] = None
4513        if tokens[skillsEnd] == Lexeme.angleRight:
4514            skillsEnd -= 1
4515            outcome = True
4516        successEnd = self.matchingBrace(tokens, successStart)
4517        failStart = successEnd + 1
4518        if tokens[failStart] == Lexeme.angleRight:
4519            failStart += 1
4520            if outcome is not None:
4521                raise ParseError(
4522                    "Cannot indicate both success and failure as"
4523                    " outcomes in a challenge."
4524                )
4525            outcome = False
4526        failEnd = self.matchingBrace(tokens, failStart)
4527
4528        if failEnd != end:
4529            raise ParseError(
4530                f"Junk after challenge:\n{tokens[failEnd + 1:end + 1]}"
4531            )
4532
4533        skills = self.parseSkillCombinationFromTokens(
4534            tokens,
4535            start + 3,
4536            skillsEnd
4537        )
4538        if isinstance(skills, base.Skill):
4539            skills = base.BestSkill(skills)
4540
4541        return base.challenge(
4542            level=level,
4543            outcome=outcome,
4544            skills=skills,
4545            success=self.parseConsequenceFromTokens(
4546                tokens[successStart:successEnd + 1]
4547            ),
4548            failure=self.parseConsequenceFromTokens(
4549                tokens[failStart:failEnd + 1]
4550            )
4551        )

Parses a base.Challenge from a lexed tokens list.

For example:

>>> pf = ParseFormat()
>>> tokens = [
...     Lexeme.angleLeft,
...     '2',
...     Lexeme.angleRight,
...     'best',
...     Lexeme.openParen,
...     "chess",
...     Lexeme.sepOrDelay,
...     "checkers",
...     Lexeme.closeParen,
...     Lexeme.openCurly,
...     "gain",
...     "coin",
...     Lexeme.tokenCount,
...     "5",
...     Lexeme.closeCurly,
...     Lexeme.angleRight,
...     Lexeme.openCurly,
...     "lose",
...     "coin",
...     Lexeme.tokenCount,
...     "5",
...     Lexeme.closeCurly,
... ]
>>> c = pf.parseChallengeFromTokens(tokens)
>>> c['skills'] == base.BestSkill('chess', 'checkers')
True
>>> c['level']
2
>>> c['success'] == [base.effect(gain=('coin', 5))]
True
>>> c['failure'] == [base.effect(lose=('coin', 5))]
True
>>> c['outcome']
False
>>> c == base.challenge(
...     skills=base.BestSkill('chess', 'checkers'),
...     level=2,
...     success=[base.effect(gain=('coin', 5))],
...     failure=[base.effect(lose=('coin', 5))],
...     outcome=False
... )
True
>>> t2 = ['hi'] + tokens + ['bye']  # parsing only part of the list
>>> c == pf.parseChallengeFromTokens(t2, 1, -2)
True
def parseChallenge(self, encoded: str) -> exploration.base.Challenge:
4553    def parseChallenge(
4554        self,
4555        encoded: str
4556    ) -> base.Challenge:
4557        """
4558        Lexes the given string and then calls `parseChallengeFromTokens`
4559        to return a `base.Challenge`.
4560        """
4561        return self.parseChallengeFromTokens(
4562            lex(encoded, self.reverseFormat)
4563        )

Lexes the given string and then calls parseChallengeFromTokens to return a base.Challenge.
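
For example (an added sketch equivalent to the token list shown for parseChallengeFromTokens, assuming the default markup):

>>> pf = ParseFormat()
>>> c = pf.parseChallenge(
...     '<2>best(chess, checkers){gain coin*5}>{lose coin*5}'
... )
>>> c['level']
2
>>> c['outcome']
False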

def parseConsequenceFromTokens(self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> List[Union[exploration.base.Challenge, exploration.base.Effect, exploration.base.Condition]]:
4565    def parseConsequenceFromTokens(
4566        self,
4567        tokens: LexedTokens,
4568        start: int = 0,
4569        end: int = -1
4570    ) -> base.Consequence:
4571        """
4572        Parses a consequence from a lexed token list. If start and/or end
4573        are specified, only processes the part of the list between those
4574        two indices (inclusive). Use `lex` to turn a string into a
4575        `LexedTokens` list (or use `ParseFormat.parseConsequence` which
4576        does that for you).
4577
4578        An example:
4579
4580        >>> pf = ParseFormat()
4581        >>> tokens = [
4582        ...     Lexeme.openCurly,
4583        ...     'gain',
4584        ...     'power',
4585        ...     Lexeme.closeCurly
4586        ... ]
4587        >>> c = pf.parseConsequenceFromTokens(tokens)
4588        >>> c == [base.effect(gain='power')]
4589        True
4590        >>> tokens.append('hi')
4591        >>> c == pf.parseConsequenceFromTokens(tokens, end=-2)
4592        True
4593        >>> c == pf.parseConsequenceFromTokens(tokens, end=3)
4594        True
4595        """
4596        start, end, nTokens = normalizeEnds(tokens, start, end)
4597
4598        if nTokens < 2:
4599            raise ParseError("Consequence must have at least two tokens.")
4600
4601        if tokens[start] != Lexeme.openCurly:
4602            raise ParseError(
4603                f"Consequence must start with an open curly brace:"
4604                f" {repr(self.formatDict[Lexeme.openCurly])}."
4605            )
4606
4607        if tokens[end] != Lexeme.closeCurly:
4608            raise ParseError(
4609                f"Consequence must end with a closing curly brace:"
4610                f" {repr(self.formatDict[Lexeme.closeCurly])}."
4611            )
4612
4613        if nTokens == 2:
4614            return []
4615
4616        result: base.Consequence = []
4617        for partStart, partEnd in findSeparatedParts(
4618            tokens,
4619            Lexeme.consequenceSeparator,
4620            start + 1,
4621            end - 1,
4622            Lexeme.openCurly,
4623            Lexeme.closeCurly
4624        ):
4625            if partEnd - partStart < 0:
4626                raise ParseError("Empty consequence part.")
4627            if tokens[partStart] == Lexeme.angleLeft:  # a challenge
4628                result.append(
4629                    self.parseChallengeFromTokens(
4630                        tokens,
4631                        partStart,
4632                        partEnd
4633                    )
4634                )
4635            elif tokens[partStart] == Lexeme.doubleQuestionmark:  # condition
4636                result.append(
4637                    self.parseConditionFromTokens(
4638                        tokens,
4639                        partStart,
4640                        partEnd
4641                    )
4642                )
4643            else:  # Must be an effect
4644                result.append(
4645                    self.parseEffectFromTokens(
4646                        tokens,
4647                        partStart,
4648                        partEnd
4649                    )
4650                )
4651
4652        return result

Parses a consequence from a lexed token list. If start and/or end are specified, only processes the part of the list between those two indices (inclusive). Use lex to turn a string into a LexedTokens list (or use ParseFormat.parseConsequence which does that for you).

An example:

>>> pf = ParseFormat()
>>> tokens = [
...     Lexeme.openCurly,
...     'gain',
...     'power',
...     Lexeme.closeCurly
... ]
>>> c = pf.parseConsequenceFromTokens(tokens)
>>> c == [base.effect(gain='power')]
True
>>> tokens.append('hi')
>>> c == pf.parseConsequenceFromTokens(tokens, end=-2)
True
>>> c == pf.parseConsequenceFromTokens(tokens, end=3)
True
def parseConsequence(self, encoded: str) -> List[Union[exploration.base.Challenge, exploration.base.Effect, exploration.base.Condition]]:
4654    def parseConsequence(self, encoded: str) -> base.Consequence:
4655        """
4656        Parses a consequence from a string. Uses `lex` and
4657        `ParseFormat.parseConsequenceFromTokens`. For example:
4658
4659        >>> pf = ParseFormat()
4660        >>> c = pf.parseConsequence(
4661        ...   '{gain power}'
4662        ... )
4663        >>> c == [base.effect(gain='power')]
4664        True
4665        >>> pf.unparseConsequence(c)
4666        '{gain power}'
4667        >>> c = pf.parseConsequence(
4668        ...     '{\\n'
4669        ...     '    ??(brawny|!weights*3){\\n'
4670        ...     '        <3>sum(brains, brawn){goto home}>{bounce}\\n'
4671        ...     '    }{};\\n'
4672        ...     '    lose coin*1\\n'
4673        ...     '}'
4674        ... )
4675        >>> len(c)
4676        2
4677        >>> c[0]['condition'] == base.ReqAny([
4678        ...     base.ReqCapability('brawny'),
4679        ...     base.ReqNot(base.ReqTokens('weights', 3))
4680        ... ])
4681        True
4682        >>> len(c[0]['consequence'])
4683        1
4684        >>> len(c[0]['alternative'])
4685        0
4686        >>> cons = c[0]['consequence'][0]
4687        >>> cons['skills'] == base.CombinedSkill('brains', 'brawn')
4688        True
4689        >>> cons['level']
4690        3
4691        >>> len(cons['success'])
4692        1
4693        >>> len(cons['failure'])
4694        1
4695        >>> cons['success'][0] == base.effect(goto='home')
4696        True
4697        >>> cons['failure'][0] == base.effect(bounce=True)
4698        True
4699        >>> cons['outcome'] = False
4700        >>> c[0] == base.condition(
4701        ...     condition=base.ReqAny([
4702        ...         base.ReqCapability('brawny'),
4703        ...         base.ReqNot(base.ReqTokens('weights', 3))
4704        ...     ]),
4705        ...     consequence=[
4706        ...         base.challenge(
4707        ...             skills=base.CombinedSkill('brains', 'brawn'),
4708        ...             level=3,
4709        ...             success=[base.effect(goto='home')],
4710        ...             failure=[base.effect(bounce=True)],
4711        ...             outcome=False
4712        ...         )
4713        ...     ]
4714        ... )
4715        True
4716        >>> c[1] == base.effect(lose=('coin', 1))
4717        True
4718        """
4719        return self.parseConsequenceFromTokens(
4720            lex(encoded, self.reverseFormat)
4721        )

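Because this delegates to `parseConsequenceFromTokens`, a string without the surrounding curly braces should fail that function's brace checks. A hedged sketch of the expected failure (assuming `lex` produces no curly-brace lexemes for this input):

>>> pf = ParseFormat()
>>> pf.parseConsequence('gain power')
Traceback (most recent call last):
...
exploration.parsing.ParseError...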
class ParsedDotGraph(typing.TypedDict):
4728class ParsedDotGraph(TypedDict):
4729    """
4730    Represents a parsed `graphviz` dot-format graph consisting of nodes,
4731    edges, and subgraphs, with attributes attached to nodes and/or
4732    edges. An intermediate format during conversion to a full
4733    `DecisionGraph`. Includes the following slots:
4734
4735    - `'nodes'`: A list of tuples each holding a node ID followed by a
4736        list of name/value attribute pairs.
4737    - `'edges'`: A list of tuples each holding a from-ID, a to-ID,
4738        and then a list of name/value attribute pairs.
4739    - `'attrs'`: A list of tuples each holding a name/value attribute
4740        pair for graph-level attributes.
4741    - `'subgraphs'`: A list of subgraphs (each a tuple with a subgraph
4742        name and then another dictionary in the same format as this
4743        one).
4744    """
4745    nodes: List[Tuple[int, List[Tuple[str, str]]]]
4746    edges: List[Tuple[int, int, List[Tuple[str, str]]]]
4747    attrs: List[Tuple[str, str]]
4748    subgraphs: List[Tuple[str, 'ParsedDotGraph']]

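For illustration, a minimal value of this shape (hypothetical data, not the output of any particular parse):

>>> tiny: ParsedDotGraph = {
...     'nodes': [(1, [('name', 'A')])],
...     'edges': [(1, 2, [('label', 'up')])],
...     'attrs': [('rankdir', 'LR')],
...     'subgraphs': [],
... }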
def parseSimpleDotAttrs(fragment: str) -> List[Tuple[str, str]]:
4751def parseSimpleDotAttrs(fragment: str) -> List[Tuple[str, str]]:
4752    """
4753    Given a string fragment that starts with '[' and ends with ']',
4754    parses a simple attribute list in `graphviz` dot format from that
4755    fragment, returning a list of name/value attribute tuples. Raises a
4756    `DotParseError` if the fragment doesn't have the right format.
4757
4758    Examples:
4759
4760    >>> parseSimpleDotAttrs('[ name=value ]')
4761    [('name', 'value')]
4762    >>> parseSimpleDotAttrs('[ a=b c=d e=f ]')
4763    [('a', 'b'), ('c', 'd'), ('e', 'f')]
4764    >>> parseSimpleDotAttrs('[ a=b "c d"="e f" ]')
4765    [('a', 'b'), ('c d', 'e f')]
4766    >>> parseSimpleDotAttrs('[a=b "c d"="e f"]')
4767    [('a', 'b'), ('c d', 'e f')]
4768    >>> parseSimpleDotAttrs('[ a=b "c d"="e f"')
4769    Traceback (most recent call last):
4770    ...
4771    exploration.parsing.DotParseError...
4772    >>> parseSimpleDotAttrs('a=b "c d"="e f" ]')
4773    Traceback (most recent call last):
4774    ...
4775    exploration.parsing.DotParseError...
4776    >>> parseSimpleDotAttrs('[ a b=c ]')
4777    Traceback (most recent call last):
4778    ...
4779    exploration.parsing.DotParseError...
4780    >>> parseSimpleDotAttrs('[ a=b c ]')
4781    Traceback (most recent call last):
4782    ...
4783    exploration.parsing.DotParseError...
4784    >>> parseSimpleDotAttrs('[ name="value" ]')
4785    [('name', 'value')]
4786    >>> parseSimpleDotAttrs('[ name="\\\\"value\\\\"" ]')
4787    [('name', '"value"')]
4788    """
4789    if not fragment.startswith('[') or not fragment.endswith(']'):
4790        raise DotParseError(
4791            f"Simple attrs fragment missing delimiters:"
4792            f"\n  {repr(fragment)}"
4793        )
4794    result = []
4795    rest = fragment[1:-1].strip()
4796    while rest:
4797        # Get possibly-quoted attribute name:
4798        if rest.startswith('"'):
4799            try:
4800                aName, rest = utils.unquoted(rest)
4801            except ValueError:
4802                raise DotParseError(
4803                    f"Malformed quoted attribute name in"
4804                    f" fragment:\n  {repr(fragment)}"
4805                )
4806            rest = rest.lstrip()
4807            if not rest.startswith('='):
4808                raise DotParseError(
4809                    f"Missing '=' in attribute block in"
4810                    f" fragment:\n  {repr(fragment)}"
4811                )
4812            rest = rest[1:].lstrip()
4813        else:
4814            try:
4815                eqInd = rest.index('=')
4816            except ValueError:
4817                raise DotParseError(
4818                    f"Missing '=' in attribute block in"
4819                    f" fragment:\n  {repr(fragment)}"
4820                )
4821            aName = rest[:eqInd]
4822            if ' ' in aName:
4823                raise DotParseError(
4824                    f"Malformed unquoted attribute name"
4825                    f" {repr(aName)} in fragment:"
4826                    f"\n  {repr(fragment)}"
4827                )
4828            rest = rest[eqInd + 1:].lstrip()
4829
4830        # Get possibly-quoted attribute value:
4831        if rest.startswith('"'):
4832            try:
4833                aVal, rest = utils.unquoted(rest)
4834            except ValueError:
4835                raise DotParseError(
4836                    f"Malformed quoted attribute value in"
4837                    f" fragment:\n  {repr(fragment)}"
4838                )
4839            rest = rest.lstrip()
4840        else:
4841            try:
4842                spInd = rest.index(' ')
4843            except ValueError:
4844                spInd = len(rest)
4845            aVal = rest[:spInd]
4846            rest = rest[spInd:].lstrip()
4847
4848        # Append this attribute pair and continue parsing
4849        result.append((aName, aVal))
4850
4851    return result

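Quoting may also apply to just the value, letting it contain spaces while the name stays bare (a sketch following the quoting rules above):

>>> parseSimpleDotAttrs('[ a="b c" ]')
[('a', 'b c')]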
def parseDotNode(nodeLine: str) -> Tuple[int, Union[bool, List[Tuple[str, str]]]]:
4854def parseDotNode(
4855    nodeLine: str
4856) -> Tuple[int, Union[bool, List[Tuple[str, str]]]]:
4857    """
4858    Given a line of text from a `graphviz` dot-format graph
4859    (possibly ending in an '[' to indicate attributes to follow, or
4860    possibly including a '[ ... ]' block with attributes in-line),
4861    parses it as a node declaration, returning the ID of the node,
4862    along with a boolean indicating whether attributes follow or
4863    not. If an inline attribute block is present, the second member
4864    of the tuple will be a list of attribute name/value pairs. In
4865    that case, all attribute names and values must either be quoted
4866    or not include spaces.
4867    Examples:
4868
4869    >>> parseDotNode('1')
4870    (1, False)
4871    >>> parseDotNode(' 1 [ ')
4872    (1, True)
4873    >>> parseDotNode(' 1 [ a=b "c d"="e f" ] ')
4874    (1, [('a', 'b'), ('c d', 'e f')])
4875    >>> parseDotNode(' 3 [ name="A = \\\\"grate:open\\\\"" ]')
4876    (3, [('name', 'A = "grate:open"')])
4877    >>> parseDotNode('  "1"[')
4878    (1, True)
4879    >>> parseDotNode('  100[')
4880    (100, True)
4881    >>> parseDotNode('  1 2')
4882    Traceback (most recent call last):
4883    ...
4884    exploration.parsing.DotParseError...
4885    >>> parseDotNode('  1 [ 2')
4886    Traceback (most recent call last):
4887    ...
4888    exploration.parsing.DotParseError...
4889    >>> parseDotNode('  1 2')
4890    Traceback (most recent call last):
4891    ...
4892    exploration.parsing.DotParseError...
4893    >>> parseDotNode('  1 [ junk not=attrs ]')
4894    Traceback (most recent call last):
4895    ...
4896    exploration.parsing.DotParseError...
4897    >>> parseDotNode('  \\n')
4898    Traceback (most recent call last):
4899    ...
4900    exploration.parsing.DotParseError...
4901    """
4902    stripped = nodeLine.strip()
4903    if len(stripped) == 0:
4904        raise DotParseError(
4905            f"Empty node in dot graph on line:\n  {repr(nodeLine)}"
4906        )
4907    hasAttrs: Union[bool, List[Tuple[str, str]]] = False
4908    if stripped.startswith('"'):
4909        nodeName, rest = utils.unquoted(stripped)
4910        rest = rest.strip()
4911        if rest == '[':
4912            hasAttrs = True
4913        elif rest.startswith('[') and rest.endswith(']'):
4914            hasAttrs = parseSimpleDotAttrs(rest)
4915        elif rest:
4916            raise DotParseError(
4917                f"Extra junk {repr(rest)} after node on line:"
4918                f"\n {repr(nodeLine)}"
4919            )
4920
4921    else:
4922        if stripped.endswith('['):
4923            hasAttrs = True
4924            stripped = stripped[:-1].rstrip()
4925        elif stripped.endswith(']'):
4926            try:
4927                # TODO: Why did this used to be rindex? Was that
4928                # important in some case? (That doesn't work since the
4929                # value may contain a quoted open bracket).
4930                attrStart = stripped.index('[')
4931            except ValueError:
4932                raise DotParseError(
4933                    f"Unmatched ']' on line:\n  {repr(nodeLine)}"
4934                )
4935            hasAttrs = parseSimpleDotAttrs(
4936                stripped[attrStart:]
4937            )
4938            stripped = stripped[:attrStart].rstrip()
4939
4940        if ' ' in stripped:
4941            raise DotParseError(
4942                f"Unquoted multi-word node on line:\n  {repr(nodeLine)}"
4943            )
4944        else:
4945            nodeName = stripped
4946
4947    try:
4948        nodeID = int(nodeName)
4949    except ValueError:
4950        raise DotParseError(
4951            f"Node name {repr(nodeName)} is not an integer on"
4952            f" line:\n {repr(nodeLine)}"
4953        )
4954
4955    return (nodeID, hasAttrs)

def parseDotAttr(attrLine: str) -> Tuple[str, str]:
4958def parseDotAttr(attrLine: str) -> Tuple[str, str]:
4959    """
4960    Given a line of text from a `graphviz` dot-format graph, parses
4961    it as an attribute (maybe-quoted-attr-name =
4962    maybe-quoted-attr-value). Returns the (maybe-unquoted) attr-name
4963    and the (maybe-unquoted) attr-value as a pair of strings. Raises
4964    a `DotParseError` if the line cannot be parsed as an attribute.
4965    Examples:
4966
4967    >>> parseDotAttr("a=b")
4968    ('a', 'b')
4969    >>> parseDotAttr("  a = b ")
4970    ('a', 'b')
4971    >>> parseDotAttr('"a" = "b"')
4972    ('a', 'b')
4973    >>> parseDotAttr('"a" -> "b"')
4974    Traceback (most recent call last):
4975    ...
4976    exploration.parsing.DotParseError...
4977    >>> parseDotAttr('"a" = "b" c')
4978    Traceback (most recent call last):
4979    ...
4980    exploration.parsing.DotParseError...
4981    >>> parseDotAttr('a')
4982    Traceback (most recent call last):
4983    ...
4984    exploration.parsing.DotParseError...
4985    >>> parseDotAttr('')
4986    Traceback (most recent call last):
4987    ...
4988    exploration.parsing.DotParseError...
4989    >>> parseDotAttr('0 [ name="A" ]')
4990    Traceback (most recent call last):
4991    ...
4992    exploration.parsing.DotParseError...
4993    """
4994    stripped = attrLine.lstrip()
4995    if len(stripped) == 0:
4996        raise DotParseError(
4997            f"Empty attribute in dot graph on line:\n  {repr(attrLine)}"
4998        )
4999    if stripped.endswith(']') or stripped.endswith('['):
5000        raise DotParseError(
5001            f"Node attribute ends in '[' or ']' on line:"
5002            f"\n  {repr(attrLine)}"
5003        )
5004    if stripped.startswith('"'):
5005        try:
5006            attrName, rest = utils.unquoted(stripped)
5007        except ValueError:
5008            raise DotParseError(
5009                f"Unmatched quotes in line:\n  {repr(attrLine)}"
5010            )
5011        rest = rest.lstrip()
5012        if len(rest) == 0 or rest[0] != '=':
5013            raise DotParseError(
5014                f"No equals sign following attribute name on"
5015                f" line:\n  {repr(attrLine)}"
5016            )
5017        rest = rest[1:].lstrip()
5018    else:
5019        try:
5020            eqInd = stripped.index('=')
5021        except ValueError:
5022            raise DotParseError(
5023                f"No equals sign in attribute line:"
5024                f"\n  {repr(attrLine)}"
5025            )
5026        attrName = stripped[:eqInd].rstrip()
5027        rest = stripped[eqInd + 1:].lstrip()
5028
5029    if rest[0] == '"':
5030        try:
5031            attrVal, rest = utils.unquoted(rest)
5032        except ValueError:
5033            raise DotParseError(
5034                f"Unmatched quotes in line:\n  {repr(attrLine)}"
5035            )
5036        if rest.strip():
5037            raise DotParseError(
5038                f"Junk after attribute on line:"
5039                f"\n  {repr(attrLine)}"
5040            )
5041    else:
5042        attrVal = rest.rstrip()
5043
5044    return attrName, attrVal

def parseDotEdge(edgeLine: str) -> Tuple[int, int, bool]:
5047def parseDotEdge(edgeLine: str) -> Tuple[int, int, bool]:
5048    """
5049    Given a line of text from a `graphviz` dot-format graph, parses
5050    it as an edge (fromID -> toID). Returns a tuple containing the
5051    from ID, the to ID, and a boolean indicating whether attributes
5052    follow the edge on subsequent lines (true if the line ends with
5053    '['). Raises a `DotParseError` if the line cannot be parsed as
5054    an edge pair. Examples:
5055
5056    >>> parseDotEdge("1 -> 2")
5057    (1, 2, False)
5058    >>> parseDotEdge("  1 -> 2 ")
5059    (1, 2, False)
5060    >>> parseDotEdge('"1" -> "2"')
5061    (1, 2, False)
5062    >>> parseDotEdge('"1" -> "2" [')
5063    (1, 2, True)
5064    >>> parseDotEdge("a -> b")
5065    Traceback (most recent call last):
5066    ...
5067    exploration.parsing.DotParseError...
5068    >>> parseDotEdge('"1" = "1"')
5069    Traceback (most recent call last):
5070    ...
5071    exploration.parsing.DotParseError...
5072    >>> parseDotEdge('"1" -> "2" c')
5073    Traceback (most recent call last):
5074    ...
5075    exploration.parsing.DotParseError...
5076    >>> parseDotEdge('1')
5077    Traceback (most recent call last):
5078    ...
5079    exploration.parsing.DotParseError...
5080    >>> parseDotEdge('')
5081    Traceback (most recent call last):
5082    ...
5083    exploration.parsing.DotParseError...
5084    """
5085    stripped = edgeLine.lstrip()
5086    if len(stripped) == 0:
5087        raise DotParseError(
5088            f"Empty edge in dot graph on line:\n  {repr(edgeLine)}"
5089        )
5090    if stripped.startswith('"'):
5091        try:
5092            fromStr, rest = utils.unquoted(stripped)
5093        except ValueError:
5094            raise DotParseError(
5095                f"Unmatched quotes in line:\n  {repr(edgeLine)}"
5096            )
5097        rest = rest.lstrip()
5098        if rest[:2] != '->':
5099            raise DotParseError(
5100                f"No arrow sign following source name on"
5101                f" line:\n  {repr(edgeLine)}"
5102            )
5103        rest = rest[2:].lstrip()
5104    else:
5105        try:
5106            arrowInd = stripped.index('->')
5107        except ValueError:
5108            raise DotParseError(
5109                f"No arrow in edge line:"
5110                f"\n  {repr(edgeLine)}"
5111            )
5112        fromStr = stripped[:arrowInd].rstrip()
5113        rest = stripped[arrowInd + 2:].lstrip()
5114        if ' ' in fromStr:
5115            raise DotParseError(
5116                f"Unquoted multi-word edge source on line:"
5117                f"\n  {repr(edgeLine)}"
5118            )
5119
5120    hasAttrs = False
5121    if rest[0] == '"':
5122        try:
5123            toStr, rest = utils.unquoted(rest)
5124        except ValueError:
5125            raise DotParseError(
5126                f"Unmatched quotes in line:\n  {repr(edgeLine)}"
5127            )
5128        stripped = rest.strip()
5129        if stripped == '[':
5130            hasAttrs = True
5131        elif stripped:
5132            raise DotParseError(
5133                f"Junk after edge on line:"
5134                f"\n  {repr(edgeLine)}"
5135            )
5136    else:
5137        toStr = rest.rstrip()
5138        if toStr.endswith('['):
5139            toStr = toStr[:-1].rstrip()
5140            hasAttrs = True
5141        if ' ' in toStr:
5142            raise DotParseError(
5143                f"Unquoted multi-word edge destination on line:"
5144                f"\n  {repr(edgeLine)}"
5145            )
5146
5147    try:
5148        fromID = int(fromStr)
5149    except ValueError:
5150        raise DotParseError(
5151            f"Invalid 'from' ID: {repr(fromStr)} on line:"
5152            f"\n  {repr(edgeLine)}"
5153        )
5154
5155    try:
5156        toID = int(toStr)
5157    except ValueError:
5158        raise DotParseError(
5159            f"Invalid 'to' ID: {repr(toStr)} on line:"
5160            f"\n  {repr(edgeLine)}"
5161        )
5162
5163    return (fromID, toID, hasAttrs)

def parseDotAttrList(lines: List[str]) -> Tuple[List[Tuple[str, str]], List[str]]:
5166def parseDotAttrList(
5167    lines: List[str]
5168) -> Tuple[List[Tuple[str, str]], List[str]]:
5169    """
5170    Given a list of lines of text from a `graphviz` dot-format
5171    graph which starts with an attribute line, parses multiple
5172    attribute lines until a line containing just ']' is found.
5173    Returns a list of the parsed name/value attribute pair tuples,
5174    along with a list of remaining unparsed strings (not counting
5175    the closing ']' line). Raises a `DotParseError` if it finds a
5176    non-attribute line or if it fails to find a closing ']' line.
5177    Examples:
5178
5179    >>> parseDotAttrList([
5180    ...     'a=b\\n',
5181    ...     'c=d\\n',
5182    ...     ']\\n',
5183    ... ])
5184    ([('a', 'b'), ('c', 'd')], [])
5185    >>> parseDotAttrList([
5186    ...     'a=b',
5187    ...     'c=d',
5188    ...     '  ]',
5189    ...     'more',
5190    ...     'lines',
5191    ... ])
5192    ([('a', 'b'), ('c', 'd')], ['more', 'lines'])
5193    >>> parseDotAttrList([
5194    ...     'a=b',
5195    ...     'c=d',
5196    ... ])
5197    Traceback (most recent call last):
5198    ...
5199    exploration.parsing.DotParseError...
5200    """
5201    index = 0
5202    found = []
5203    while index < len(lines):
5204        thisLine = lines[index]
5205        try:
5206            found.append(parseDotAttr(thisLine))
5207        except DotParseError:
5208            if thisLine.strip() == ']':
5209                return (found, lines[index + 1:])
5210            else:
5211                raise DotParseError(
5212                    f"Could not parse attribute from line:"
5213                    f"\n  {repr(thisLine)}"
5214                    f"\nAttributes block starts on line:"
5215                    f"\n  {repr(lines[0])}"
5216                )
5217        index += 1
5218
5219    raise DotParseError(
5220        f"No list terminator (']') for attributes starting on line:"
5221        f"\n  {repr(lines[0])}"
5222    )

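An edge line that ends with '[' composes with this function: `parseDotEdge` reports that attributes follow, and `parseDotAttrList` consumes them from the subsequent lines, which is how `parseDotGraphContents` below uses the pair. A sketch built from the examples above:

>>> parseDotEdge('1 -> 2 [')
(1, 2, True)
>>> parseDotAttrList(['label=up', ']', 'rest'])
([('label', 'up')], ['rest'])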
def parseDotSubgraphStart(line: str) -> str:
5225def parseDotSubgraphStart(line: str) -> str:
5226    """
5227    Parses the start of a subgraph from a line of a graph file. The
5228    line must start with the word 'subgraph' and then have a name,
5229    followed by a '{' at the end of the line. Raises a
5230    `DotParseError` if this format doesn't match. Examples:
5231
5232    >>> parseDotSubgraphStart('subgraph A {')
5233    'A'
5234    >>> parseDotSubgraphStart('subgraph A B {')
5235    Traceback (most recent call last):
5236    ...
5237    exploration.parsing.DotParseError...
5238    >>> parseDotSubgraphStart('subgraph "A B" {')
5239    'A B'
5240    >>> parseDotSubgraphStart('subgraph A')
5241    Traceback (most recent call last):
5242    ...
5243    exploration.parsing.DotParseError...
5244    """
5245    stripped = line.strip()
5246    if len(stripped) == 0:
5247        raise DotParseError(
5248            f"Empty line where subgraph was expected:"
5249            f"\n  {repr(line)}"
5250        )
5251
5252    if not stripped.startswith('subgraph '):
5253        raise DotParseError(
5254            f"Subgraph doesn't start with 'subgraph' on line:"
5255            f"\n  {repr(line)}"
5256        )
5257
5258    stripped = stripped[9:]
5259    if stripped.startswith('"'):
5260        try:
5261            name, rest = utils.unquoted(stripped)
5262        except ValueError:
5263            raise DotParseError(
5264                f"Malformed quotes on subgraph line:\n {repr(line)}"
5265            )
5266        if rest.strip() != '{':
5267            raise DotParseError(
5268                f"Junk or missing '{{' on subgraph line:\n {repr(line)}"
5269            )
5270    else:
5271        parts = stripped.split()
5272        if len(parts) != 2 or parts[1] != '{':
5273            raise DotParseError(
5274                f"Junk or missing '{{' on subgraph line:\n {repr(line)}"
5275            )
5276        name, _ = parts
5277
5278    return name

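Zone subgraphs written by `toDot` carry a 'cluster_' or '_' prefix which `parseDot` strips later; this function returns the prefixed name unchanged (a sketch, assuming that naming scheme):

>>> parseDotSubgraphStart('subgraph cluster_Zone {')
'cluster_Zone'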
def parseDotGraphContents(lines: List[str]) -> Tuple[ParsedDotGraph, List[str]]:
5281def parseDotGraphContents(
5282    lines: List[str]
5283) -> Tuple[ParsedDotGraph, List[str]]:
5284    """
5285    Given a list of lines from a `graphviz` dot-format string,
5286    parses the list as the contents of a graph (or subgraph),
5287    stopping when it reaches a line that just contains '}'. Raises a
5288    `DotParseError` if it cannot do so or if the terminator is
5289    missing. Returns a tuple containing the parsed graph data (see
5290    `ParsedDotGraph`) and the list of remaining lines after the
5291    terminator. Recursively parses subgraphs. Example:
5292
5293    >>> bits = parseDotGraphContents([
5294    ...     '"graph attr"=1',
5295    ...     '1 [',
5296    ...     '  attr=value',
5297    ...     ']',
5298    ...     '1 -> 2 [',
5299    ...     '  fullLabel="to_B"',
5300    ...     '  quality=number',
5301    ...     ']',
5302    ...     'subgraph name {',
5303    ...     '  300',
5304    ...     '  400',
5305    ...     '  300 -> 400 [',
5306    ...     '    fullLabel=forward',
5307    ...     '  ]',
5308    ...     '}',
5309    ...     '}',
5310    ... ])
5311    >>> len(bits)
5312    2
5313    >>> g = bits[0]
5314    >>> bits[1]
5315    []
5316    >>> sorted(g.keys())
5317    ['attrs', 'edges', 'nodes', 'subgraphs']
5318    >>> g['nodes']
5319    [(1, [('attr', 'value')])]
5320    >>> g['edges']
5321    [(1, 2, [('fullLabel', 'to_B'), ('quality', 'number')])]
5322    >>> g['attrs']
5323    [('graph attr', '1')]
5324    >>> sgs = g['subgraphs']
5325    >>> len(sgs)
5326    1
5327    >>> len(sgs[0])
5328    2
5329    >>> sgs[0][0]
5330    'name'
5331    >>> sg = sgs[0][1]
5332    >>> sorted(sg.keys())
5333    ['attrs', 'edges', 'nodes', 'subgraphs']
5334    >>> sg["nodes"]
5335    [(300, []), (400, [])]
5336    >>> sg["edges"]
5337    [(300, 400, [('fullLabel', 'forward')])]
5338    >>> sg["attrs"]
5339    []
5340    >>> sg["subgraphs"]
5341    []
5342    """
5343    result: ParsedDotGraph = {
5344        'nodes': [],
5345        'edges': [],
5346        'attrs': [],
5347        'subgraphs': [],
5348    }
5349    index = 0
5350    remainder = None
5351    # Consider each line:
5352    while index < len(lines):
5353        # Grab line and pre-increment index
5354        thisLine = lines[index]
5355        index += 1
5356
5357        # Check for } first because it could be parsed as a node
5358        stripped = thisLine.strip()
5359        if stripped == '}':
5360            remainder = lines[index:]
5361            break
5362        elif stripped == '':  # ignore blank lines
5363            continue
5364
5365        # Cascading parsing attempts, since the possibilities are
5366        # mostly mutually exclusive.
5367        # TODO: Node/attr confusion with = in a node name?
5368        try:
5369            attrName, attrVal = parseDotAttr(thisLine)
5370            result['attrs'].append((attrName, attrVal))
5371        except DotParseError:
5372            try:
5373                fromNode, toNode, hasEAttrs = parseDotEdge(
5374                    thisLine
5375                )
5376                if hasEAttrs:
5377                    attrs, rest = parseDotAttrList(
5378                        lines[index:]
5379                    )
5380                    # Restart to process rest
5381                    lines = rest
5382                    index = 0
5383                else:
5384                    attrs = []
5385                result['edges'].append((fromNode, toNode, attrs))
5386            except DotParseError:
5387                try:
5388                    nodeName, hasNAttrs = parseDotNode(
5389                        thisLine
5390                    )
5391                    if hasNAttrs is True:
5392                        attrs, rest = parseDotAttrList(
5393                            lines[index:]
5394                        )
5395                        # Restart to process rest
5396                        lines = rest
5397                        index = 0
5398                    elif hasNAttrs:
5399                        attrs = hasNAttrs
5400                    else:
5401                        attrs = []
5402                    result['nodes'].append((nodeName, attrs))
5403                except DotParseError:
5404                    try:
5405                        subName = parseDotSubgraphStart(
5406                            thisLine
5407                        )
5408                        subStuff, rest = \
5409                            parseDotGraphContents(
5410                                lines[index:]
5411                            )
5412                        result['subgraphs'].append((subName, subStuff))
5413                        # Restart to process rest
5414                        lines = rest
5415                        index = 0
5416                    except DotParseError:
5417                        raise DotParseError(
5418                            f"Unrecognizable graph line (possibly"
5419                            f" beginning of unfinished structure):"
5420                            f"\n  {repr(thisLine)}"
5421                        )
5422    if remainder is None:
5423        raise DotParseError(
5424            f"Graph (or subgraph) is missing closing '}}'. Starts"
5425            f" on line:\n  {repr(lines[0])}"
5426        )
5427    else:
5428        return (result, remainder)

def parseDot(dotStr: str, parseFormat: ParseFormat = ParseFormat()) -> exploration.core.DecisionGraph:
5431def parseDot(
5432    dotStr: str,
5433    parseFormat: ParseFormat = ParseFormat()
5434) -> core.DecisionGraph:
5435    """
5436    Converts a `graphviz` dot-format string into a `core.DecisionGraph`.
5437    A custom `ParseFormat` may be specified if desired; the default
5438    `ParseFormat` is used if not. Note that this relies on specific
5439    indentation schemes used by `toDot`, so a hand-edited dot-format
5440    graph will probably not work. A `DotParseError` is raised if the
5441    provided string can't be parsed. Example:
5442
5443    >>> parseDotNode(' 3 [ label="A = \\\\"grate:open\\\\"" ]')
5444    (3, [('label', 'A = "grate:open"')])
5445    >>> sg = '''\
5446    ... subgraph __requirements__ {
5447    ...   3 [ label="A = \\\\"grate:open\\\\"" ]
5448    ...   4 [ label="B = \\\\"!(helmet)\\\\"" ]
5449    ...   5 [ label="C = \\\\"helmet\\\\"" ]
5450    ... }'''
5451    >>> parseDotGraphContents(sg.splitlines()[1:])
5452    ({'nodes': [(3, [('label', 'A = "grate:open"')]),\
5453 (4, [('label', 'B = "!(helmet)"')]), (5, [('label', 'C = "helmet"')])],\
5454 'edges': [], 'attrs': [], 'subgraphs': []}, [])
5455    >>> from . import core
5456    >>> dg = core.DecisionGraph.example('simple')
5457    >>> encoded = toDot(dg)
5458    >>> reconstructed = parseDot(encoded)
5459    >>> for diff in dg.listDifferences(reconstructed):
5460    ...     print(diff)
5461    >>> reconstructed == dg
5462    True
5463    >>> dg = core.DecisionGraph.example('abc')
5464    >>> encoded = toDot(dg)
5465    >>> reconstructed = parseDot(encoded)
5466    >>> for diff in dg.listDifferences(reconstructed):
5467    ...     print(diff)
5468    >>> reconstructed == dg
5469    True
5470    >>> tg = core.DecisionGraph()
5471    >>> tg.addDecision('A')
5472    0
5473    >>> tg.addDecision('B')
5474    1
5475    >>> tg.addTransition('A', 'up', 'B', 'down')
5476    >>> same = parseDot('''
5477    ... digraph {
5478    ...     0 [ name=A label=A ]
5479    ...       0 -> 1 [
5480    ...         label=up
5481    ...         fullLabel=up
5482    ...         reciprocal=down
5483    ...       ]
5484    ...     1 [ name=B label=B ]
5485    ...       1 -> 0 [
5486    ...         label=down
5487    ...         fullLabel=down
5488    ...         reciprocal=up
5489    ...       ]
5490    ... }''')
5491    >>> for diff in tg.listDifferences(same):
5492    ...     print(diff)
5493    >>> same == tg
5494    True
5495    >>> pf = ParseFormat()
5496    >>> tg.setTransitionRequirement('A', 'up', pf.parseRequirement('one|two'))
5497    >>> tg.setConsequence(
5498    ...     'B',
5499    ...     'down',
5500    ...     [base.effect(gain="one")]
5501    ... )
5502    >>> test = parseDot('''
5503    ...   digraph {
5504    ...     0 [ name="A = \\\\"one|two\\\\"" label="A = \\\\"one|two\\\\"" ]
5505    ...   }
5506    ... ''')
5507    >>> list(test.nodes)
5508    [0]
5509    >>> test.nodes[0]['name']
5510    'A = "one|two"'
5511    >>> eff = (
5512    ...   r'"A = \\"[{\\\\\\"type\\\\\\": \\\\\\"gain\\\\\\",'
5513    ...   r' \\\\\\"applyTo\\\\\\": \\\\\\"active\\\\\\",'
5514    ...   r' \\\\\\"value\\\\\\": \\\\\\"one\\\\\\",'
5515    ...   r' \\\\\\"charges\\\\\\": null, \\\\\\"hidden\\\\\\": false,'
5516    ...   r' \\\\\\"delay\\\\\\": null}]\\""'
5517    ... )
5518    >>> utils.unquoted(eff)[1]
5519    ''
5520    >>> test2 = parseDot(
5521    ...     'digraph {\\n 0 [ name=' + eff + ' label=' + eff + ' ]\\n}'
5522    ... )
5523    >>> s = test2.nodes[0]['name']
5524    >>> s[:25]
5525    'A = "[{\\\\"type\\\\": \\\\"gain\\\\"'
5526    >>> s[25:50]
5527    ', \\\\"applyTo\\\\": \\\\"active\\\\"'
5528    >>> s[50:70]
5529    ', \\\\"value\\\\": \\\\"one\\\\"'
5530    >>> s[70:89]
5531    ', \\\\"charges\\\\": null'
5532    >>> s[89:108]
5533    ', \\\\"hidden\\\\": false'
5534    >>> s[108:]
5535    ', \\\\"delay\\\\": null}]"'
5536    >>> ae = s[s.index('=') + 1:].strip()
5537    >>> uq, after = utils.unquoted(ae)
5538    >>> after
5539    ''
5540    >>> fromJSON(uq) == [base.effect(gain="one")]
5541    True
5542    >>> same = parseDot('''
5543    ... digraph {
5544    ...   0 [ name=A label=A ]
5545    ...     0 -> 1 [
5546    ...       label=up
5547    ...       fullLabel=up
5548    ...       reciprocal=down
5549    ...       req=A
5550    ...     ]
5551    ...   1 [ name=B label=B ]
5552    ...     1 -> 0 [
5553    ...       label=down
5554    ...       fullLabel=down
5555    ...       reciprocal=up
5556    ...       consequence=A
5557    ...     ]
5558    ...   subgraph __requirements__ {
5559    ...     2 [ label="A = \\\\"one|two\\\\"" ]
5560    ...   }
5561    ...   subgraph __consequences__ {
5562    ...     3 [ label=''' + eff + ''' ]
5563    ...   }
5564    ... }''')
5565    >>> c = {'tags': {}, 'annotations': [], 'reciprocal': 'up', 'consequence': [{'type': 'gain', 'applyTo': 'active', 'value': 'one', 'delay': None, 'charges': None}]}['consequence']  # noqa
5566
5567    >>> for diff in tg.listDifferences(same):
5568    ...     print(diff)
5569    >>> same == tg
5570    True
5571    """
5572    lines = dotStr.splitlines()
5573    while lines[0].strip() == '':
5574        lines.pop(0)
5575    if lines.pop(0).strip() != "digraph {":
5576        raise DotParseError("Input doesn't begin with 'digraph {'.")
5577
5578    # Create our result
5579    result = core.DecisionGraph()
5580
5581    # Parse to intermediate graph data structure
5582    graphStuff, remaining = parseDotGraphContents(lines)
5583    if remaining:
5584        if len(remaining) <= 4:
5585            junk = '\n  '.join(repr(line) for line in remaining)
5586        else:
5587            junk = '\n  '.join(repr(line) for line in remaining[:4])
5588            junk += '\n  ...'
5589        raise DotParseError(f"Extra junk after graph:\n  {junk}")
5590
5591    # Sort out subgraphs to find legends
5592    zoneSubs = []
5593    reqLegend = None
5594    consequenceLegend = None
5595    mechanismLegend = None
5596    for sub in graphStuff['subgraphs']:
5597        if sub[0] == '__requirements__':
5598            reqLegend = sub[1]
5599        elif sub[0] == '__consequences__':
5600            consequenceLegend = sub[1]
5601        elif sub[0] == '__mechanisms__':
5602            mechanismLegend = sub[1]
5603        else:
5604            zoneSubs.append(sub)
5605
5606    # Build out our mapping from requirement abbreviations to actual
5607    # requirement objects
5608    reqMap: Dict[str, base.Requirement] = {}
5609    if reqLegend is not None:
5610        if reqLegend['edges']:
5611            raise DotParseError(
5612                f"Requirements legend subgraph has edges:"
5613                f"\n  {repr(reqLegend['edges'])}"
5614                f"\n(It should only have nodes.)"
5615            )
5616        if reqLegend['attrs']:
5617            raise DotParseError(
5618                f"Requirements legend subgraph has attributes:"
5619                f"\n  {repr(reqLegend['attrs'])}"
5620                f"\n(It should only have nodes.)"
5621            )
5622        if reqLegend['subgraphs']:
5623            raise DotParseError(
5624                f"Requirements legend subgraph has subgraphs:"
5625                f"\n  {repr(reqLegend['subgraphs'])}"
5626                f"\n(It should only have nodes.)"
5627            )
5628        for node, attrs in reqLegend['nodes']:
5629            if not attrs:
5630                raise DotParseError(
5631                    f"Node in requirements legend missing attributes:"
5632                    f"\n  {repr(attrs)}"
5633                )
5634            if len(attrs) != 1:
5635                raise DotParseError(
5636                    f"Node in requirements legend has multiple"
5637                    f" attributes:\n  {repr(attrs)}"
5638                )
5639            reqStr = attrs[0][1]
5640            try:
5641                eqInd = reqStr.index('=')
5642            except ValueError:
5643                raise DotParseError(
5644                    f"Missing '=' in requirement specifier:"
5645                    f"\n  {repr(reqStr)}"
5646                )
5647            ab = reqStr[:eqInd].rstrip()
5648            encoded = reqStr[eqInd + 1:].lstrip()
5649            try:
5650                encVal, empty = utils.unquoted(encoded)
5651            except ValueError:
5652                raise DotParseError(
5653                    f"Invalid quoted requirement value:"
5654                    f"\n  {repr(encoded)}"
5655                )
5656            if empty.strip():
5657                raise DotParseError(
5658                    f"Extra junk after requirement value:"
5659                    f"\n  {repr(empty)}"
5660                )
5661            try:
5662                req = parseFormat.parseRequirement(encVal)
5663            except ValueError:
5664                raise DotParseError(
5665                    f"Invalid encoded requirement in requirements"
5666                    f" legend:\n  {repr(encVal)}"
5667                )
5668            if ab in reqMap:
5669                raise DotParseError(
5670                    f"Abbreviation '{ab}' was defined multiple"
5671                    f" times in requirements legend."
5672                )
5673            reqMap[ab] = req
5674
5675    # Build out our mapping from consequence abbreviations to actual
5676    # consequence lists
5677    consequenceMap: Dict[str, base.Consequence] = {}
5678    if consequenceLegend is not None:
5679        if consequenceLegend['edges']:
5680            raise DotParseError(
5681                f"Consequences legend subgraph has edges:"
5682                f"\n  {repr(consequenceLegend['edges'])}"
5683                f"\n(It should only have nodes.)"
5684            )
5685        if consequenceLegend['attrs']:
5686            raise DotParseError(
5687                f"Consequences legend subgraph has attributes:"
5688                f"\n  {repr(consequenceLegend['attrs'])}"
5689                f"\n(It should only have nodes.)"
5690            )
5691        if consequenceLegend['subgraphs']:
5692            raise DotParseError(
5693                f"Consequences legend subgraph has subgraphs:"
5694                f"\n  {repr(consequenceLegend['subgraphs'])}"
5695                f"\n(It should only have nodes.)"
5696            )
5697        for node, attrs in consequenceLegend['nodes']:
5698            if not attrs:
5699                raise DotParseError(
5700                    f"Node in consequence legend missing attributes:"
5701                    f"\n  {repr(attrs)}"
5702                )
5703            if len(attrs) != 1:
5704                raise DotParseError(
5705                    f"Node in consequences legend has multiple"
5706                    f" attributes:\n  {repr(attrs)}"
5707                )
5708            consStr = attrs[0][1]
5709            try:
5710                eqInd = consStr.index('=')
5711            except ValueError:
5712                raise DotParseError(
5713                    f"Missing '=' in consequence string:"
5714                    f"\n  {repr(consStr)}"
5715                )
5716            ab = consStr[:eqInd].rstrip()
5717            encoded = consStr[eqInd + 1:].lstrip()
5718            try:
5719                encVal, empty = utils.unquoted(encoded)
5720            except ValueError:
5721                raise DotParseError(
5722                    f"Invalid quoted consequence value:"
5723                    f"\n  {repr(encoded)}"
5724                )
5725            if empty.strip():
5726                raise DotParseError(
5727                    f"Extra junk after consequence value:"
5728                    f"\n  {repr(empty)}"
5729                )
5730            try:
5731                consequences = fromJSON(encVal)
5732            except json.decoder.JSONDecodeError:
5733                raise DotParseError(
5734                    f"Invalid encoded consequence in consequences"
5735                    f" legend:\n  {repr(encVal)}"
5736                )
5737            if ab in consequenceMap:
5738                raise DotParseError(
5739                    f"Abbreviation '{ab}' was defined multiple"
5740                    f" times in consequences legend."
5741                )
5742            consequenceMap[ab] = consequences
5743
5744    # Reconstruct mechanisms
5745    if mechanismLegend is not None:
5746        if mechanismLegend['edges']:
5747            raise DotParseError(
5748                f"Mechanisms legend subgraph has edges:"
5749                f"\n  {repr(mechanismLegend['edges'])}"
5750                f"\n(It should only have nodes.)"
5751            )
5752        if mechanismLegend['attrs']:
5753            raise DotParseError(
5754                f"Mechanisms legend subgraph has attributes:"
5755                f"\n  {repr(mechanismLegend['attrs'])}"
5756                f"\n(It should only have nodes.)"
5757            )
5758        if mechanismLegend['subgraphs']:
5759            raise DotParseError(
5760                f"Mechanisms legend subgraph has subgraphs:"
5761                f"\n  {repr(mechanismLegend['subgraphs'])}"
5762                f"\n(It should only have nodes.)"
5763            )
5764        for node, attrs in mechanismLegend['nodes']:
5765            if not attrs:
5766                raise DotParseError(
5767                    f"Node in mechanisms legend missing attributes:"
5768                    f"\n  {repr(attrs)}"
5769                )
5770            if len(attrs) != 1:
5771                raise DotParseError(
5772                    f"Node in mechanisms legend has multiple"
5773                    f" attributes:\n  {repr(attrs)}"
5774                )
5775            mechStr = attrs[0][1]
5776            try:
5777                atInd = mechStr.index('@')
5778                colonInd = mechStr.index(':')
5779            except ValueError:
5780                raise DotParseError(
5781                    f"Missing '@' or ':' in mechanism string:"
5782                    f"\n  {repr(mechStr)}"
5783                )
5784            if atInd > colonInd:
5785                raise DotParseError(
5786                    f"':' after '@' in mechanism string:"
5787                    f"\n  {repr(mechStr)}"
5788                )
5789            mID: base.MechanismID
5790            where: Optional[base.DecisionID]
5791            mName: base.MechanismName
5792            try:
5793                mID = int(mechStr[:atInd].rstrip())
5794            except ValueError:
5795                raise DotParseError(
5796                    f"Invalid mechanism ID in mechanism string:"
5797                    f"\n  {repr(mechStr)}"
5798                )
5799            try:
5800                whereStr = mechStr[atInd + 1:colonInd].strip()
5801                if whereStr == "None":
5802                    where = None
5803                else:
5804                    where = int(whereStr)
5805            except ValueError:
5806                raise DotParseError(
5807                    f"Invalid mechanism location in mechanism string:"
5808                    f"\n  {repr(mechStr)}"
5809                )
5810            mName, rest = utils.unquoted(mechStr[colonInd + 1:].lstrip())
5811            if rest.strip():
5812                raise DotParseError(
5813                    f"Junk after mechanism name in mechanism string:"
5814                    f"\n  {repr(mechStr)}"
5815                )
5816            result.mechanisms[mID] = (where, mName)
5817            if where is None:
5818                result.globalMechanisms[mName] = mID
5819
5820    # Add zones to the graph based on parent info
5821    # Map from zones to children we should add to them once all
5822    # zones are created:
5823    zoneChildMap: Dict[str, List[str]] = {}
5824    for prefixedName, graphData in zoneSubs:
5825        # Chop off cluster_ or _ prefix:
5826        zoneName = prefixedName[prefixedName.index('_') + 1:]
5827        if graphData['edges']:
5828            raise DotParseError(
5829                f"Zone subgraph for zone {repr(zoneName)} has edges:"
5830                f"\n  {repr(graphData['edges'])}"
5831                f"\n(It should only have nodes and attributes.)"
5832            )
5833        if graphData['subgraphs']:
5834            raise DotParseError(
5835                f"Zone subgraph for zone {repr(zoneName)} has"
5836                f" subgraphs:"
5837                f"\n  {repr(graphData['subgraphs'])}"
5838                f"\n(It should only have nodes and attributes.)"
5839            )
5840        # Note: we ignore nodes as that info is used for
5841        # visualization but is redundant with the zone parent info
5842        # stored in nodes, and it would be tricky to tease apart
5843        # direct vs. indirect relationships from merged info.
5844        parents = None
5845        level = None
5846        for attr, aVal in graphData['attrs']:
5847            if attr == 'parents':
5848                try:
5849                    parents = set(fromJSON(aVal))
5850                except json.decoder.JSONDecodeError:
5851                    raise DotParseError(
5852                        f"Invalid parents JSON in zone subgraph for"
5853                        f" zone '{zoneName}':\n  {repr(aVal)}"
5854                    )
5855            elif attr == 'level':
5856                try:
5857                    level = int(aVal)
5858                except ValueError:
5859                    raise DotParseError(
5860                        f"Invalid level in zone subgraph for"
5861                        f" zone '{zoneName}':\n  {repr(aVal)}"
5862                    )
5863            elif attr == 'label':
5864                pass  # name already extracted from the subgraph name
5865
5866            else:
5867                raise DotParseError(
5868                    f"Unexpected attribute '{attr}' in zone"
5869                    f" subgraph for zone '{zoneName}'"
5870                )
5871        if parents is None:
5872            raise DotParseError(
5873                f"No parents attribute for zone '{zoneName}'."
5874                f" Graph is:\n  {repr(graphData)}"
5875            )
5876        if level is None:
5877            raise DotParseError(
5878                f"No level attribute for zone '{zoneName}'."
5879                f" Graph is:\n  {repr(graphData)}"
5880            )
5881
5882        # Add ourself to our parents in the child map
5883        for parent in parents:
5884            zoneChildMap.setdefault(parent, []).append(zoneName)
5885
5886        # Create this zone
5887        result.createZone(zoneName, level)
5888
5889    # Add zone parent/child relationships
5890    for parent, children in zoneChildMap.items():
5891        for child in children:
5892            result.addZoneToZone(child, parent)
5893
5894    # Add nodes to the graph
5895    for (node, attrs) in graphStuff['nodes']:
5896        name: Optional[str] = None
5897        annotations = []
5898        tags: Dict[base.Tag, base.TagValue] = {}
5899        zones = []
5900        for attr, aVal in attrs:
5901            if attr == 'name':  # it's the name
5902                name = aVal
5903            elif attr == 'label':  # zone + name; redundant
5904                pass
5905            elif attr.startswith('t_'):  # it's a tag
5906                tagName = attr[2:]
5907                try:
5908                    tagAny = fromJSON(aVal)
5909                except json.decoder.JSONDecodeError:
5910                    raise DotParseError(
5911                        f"Error in JSON for tag attr '{attr}' of node"
5912                        f" '{node}'"
5913                    )
5914                if isinstance(tagAny, base.TagValueTypes):
5915                    tagVal: base.TagValue = cast(base.TagValue, tagAny)
5916                else:
5917                    raise DotParseError(
5918                        f"JSON for tag value encodes disallowed tag"
5919                        f" value of type {type(tagAny)}. Value is:"
5920                        f"\n  {repr(tagAny)}"
5921                    )
5922                tags[tagName] = tagVal
5923            elif attr.startswith('z_'):  # it's a zone
5924                zones.append(attr[2:])
5925            elif attr == 'annotations':  # It's the annotations
5926                try:
5927                    annotations = fromJSON(aVal)
5928                except json.decoder.JSONDecodeError:
5929                    raise DotParseError(
5930                        f"Bad JSON in attribute '{attr}' of node"
5931                        f" '{node}'"
5932                    )
5933            else:
5934                raise DotParseError(
5935                    f"Unrecognized node attribute '{attr}' for node"
5936                    f" '{node}'"
5937                )
5938
5939        # TODO: Domains here?
5940        if name is None:
5941            raise DotParseError(f"Node '{node}' does not have a name.")
5942
5943        result.addIdentifiedDecision(
5944            node,
5945            name,
5946            tags=tags,
5947            annotations=annotations
5948        )
5949        for zone in zones:
5950            try:
5951                result.addDecisionToZone(node, zone)
5952            except core.MissingZoneError:
5953                raise DotParseError(
5954                    f"Zone '{zone}' for node {node} does not"
5955                    f" exist."
5956                )
5957
5958    # Add mechanisms to each node:
5959    for (mID, (where, mName)) in result.mechanisms.items():
5960        mPool = result.nodes[where].setdefault('mechanisms', {})
5961        if mName in mPool:
5962            raise DotParseError(
5963                f"Multiple mechanisms named {mName!r} at"
5964                f" decision {where}."
5965            )
5966        mPool[mName] = mID
5967
5968    # Reciprocals to double-check once all edges are added
5969    recipChecks: Dict[
5970        Tuple[base.DecisionID, base.Transition],
5971        base.Transition
5972    ] = {}
5973
5974    # Add each edge
5975    for (source, dest, attrs) in graphStuff['edges']:
5976        annotations = []
5977        tags = {}
5978        label = None
5979        requirements = None
5980        consequence = None
5981        reciprocal = None
5982        for attr, aVal in attrs:
5983            if attr.startswith('t_'):
5984                try:
5985                    tags[attr[2:]] = fromJSON(aVal)
5986                except json.decoder.JSONDecodeError:
5987                    raise DotParseError(
5988                        f"Invalid JSON in edge tag '{attr}' for edge"
5989                        f" from '{source}' to '{dest}':"
5990                        f"\n  {repr(aVal)}"
5991                    )
5992            elif attr == "label":  # We ignore the short-label
5993                pass
5994            elif attr == "fullLabel":  # This is our transition name
5995                label = aVal
5996            elif attr == "reciprocal":
5997                reciprocal = aVal
5998            elif attr == "req":
5999                reqAbbr = aVal
6000                if reqAbbr not in reqMap:
6001                    raise DotParseError(
6002                        f"Edge from '{source}' to '{dest}' has"
6003                        f" requirement abbreviation '{reqAbbr}'"
6004                        f" but that abbreviation was not listed"
6005                        f" in the '__requirements__' subgraph."
6006                    )
6007                requirements = reqMap[reqAbbr]
6008            elif attr == "consequence":
6009                consequenceAbbr = aVal
6010                if consequenceAbbr not in consequenceMap:
6011                    raise DotParseError(
6012                        f"Edge from '{source}' to '{dest}' has"
6013                        f" consequence abbreviation"
6014                        f" '{consequenceAbbr}' but that"
6015                        f" abbreviation was not listed in the"
6016                        f" '__consequences__' subgraph."
6017                    )
6018                consequence = consequenceMap[consequenceAbbr]
6019            elif attr == "annotations":
6020                try:
6021                    annotations = fromJSON(aVal)
6022                except json.decoder.JSONDecodeError:
6023                    raise DotParseError(
6024                        f"Invalid JSON in edge annotations for"
6025                        f" edge from '{source}' to '{dest}':"
6026                        f"\n  {repr(aVal)}"
6027                    )
6028            else:
6029                raise DotParseError(
6030                    f"Unrecognized edge attribute '{attr}' for edge"
6031                    f" from '{source}' to '{dest}'"
6032                )
6033
6034        if label is None:
6035            raise DotParseError(
6036                f"Edge from '{source}' to '{dest}' is missing"
6037                f" a 'fullLabel' attribute."
6038            )
6039
6040        # Add the requested transition
6041        result.addTransition(
6042            source,
6043            label,
6044            dest,
6045            tags=tags,
6046            annotations=annotations,
6047            requires=requirements,  # None works here
6048            consequence=consequence  # None works here
6049        )
6050        # Either we're first or our reciprocal is, so this will only
6051        # trigger for one of the pair
6052        if reciprocal is not None:
6053            recipDest = result.getDestination(dest, reciprocal)
6054            if recipDest is None:
6055                recipChecks[(source, label)] = reciprocal
6056                # we'll get set as a reciprocal when that edge is
6057                # instantiated, we hope, but let's check that later
6058            elif recipDest != source:
6059                raise DotParseError(
6060                    f"Transition '{label}' from '{source}' to"
6061                    f" '{dest}' lists reciprocal '{reciprocal}'"
6062                    f" but that transition from '{dest}' goes to"
6063                    f" '{recipDest}', not '{source}'."
6064                )
6065            else:
6066                # At this point we know the reciprocal edge exists
6067                # and has the appropriate destination (our source).
6068                # No need to check for a pre-existing reciprocal as
6069                # this edge is newly created and cannot already have
6070                # a reciprocal assigned.
6071                result.setReciprocal(source, label, reciprocal)
6072
6073    # Double-check skipped reciprocals
6074    for ((source, transition), reciprocal) in recipChecks.items():
6075        actual = result.getReciprocal(source, transition)
6076        if actual != reciprocal:
6077            raise DotParseError(
6078                f"Transition '{transition}' from '{source}' was"
6079                f" expecting to have reciprocal '{reciprocal}' but"
6080                f" all edges have been processed and its reciprocal"
6081                f" is {repr(actual)}."
6082            )
6083
6084    # Finally get graph-level attribute values
6085    for (name, value) in graphStuff['attrs']:
6086        if name == "unknownCount":
6087            try:
6088                result.unknownCount = int(value)
6089            except ValueError:
6090                raise DotParseError(
6091                    f"Invalid 'unknownCount' value {repr(value)}."
6092                )
6093        elif name == "nextID":
6094            try:
6095                result.nextID = int(value)
6096            except ValueError:
6097                raise DotParseError(
6098                    f"Invalid 'nextID' value:"
6099                    f"\n  {repr(value)}"
6100                )
6101            collisionCourse = [x for x in result if x >= result.nextID]
6102            if len(collisionCourse) > 0:
6103                raise DotParseError(
6104                    f"Next ID {value} is wrong because the graph"
6105                    f" already contains one or more node(s) with"
6106                    f" ID(s) that is/are at least that large:"
6107                    f" {collisionCourse}"
6108                )
6109        elif name == "nextMechanismID":
6110            try:
6111                result.nextMechanismID = int(value)
6112            except ValueError:
6113                raise DotParseError(
6114                    f"Invalid 'nextMechanismID' value:"
6115                    f"\n  {repr(value)}"
6116                )
6117        elif name in (
6118            "equivalences",
6119            "reversionTypes",
6120            "mechanisms",
6121            "globalMechanisms",
6122            "nameLookup"
6123        ):
6124            try:
6125                setattr(result, name, fromJSON(value))
6126            except json.decoder.JSONDecodeError:
6127                raise DotParseError(
6128                    f"Invalid JSON in '{name}' attribute:"
6129                    f"\n  {repr(value)}"
6130                )
6131        else:
6132            raise DotParseError(
6133                f"Graph has unexpected attribute '{name}'."
6134            )
6135
6136    # Final check for mechanism ID value after both mechanism ID and
6137    # mechanisms dictionary have been parsed:
6138    leftBehind = [
6139        x
6140        for x in result.mechanisms
6141        if x >= result.nextMechanismID
6142    ]
6143    if len(leftBehind) > 0:
6144        raise DotParseError(
6145            f"Next mechanism ID {result.nextMechanismID} is wrong"
6146            f" because the graph already contains one or more"
6147            f" mechanism(s) with ID(s) that is/are at least that"
6148            f" large: {leftBehind}"
6149        )
6150
6151    # And we're done!
6152    return result

Converts a graphviz dot-format string into a core.DecisionGraph. A custom ParseFormat may be specified if desired; the default ParseFormat is used if not. Note that this relies on the specific indentation scheme used by toDot, so a hand-edited dot-format graph will probably not work. A DotParseError is raised if the provided string can't be parsed. Example:

>>> parseDotNode(' 3 [ label="A = \\"grate:open\\"" ]')
(3, [('label', 'A = "grate:open"')])
>>> sg = '''\
... subgraph __requirements__ {
...   3 [ label="A = \\"grate:open\\"" ]
...   4 [ label="B = \\"!(helmet)\\"" ]
...   5 [ label="C = \\"helmet\\"" ]
... }'''
>>> parseDotGraphContents(sg.splitlines()[1:])
({'nodes': [(3, [('label', 'A = "grate:open"')]), (4, [('label', 'B = "!(helmet)"')]), (5, [('label', 'C = "helmet"')])], 'edges': [], 'attrs': [], 'subgraphs': []}, [])
>>> from . import core
>>> dg = core.DecisionGraph.example('simple')
>>> encoded = toDot(dg)
>>> reconstructed = parseDot(encoded)
>>> for diff in dg.listDifferences(reconstructed):
...     print(diff)
>>> reconstructed == dg
True
>>> dg = core.DecisionGraph.example('abc')
>>> encoded = toDot(dg)
>>> reconstructed = parseDot(encoded)
>>> for diff in dg.listDifferences(reconstructed):
...     print(diff)
>>> reconstructed == dg
True
>>> tg = core.DecisionGraph()
>>> tg.addDecision('A')
0
>>> tg.addDecision('B')
1
>>> tg.addTransition('A', 'up', 'B', 'down')
>>> same = parseDot('''
... digraph {
...     0 [ name=A label=A ]
...       0 -> 1 [
...         label=up
...         fullLabel=up
...         reciprocal=down
...       ]
...     1 [ name=B label=B ]
...       1 -> 0 [
...         label=down
...         fullLabel=down
...         reciprocal=up
...       ]
... }''')
>>> for diff in tg.listDifferences(same):
...     print(diff)
>>> same == tg
True
>>> pf = ParseFormat()
>>> tg.setTransitionRequirement('A', 'up', pf.parseRequirement('one|two'))
>>> tg.setConsequence(
...     'B',
...     'down',
...     [base.effect(gain="one")]
... )
>>> test = parseDot('''
...   digraph {
...     0 [ name="A = \\"one|two\\"" label="A = \\"one|two\\"" ]
...   }
... ''')
>>> list(test.nodes)
[0]
>>> test.nodes[0]['name']
'A = "one|two"'
>>> eff = (
...   r'"A = \"[{\\\"type\\\": \\\"gain\\\",'
...   r' \\\"applyTo\\\": \\\"active\\\",'
...   r' \\\"value\\\": \\\"one\\\",'
...   r' \\\"charges\\\": null, \\\"hidden\\\": false,'
...   r' \\\"delay\\\": null}]\""'
... )
>>> utils.unquoted(eff)[1]
''
>>> test2 = parseDot(
...     'digraph {\n 0 [ name=' + eff + ' label=' + eff + ' ]\n}'
... )
>>> s = test2.nodes[0]['name']
>>> s[:25]
'A = "[{\\"type\\": \\"gain\\"'
>>> s[25:50]
', \\"applyTo\\": \\"active\\"'
>>> s[50:70]
', \\"value\\": \\"one\\"'
>>> s[70:89]
', \\"charges\\": null'
>>> s[89:108]
', \\"hidden\\": false'
>>> s[108:]
', \\"delay\\": null}]"'
>>> ae = s[s.index('=') + 1:].strip()
>>> uq, after = utils.unquoted(ae)
>>> after
''
>>> fromJSON(uq) == [base.effect(gain="one")]
True
>>> same = parseDot('''
... digraph {
...   0 [ name=A label=A ]
...     0 -> 1 [
...       label=up
...       fullLabel=up
...       reciprocal=down
...       req=A
...     ]
...   1 [ name=B label=B ]
...     1 -> 0 [
...       label=down
...       fullLabel=down
...       reciprocal=up
...       consequence=A
...     ]
...   subgraph __requirements__ {
...     2 [ label="A = \\"one|two\\"" ]
...   }
...   subgraph __consequences__ {
...     3 [ label=''' + eff + ''' ]
...   }
... }''')
>>> c = {'tags': {}, 'annotations': [], 'reciprocal': 'up', 'consequence': [{'type': 'gain', 'applyTo': 'active', 'value': 'one', 'delay': None, 'charges': None}]}['consequence']  # noqa
>>> for diff in tg.listDifferences(same):
...     print(diff)
>>> same == tg
True
def toDot(graph: exploration.core.DecisionGraph, clusterLevels: Union[str, List[int]] = [0]) -> str:
6155def toDot(
6156    graph: core.DecisionGraph,
6157    clusterLevels: Union[str, List[int]] = [0]
6158) -> str:
6159    """
6160    Converts the decision graph into a "dot"-format string suitable
6161    for processing by `graphviz`.
6162
6163    See [the dot language
6164    specification](https://graphviz.org/doc/info/lang.html) for more
6165    detail on the syntax we convert to.
6166
6167    If `clusterLevels` is given, it should be either the string '*',
6168    or a list of integers. '*' means that all zone levels should be
6169    cluster-style subgraphs, while a list of integers specifies that
6170    zones at those levels should be cluster-style subgraphs. This
6171    will prefix the subgraph names with 'cluster_' instead of just
6172    '_'.
6173
6174    TODO: Check edge cases for quotes in capability names, tag names,
6175    transition names, annotations, etc.
6176
6177    TODO: At least colons not allowed in tag names!
6178
6179    TODO: Spaces in decision/transition names? Other special
6180    characters in those names?
6181    """
6182    # Set up result including unknownCount and nextID
6183    result = (
6184        f"digraph {{"
6185        f"\n  unknownCount={graph.unknownCount}"
6186        f"\n  nextID={graph.nextID}"
6187        f"\n  nextMechanismID={graph.nextMechanismID}"
6188        f"\n"
6189    )
6190
6191    # Dictionaries for using letters to substitute for unique
6192    # requirements/consequences found throughout the graph. Keys are
6193    # quoted requirement or consequence reprs, and values are
6194    # abbreviation strings for them.
6195    currentReqKey = utils.nextAbbrKey(None)
6196    currentEffectKey = utils.nextAbbrKey(None)
6197    reqKeys: Dict[str, str] = {}
6198    consequenceKeys: Dict[str, str] = {}
6199
6200    # Add all decision and transition info
6201    decision: base.DecisionID  # TODO: Fix Multidigraph type stubs
6202    for decision in graph.nodes:
6203        nodeInfo = graph.nodes[decision]
6204        tags = nodeInfo.get('tags', {})
6205        annotations = toJSON(nodeInfo.get('annotations', []))
6206        zones = nodeInfo.get('zones', set())
6207        nodeAttrs = f"\n    name={utils.quoted(nodeInfo['name'])}"
6208        immediateZones = [z for z in zones if graph.zoneHierarchyLevel(z) == 0]
6209        if len(immediateZones) > 0:
6210            useZone = sorted(immediateZones)[0]
6211            # TODO: Don't hardcode :: here?
6212            withZone = useZone + "::" + nodeInfo['name']
6213            nodeAttrs += f"\n    label={utils.quoted(withZone)}"
6214        else:
6215            nodeAttrs += f"\n    label={utils.quoted(nodeInfo['name'])}"
6216        for tag, value in tags.items():
6217            rep = utils.quoted(toJSON(value))
6218            nodeAttrs += f"\n    t_{tag}={rep}"
6219        for z in sorted(zones):
6220            nodeAttrs += f"\n    z_{z}=1"
6221        if annotations:
6222            nodeAttrs += '\n    annotations=' + utils.quoted(annotations)
6223
6224        result += f'\n  {decision} [{nodeAttrs}\n  ]'
6225
6226        for (transition, destination) in graph._byEdge[decision].items():
6227            edgeAttrs = (
6228                '\n      label='
6229              + utils.quoted(utils.abbr(transition))
6230            )
6231            edgeAttrs += (
6232                '\n      fullLabel='
6233              + utils.quoted(transition)
6234            )
6235            reciprocal = graph.getReciprocal(decision, transition)
6236            if reciprocal is not None:
6237                edgeAttrs += (
6238                    '\n      reciprocal='
6239                  + utils.quoted(reciprocal)
6240                )
6241            info = graph.edges[
6242                decision,  # type:ignore
6243                destination,
6244                transition
6245            ]
6246            if 'requirement' in info:
6247                # Get string rep for requirement
6248                rep = utils.quoted(info['requirement'].unparse())
6249                # Get assigned abbreviation or assign one
6250                if rep in reqKeys:
6251                    ab = reqKeys[rep]
6252                else:
6253                    ab = currentReqKey
6254                    reqKeys[rep] = ab
6255                    currentReqKey = utils.nextAbbrKey(currentReqKey)
6256                # Add abbreviation as edge attribute
6257                edgeAttrs += f'\n      req={ab}'
6258            if 'consequence' in info:
6259                # Get string representation of consequences
6260                rep = utils.quoted(
6261                    toJSON(info['consequence'])
6262                )
6263                # Get abbreviation for that or assign one:
6264                if rep in consequenceKeys:
6265                    ab = consequenceKeys[rep]
6266                else:
6267                    ab = currentEffectKey
6268                    consequenceKeys[rep] = ab
6269                    currentEffectKey = utils.nextAbbrKey(
6270                        currentEffectKey
6271                    )
6272                # Add abbreviation as an edge attribute
6273                edgeAttrs += f'\n      consequence={ab}'
6274            for (tag, value) in info["tags"].items():
6275                # Get string representation of tag value
6276                rep = utils.quoted(toJSON(value))
6277                # Add edge attribute for tag
6278                edgeAttrs += f'\n      t_{tag}={rep}'
6279            if 'annotations' in info:
6280                edgeAttrs += (
6281                    '\n      annotations='
6282                  + utils.quoted(toJSON(info['annotations']))
6283                )
6284            result += f'\n    {decision} -> {destination}'
6285            result += f' [{edgeAttrs}\n    ]'
6286
6287    # Add zone info as subgraph structure
6288    for z, zinfo in graph.zones.items():
6289        parents = utils.quoted(toJSON(sorted(zinfo.parents)))
6290        if clusterLevels == '*' or zinfo.level in clusterLevels:
6291            zName = "cluster_" + z
6292        else:
6293            zName = '_' + z
6294        zoneSubgraph = f'\n  subgraph {utils.quoted(zName)} {{'
6295        zoneSubgraph += f'\n    label={z}'
6296        zoneSubgraph += f'\n    level={zinfo.level}'
6297        zoneSubgraph += f'\n    parents={parents}'
6298        for decision in sorted(graph.allDecisionsInZone(z)):
6299            zoneSubgraph += f'\n    {decision}'
6300        zoneSubgraph += '\n  }'
6301        result += zoneSubgraph
6302
6303    # Add equivalences, mechanisms, etc.
6304    for attr in [
6305        "equivalences",
6306        "reversionTypes",
6307        "mechanisms",
6308        "globalMechanisms",
6309        "nameLookup"
6310    ]:
6311        aRep = utils.quoted(toJSON(getattr(graph, attr)))
6312        result += f'\n  {attr}={aRep}'
6313
6314    # Add legend subgraphs to represent abbreviations
6315    useID = graph.nextID
6316    if reqKeys:
6317        result += '\n  subgraph __requirements__ {'
6318        for rrepr, ab in reqKeys.items():
6319            nStr = utils.quoted(ab + ' = ' + rrepr)
6320            result += (
6321                f"\n    {useID} [ label={nStr} ]"
6322            )
6323            useID += 1
6324        result += '\n  }'
6325
6326    if consequenceKeys:
6327        result += '\n  subgraph __consequences__ {'
6328        for erepr, ab in consequenceKeys.items():
6329            nStr = utils.quoted(ab + ' = ' + erepr)
6330            result += (
6331                f"\n    {useID} [ label={nStr} ]"
6332            )
6333            useID += 1
6334        result += '\n  }'
6335
6336    if graph.mechanisms:
6337        result += '\n  subgraph __mechanisms__ {'
6338        mID: base.MechanismID
6339        mWhere: Optional[base.DecisionID]
6340        mName: base.MechanismName
6341        for (mID, (mWhere, mName)) in graph.mechanisms.items():
6342            qName = utils.quoted(mName)
6343            nStr = utils.quoted(f"{mID}@{mWhere}:{qName}")
6344            result += (
6345                f"\n    {useID} [ label={nStr} ]"
6346            )
6347            useID += 1
6348        result += '\n  }'
6349
6350    result += "\n}\n"
6351    return result

Converts the decision graph into a "dot"-format string suitable for processing by graphviz.

See the dot language specification (https://graphviz.org/doc/info/lang.html) for more detail on the syntax we convert to.

If clusterLevels is given, it should be either the string '*', or a list of integers. '*' means that all zone levels should be cluster-style subgraphs, while a list of integers specifies that zones at those levels should be cluster-style subgraphs. This will prefix the subgraph names with 'cluster_' instead of just '_'.

TODO: Check edge cases for quotes in capability names, tag names, transition names, annotations, etc.

TODO: At least colons not allowed in tag names!

TODO: Spaces in decision/transition names? Other special characters in those names?
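
As a quick usage illustration (not from the original docstring), here is a minimal sketch of rendering a graph with graphviz; it assumes core is imported as in the doctests, that the graphviz dot tool is installed, and map.gv is a hypothetical filename:

    dg = core.DecisionGraph.example('simple')
    dotSource = toDot(dg, clusterLevels='*')  # every zone level as a cluster
    with open('map.gv', 'w') as fout:         # hypothetical filename
        fout.write(dotSource)
    # Render externally, e.g.: dot -Tsvg map.gv -o map.svg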

T = TypeVar('T')

Type var for loadCustom.

def loadCustom(stream: TextIO, loadAs: Type[T]) -> T:
6362def loadCustom(stream: TextIO, loadAs: Type[T]) -> T:
6363    """
6364    Loads a new JSON-encodable object from the JSON data in the
6365    given text stream (e.g., a file open in read mode). See
6366    `CustomJSONDecoder` for details on the format and which object types
6367    are supported.
6368
6369    This casts the result to the specified type, but errors out with a
6370    `TypeError` if it doesn't match.
6371    """
6372    result = json.load(stream, cls=CustomJSONDecoder)
6373    if isinstance(result, loadAs):
6374        return result
6375    else:
6376        raise TypeError(
6377            f"Expected to load a {loadAs} but got a {type(result)}."
6378        )

Loads a new JSON-encodable object from the JSON data in the given text stream (e.g., a file open in read mode). See CustomJSONDecoder for details on the format and which object types are supported.

This casts the result to the specified type, but errors out with a TypeError if it doesn't match.
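
For example, a minimal sketch, assuming graph.json is a hypothetical file previously written with saveCustom:

    with open('graph.json', 'r') as fin:  # hypothetical filename
        dg = loadCustom(fin, core.DecisionGraph)
    # dg is now a core.DecisionGraph; a TypeError would have been
    # raised if the file held some other supported type.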

def saveCustom(toSave: Union[exploration.base.MetricSpace, exploration.core.DecisionGraph, exploration.core.DiscreteExploration], stream: TextIO) -> None:
6381def saveCustom(
6382    toSave: Union[  # TODO: More in this union?
6383        base.MetricSpace,
6384        core.DecisionGraph,
6385        core.DiscreteExploration,
6386    ],
6387    stream: TextIO
6388) -> None:
6389    """
6390    Saves a JSON-encodable object as JSON into the given text stream
6391    (e.g., a file open in writing mode). See `CustomJSONEncoder` for
6392    details on the format and which types are supported.
6393    """
6394    json.dump(toSave, stream, cls=CustomJSONEncoder)

Saves a JSON-encodable object as JSON into the given text stream (e.g., a file open in writing mode). See CustomJSONEncoder for details on the format and which types are supported.
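
The saving side, as a matching sketch (the filename is again hypothetical):

    dg = core.DecisionGraph.example('simple')
    with open('graph.json', 'w') as fout:  # hypothetical filename
        saveCustom(dg, fout)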

def toJSON(obj: Any) -> str:
6397def toJSON(obj: Any) -> str:
6398    """
6399    Defines the standard object -> JSON operation using the
6400    `CustomJSONEncoder` as well as not using `sort_keys`.
6401    """
6402    return CustomJSONEncoder(sort_keys=False).encode(obj)

Defines the standard object -> JSON operation using the CustomJSONEncoder, with sort_keys disabled.

def fromJSON(encoded: str) -> Any:
6405def fromJSON(encoded: str) -> Any:
6406    """
6407    Defines the standard JSON -> object operation using
6408    `CustomJSONDecoder`.
6409    """
6410    return json.loads(encoded, cls=CustomJSONDecoder)

Defines the standard JSON -> object operation using CustomJSONDecoder.
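
Together with toJSON this round-trips the supported types; a small sketch:

    packed = toJSON({('a', 1), ('b', 2)})  # a set of tuples
    unpacked = fromJSON(packed)
    unpacked == {('a', 1), ('b', 2)}       # expected: True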

class CustomJSONEncoder(json.encoder.JSONEncoder):
6413class CustomJSONEncoder(json.JSONEncoder):
6414    """
6415    A custom JSON encoder that has special protocols for handling the
6416    same objects that `CustomJSONDecoder` decodes. It handles these
6417    objects specially so that they can be decoded back to their original
6418    form.
6419
6420    Examples:
6421
6422    >>> from . import core
6423    >>> tupList = [(1, 1), (2, 2)]
6424    >>> encTup = toJSON(tupList)
6425    >>> encTup
6426    '[{"^^d": "tuple", "values": [1, 1]}, {"^^d": "tuple", "values": [2, 2]}]'
6427    >>> fromJSON(encTup) == tupList
6428    True
6429    >>> dg = core.DecisionGraph.example('simple')
6430    >>> fromJSON(toJSON(dg)) == dg
6431    True
6432    >>> dg = core.DecisionGraph.example('abc')
6433    >>> zi = dg.getZoneInfo('upZone')
6434    >>> zi
6435    ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={},\
6436 annotations=[])
6437    >>> zj = toJSON(zi)
6438    >>> zj
6439    '{"^^d": "namedtuple", "name": "ZoneInfo", "values":\
6440 {"level": 1, "parents": {"^^d": "set", "values": []},\
6441 "contents": {"^^d": "set", "values": ["zoneA"]}, "tags": {},\
6442 "annotations": []}}'
6443    >>> fromJSON(toJSON(zi))
6444    ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={},\
6445 annotations=[])
6446    >>> fromJSON(toJSON(zi)) == zi
6447    True
6448    >>> toJSON({'a': 'b', 1: 2})
6449    '{"^^d": "dict", "items": [["a", "b"], [1, 2]]}'
6450    >>> toJSON(((1, 2), (3, 4)))
6451    '{"^^d": "tuple", "values": [{"^^d": "tuple", "values": [1, 2]},\
6452 {"^^d": "tuple", "values": [3, 4]}]}'
6453    >>> toJSON(base.effect(set=('grate', 'open')))
6454    '{"type": "set", "applyTo": "active",\
6455 "value": {"^^d": "tuple",\
6456 "values": [{"^^d": "namedtuple", "name": "MechanismSpecifier",\
6457 "values": {"domain": null, "zone": null, "decision": null, "name": "grate"}},\
6458 "open"]}, "delay": null, "charges": null, "hidden": false}'
6459    >>> j = toJSON(dg)
6460    >>> expected = (
6461    ... '{"^^d": "DecisionGraph",'
6462    ... ' "props": {},'
6463    ... ' "node_links": {"directed": true,'
6464    ... ' "multigraph": true,'
6465    ... ' "graph": {},'
6466    ... ' "nodes": ['
6467    ... '{"name": "A", "domain": "main", "tags": {},'
6468    ... ' "annotations": ["This is a multi-word \\\\"annotation.\\\\""],'
6469    ... ' "zones": {"^^d": "set", "values": ["zoneA"]},'
6470    ... ' "mechanisms": {"grate": 0},'
6471    ... ' "id": 0'
6472    ... '},'
6473    ... ' {'
6474    ... '"name": "B",'
6475    ... ' "domain": "main",'
6476    ... ' "tags": {"b": 1, "tag2": "\\\\"value\\\\""},'
6477    ... ' "annotations": [],'
6478    ... ' "zones": {"^^d": "set", "values": ["zoneB"]},'
6479    ... ' "id": 1'
6480    ... '},'
6481    ... ' {'
6482    ... '"name": "C",'
6483    ... ' "domain": "main",'
6484    ... ' "tags": {"aw\\\\"ful": "ha\\'ha"},'
6485    ... ' "annotations": [],'
6486    ... ' "zones": {"^^d": "set", "values": ["zoneA"]},'
6487    ... ' "id": 2'
6488    ... '}'
6489    ... '],'
6490    ... ' "links": ['
6491    ... '{'
6492    ... '"tags": {},'
6493    ... ' "annotations": [],'
6494    ... ' "reciprocal": "right",'
6495    ... ' "source": 0,'
6496    ... ' "target": 1,'
6497    ... ' "key": "left"'
6498    ... '},'
6499    ... ' {'
6500    ... '"tags": {},'
6501    ... ' "annotations": [],'
6502    ... ' "reciprocal": "up_right",'
6503    ... ' "requirement": {"^^d": "Requirement", "value": "grate:open"},'
6504    ... ' "source": 0,'
6505    ... ' "target": 1,'
6506    ... ' "key": "up_left"'
6507    ... '},'
6508    ... ' {'
6509    ... '"tags": {},'
6510    ... ' "annotations": ["Transition \\'annotation.\\'"],'
6511    ... ' "reciprocal": "up",'
6512    ... ' "source": 0,'
6513    ... ' "target": 2,'
6514    ... ' "key": "down"'
6515    ... '},'
6516    ... ' {'
6517    ... '"tags": {},'
6518    ... ' "annotations": [],'
6519    ... ' "reciprocal": "left",'
6520    ... ' "source": 1,'
6521    ... ' "target": 0,'
6522    ... ' "key": "right"'
6523    ... '},'
6524    ... ' {'
6525    ... '"tags": {},'
6526    ... ' "annotations": [],'
6527    ... ' "reciprocal": "up_left",'
6528    ... ' "requirement": {"^^d": "Requirement", "value": "grate:open"},'
6529    ... ' "source": 1,'
6530    ... ' "target": 0,'
6531    ... ' "key": "up_right"'
6532    ... '},'
6533    ... ' {'
6534    ... '"tags": {"fast": 1},'
6535    ... ' "annotations": [],'
6536    ... ' "reciprocal": "down",'
6537    ... ' "source": 2,'
6538    ... ' "target": 0,'
6539    ... ' "key": "up"'
6540    ... '},'
6541    ... ' {'
6542    ... '"tags": {},'
6543    ... ' "annotations": [],'
6544    ... ' "requirement": {"^^d": "Requirement", "value": "!(helmet)"},'
6545    ... ' "consequence": ['
6546    ... '{'
6547    ... '"type": "gain", "applyTo": "active", "value": "helmet",'
6548    ... ' "delay": null, "charges": null, "hidden": false'
6549    ... '},'
6550    ... ' {'
6551    ... '"type": "deactivate",'
6552    ... ' "applyTo": "active", "value": null,'
6553    ... ' "delay": 3, "charges": null, "hidden": false'
6554    ... '}'
6555    ... '],'
6556    ... ' "source": 2,'
6557    ... ' "target": 2,'
6558    ... ' "key": "grab_helmet"'
6559    ... '},'
6560    ... ' {'
6561    ... '"tags": {},'
6562    ... ' "annotations": [],'
6563    ... ' "requirement": {"^^d": "Requirement", "value": "helmet"},'
6564    ... ' "consequence": ['
6565    ... '{"type": "lose", "applyTo": "active", "value": "helmet",'
6566    ... ' "delay": null, "charges": null, "hidden": false},'
6567    ... ' {"type": "gain", "applyTo": "active",'
6568    ... ' "value": {"^^d": "tuple", "values": ["token", 1]},'
6569    ... ' "delay": null, "charges": null, "hidden": false'
6570    ... '},'
6571    ... ' {"condition":'
6572    ... ' {"^^d": "Requirement", "value": "token*2"},'
6573    ... ' "consequence": ['
6574    ... '{"type": "set", "applyTo": "active",'
6575    ... ' "value": {"^^d": "tuple", "values": ['
6576    ... '{"^^d": "namedtuple", "name": "MechanismSpecifier",'
6577    ... ' "values": {"domain": null, "zone": null, "decision": null,'
6578    ... ' "name": "grate"}}, "open"]},'
6579    ... ' "delay": null, "charges": null, "hidden": false'
6580    ... '},'
6581    ... ' {"type": "deactivate", "applyTo": "active", "value": null,'
6582    ... ' "delay": null, "charges": null, "hidden": false'
6583    ... '}'
6584    ... '],'
6585    ... ' "alternative": []'
6586    ... '}'
6587    ... '],'
6588    ... ' "source": 2,'
6589    ... ' "target": 2,'
6590    ... ' "key": "pull_lever"'
6591    ... '}'
6592    ... ']'
6593    ... '},'
6594    ... ' "_byEdge": {"^^d": "dict", "items":'
6595    ... ' [[0, {"left": 1, "up_left": 1, "down": 2}],'
6596    ... ' [1, {"right": 0, "up_right": 0}],'
6597    ... ' [2, {"up": 0, "grab_helmet": 2, "pull_lever": 2}]]},'
6598    ... ' "zones": {"zoneA":'
6599    ... ' {"^^d": "namedtuple", "name": "ZoneInfo",'
6600    ... ' "values": {'
6601    ... '"level": 0,'
6602    ... ' "parents": {"^^d": "set", "values": ["upZone"]},'
6603    ... ' "contents": {"^^d": "set", "values": [0, 2]},'
6604    ... ' "tags": {},'
6605    ... ' "annotations": []'
6606    ... '}'
6607    ... '},'
6608    ... ' "zoneB":'
6609    ... ' {"^^d": "namedtuple", "name": "ZoneInfo",'
6610    ... ' "values": {'
6611    ... '"level": 0,'
6612    ... ' "parents": {"^^d": "set", "values": []},'
6613    ... ' "contents": {"^^d": "set", "values": [1]},'
6614    ... ' "tags": {},'
6615    ... ' "annotations": []'
6616    ... '}'
6617    ... '},'
6618    ... ' "upZone":'
6619    ... ' {"^^d": "namedtuple", "name": "ZoneInfo",'
6620    ... ' "values": {'
6621    ... '"level": 1,'
6622    ... ' "parents": {"^^d": "set", "values": []},'
6623    ... ' "contents": {"^^d": "set", "values": ["zoneA"]},'
6624    ... ' "tags": {},'
6625    ... ' "annotations": []'
6626    ... '}'
6627    ... '}'
6628    ... '},'
6629    ... ' "unknownCount": 0,'
6630    ... ' "equivalences": {"^^d": "dict", "items": ['
6631    ... '[{"^^d": "tuple", "values": [0, "open"]},'
6632    ... ' {"^^d": "set", "values": ['
6633    ... '{"^^d": "Requirement", "value": "helmet"}]}]'
6634    ... ']},'
6635    ... ' "reversionTypes": {},'
6636    ... ' "nextMechanismID": 1,'
6637    ... ' "mechanisms": {"^^d": "dict", "items": ['
6638    ... '[0, {"^^d": "tuple", "values": [0, "grate"]}]]},'
6639    ... ' "globalMechanisms": {},'
6640    ... ' "nameLookup": {"A": [0], "B": [1], "C": [2]}'
6641    ... '}'
6642    ... )
6643    >>> for i in range(len(j)):
6644    ...     if j[i] != expected[i:i+1]:
6645    ...         print(
6646    ...             'exp: ' + expected[i-10:i+50] + '\\ngot: ' + j[i-10:i+50]
6647    ...         )
6648    ...         break
6649    >>> j == expected
6650    True
6651    >>> rec = fromJSON(j)
6652    >>> rec.nodes == dg.nodes
6653    True
6654    >>> rec.edges == dg.edges
6655    True
6656    >>> rec.unknownCount == dg.unknownCount
6657    True
6658    >>> rec.equivalences == dg.equivalences
6659    True
6660    >>> rec.reversionTypes == dg.reversionTypes
6661    True
6662    >>> rec._byEdge == dg._byEdge
6663    True
6664    >>> rec.zones == dg.zones
6665    True
6666    >>> for diff in dg.listDifferences(rec):
6667    ...     print(diff)
6668    >>> rec == dg
6669    True
6670
6671    `base.MetricSpace` example:
6672
6673    >>> ms = base.MetricSpace("test")
6674    >>> ms.addPoint([2, 3])
6675    0
6676    >>> ms.addPoint([2, 7, 0])
6677    1
6678    >>> ms.addPoint([2, 7])
6679    2
6680    >>> toJSON(ms) # TODO: ^^d entries here
6681    '{"^^d": "MetricSpace", "name": "test",\
6682 "points": {"^^d": "dict", "items": [[0, [2, 3]], [1, [2, 7,\
6683 0]], [2, [2, 7]]]}, "lastID": 2}'
6684    >>> ms.removePoint(0)
6685    >>> ms.removePoint(1)
6686    >>> ms.removePoint(2)
6687    >>> toJSON(ms)
6688    '{"^^d": "MetricSpace", "name": "test", "points": {}, "lastID": 2}'
6689    >>> ms.addPoint([5, 6])
6690    3
6691    >>> ms.addPoint([7, 8])
6692    4
6693    >>> toJSON(ms)
6694    '{"^^d": "MetricSpace", "name": "test",\
6695 "points": {"^^d": "dict", "items": [[3, [5, 6]], [4, [7, 8]]]}, "lastID": 4}'
6696
6697    # TODO: more examples, including one for a DiscreteExploration
6698    """
6699
6700    def default(self, o: Any) -> Any:
6701        """
6702        Re-writes objects for encoding. We re-write the following
6703        objects:
6704
6705        - `set`
6706        - `dict` (if the keys aren't all strings)
6707        - `tuple`/`namedtuple`
6708        - `ZoneInfo`
6709        - `Requirement`
6710        - `SkillCombination`
6711        - `DecisionGraph`
6712        - `DiscreteExploration`
6713        - `MetricSpace`
6714
6715        TODO: FeatureGraph...
6716        """
6717        if isinstance(o, list):
6718            return [self.default(x) for x in o]
6719
6720        elif isinstance(o, set):
6721            return {
6722                '^^d': 'set',
6723                'values': sorted(
6724                    [self.default(e) for e in o],
6725                    key=lambda x: str(x)
6726                )
6727            }
6728
6729        elif isinstance(o, dict):
6730            if all(isinstance(k, str) for k in o):
6731                return {
6732                    k: self.default(v)
6733                    for k, v in o.items()
6734                }
6735            else:
6736                return {
6737                    '^^d': 'dict',
6738                    'items': [
6739                        [self.default(k), self.default(v)]
6740                        for (k, v) in o.items()
6741                    ]
6742                }
6743
6744        elif isinstance(o, tuple):
6745            if hasattr(o, '_fields') and hasattr(o, '_asdict'):
6746                # Named tuple
6747                return {
6748                    '^^d': 'namedtuple',
6749                    'name': o.__class__.__name__,
6750                    'values': {
6751                        k: self.default(v)
6752                        for k, v in o._asdict().items()
6753                    }
6754                }
6755            else:
6756                # Normal tuple
6757                return {
6758                    "^^d": "tuple",
6759                    "values": [self.default(e) for e in o]
6760                }
6761
6762        elif isinstance(o, base.Requirement):
6763            return {
6764                '^^d': 'Requirement',
6765                'value': o.unparse()
6766            }
6767
6768        elif isinstance(o, base.SkillCombination):
6769            return {
6770                '^^d': 'SkillCombination',
6771                'value': o.unparse()
6772            }
6773
6774        elif isinstance(o, core.DecisionGraph):
6775            return {
6776                '^^d': 'DecisionGraph',
6777                'props': self.default(o.graph),  # type:ignore [attr-defined]
6778                'node_links': self.default(networkx.node_link_data(o)),
6779                '_byEdge': self.default(o._byEdge),
6780                'zones': self.default(o.zones),
6781                'unknownCount': o.unknownCount,
6782                'equivalences': self.default(o.equivalences),
6783                'reversionTypes': self.default(o.reversionTypes),
6784                'nextMechanismID': o.nextMechanismID,
6785                'mechanisms': self.default(o.mechanisms),
6786                'globalMechanisms': self.default(o.globalMechanisms),
6787                'nameLookup': self.default(o.nameLookup)
6788            }
6789
6790        elif isinstance(o, core.DiscreteExploration):
6791            return {
6792                '^^d': 'DiscreteExploration',
6793                'situations': self.default(o.situations)
6794            }
6795
6796        elif isinstance(o, base.MetricSpace):
6797            return {
6798                '^^d': 'MetricSpace',
6799                'name': o.name,
6800                'points': self.default(o.points),
6801                'lastID': o.lastID()
6802            }
6803
6804        else:
6805            return o
6806
6807    def encode(self, o: Any) -> str:
6808        """
6809        Custom encode function since we need to override behavior for
6810        tuples and dicts.
6811        """
6812        if isinstance(o, (tuple, dict, set)):
6813            o = self.default(o)
6814        elif isinstance(o, list):
6815            o = [self.default(x) for x in o]
6816
6817        try:
6818            return super().encode(o)
6819        except TypeError:
6820            return super().encode(self.default(o))
6821
6822    def iterencode(
6823        self,
6824        o: Any,
6825        _one_shot: bool = False
6826    ) -> Generator[str, None, None]:
6827        """
6828        Custom iterencode function since we need to override behavior for
6829        tuples and dicts.
6830        """
6831        if isinstance(o, (tuple, dict)):
6832            o = self.default(o)
6833
6834        yield from super().iterencode(o, _one_shot=_one_shot)

A custom JSON encoder that has special protocols for handling the same objects that CustomJSONDecoder decodes. It handles these objects specially so that they can be decoded back to their original form.

Examples:

>>> from . import core
>>> tupList = [(1, 1), (2, 2)]
>>> encTup = toJSON(tupList)
>>> encTup
'[{"^^d": "tuple", "values": [1, 1]}, {"^^d": "tuple", "values": [2, 2]}]'
>>> fromJSON(encTup) == tupList
True
>>> dg = core.DecisionGraph.example('simple')
>>> fromJSON(toJSON(dg)) == dg
True
>>> dg = core.DecisionGraph.example('abc')
>>> zi = dg.getZoneInfo('upZone')
>>> zi
ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={}, annotations=[])
>>> zj = toJSON(zi)
>>> zj
'{"^^d": "namedtuple", "name": "ZoneInfo", "values": {"level": 1, "parents": {"^^d": "set", "values": []}, "contents": {"^^d": "set", "values": ["zoneA"]}, "tags": {}, "annotations": []}}'
>>> fromJSON(toJSON(zi))
ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={}, annotations=[])
>>> fromJSON(toJSON(zi)) == zi
True
>>> toJSON({'a': 'b', 1: 2})
'{"^^d": "dict", "items": [["a", "b"], [1, 2]]}'
>>> toJSON(((1, 2), (3, 4)))
'{"^^d": "tuple", "values": [{"^^d": "tuple", "values": [1, 2]}, {"^^d": "tuple", "values": [3, 4]}]}'
>>> toJSON(base.effect(set=('grate', 'open')))
'{"type": "set", "applyTo": "active", "value": {"^^d": "tuple", "values": [{"^^d": "namedtuple", "name": "MechanismSpecifier", "values": {"domain": null, "zone": null, "decision": null, "name": "grate"}}, "open"]}, "delay": null, "charges": null, "hidden": false}'
>>> j = toJSON(dg)
>>> expected = (
... '{"^^d": "DecisionGraph",'
... ' "props": {},'
... ' "node_links": {"directed": true,'
... ' "multigraph": true,'
... ' "graph": {},'
... ' "nodes": ['
... '{"name": "A", "domain": "main", "tags": {},'
... ' "annotations": ["This is a multi-word \\"annotation.\\""],'
... ' "zones": {"^^d": "set", "values": ["zoneA"]},'
... ' "mechanisms": {"grate": 0},'
... ' "id": 0'
... '},'
... ' {'
... '"name": "B",'
... ' "domain": "main",'
... ' "tags": {"b": 1, "tag2": "\\"value\\""},'
... ' "annotations": [],'
... ' "zones": {"^^d": "set", "values": ["zoneB"]},'
... ' "id": 1'
... '},'
... ' {'
... '"name": "C",'
... ' "domain": "main",'
... ' "tags": {"aw\\"ful": "ha\'ha"},'
... ' "annotations": [],'
... ' "zones": {"^^d": "set", "values": ["zoneA"]},'
... ' "id": 2'
... '}'
... '],'
... ' "links": ['
... '{'
... '"tags": {},'
... ' "annotations": [],'
... ' "reciprocal": "right",'
... ' "source": 0,'
... ' "target": 1,'
... ' "key": "left"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": [],'
... ' "reciprocal": "up_right",'
... ' "requirement": {"^^d": "Requirement", "value": "grate:open"},'
... ' "source": 0,'
... ' "target": 1,'
... ' "key": "up_left"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": ["Transition \'annotation.\'"],'
... ' "reciprocal": "up",'
... ' "source": 0,'
... ' "target": 2,'
... ' "key": "down"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": [],'
... ' "reciprocal": "left",'
... ' "source": 1,'
... ' "target": 0,'
... ' "key": "right"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": [],'
... ' "reciprocal": "up_left",'
... ' "requirement": {"^^d": "Requirement", "value": "grate:open"},'
... ' "source": 1,'
... ' "target": 0,'
... ' "key": "up_right"'
... '},'
... ' {'
... '"tags": {"fast": 1},'
... ' "annotations": [],'
... ' "reciprocal": "down",'
... ' "source": 2,'
... ' "target": 0,'
... ' "key": "up"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": [],'
... ' "requirement": {"^^d": "Requirement", "value": "!(helmet)"},'
... ' "consequence": ['
... '{'
... '"type": "gain", "applyTo": "active", "value": "helmet",'
... ' "delay": null, "charges": null, "hidden": false'
... '},'
... ' {'
... '"type": "deactivate",'
... ' "applyTo": "active", "value": null,'
... ' "delay": 3, "charges": null, "hidden": false'
... '}'
... '],'
... ' "source": 2,'
... ' "target": 2,'
... ' "key": "grab_helmet"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": [],'
... ' "requirement": {"^^d": "Requirement", "value": "helmet"},'
... ' "consequence": ['
... '{"type": "lose", "applyTo": "active", "value": "helmet",'
... ' "delay": null, "charges": null, "hidden": false},'
... ' {"type": "gain", "applyTo": "active",'
... ' "value": {"^^d": "tuple", "values": ["token", 1]},'
... ' "delay": null, "charges": null, "hidden": false'
... '},'
... ' {"condition":'
... ' {"^^d": "Requirement", "value": "token*2"},'
... ' "consequence": ['
... '{"type": "set", "applyTo": "active",'
... ' "value": {"^^d": "tuple", "values": ['
... '{"^^d": "namedtuple", "name": "MechanismSpecifier",'
... ' "values": {"domain": null, "zone": null, "decision": null,'
... ' "name": "grate"}}, "open"]},'
... ' "delay": null, "charges": null, "hidden": false'
... '},'
... ' {"type": "deactivate", "applyTo": "active", "value": null,'
... ' "delay": null, "charges": null, "hidden": false'
... '}'
... '],'
... ' "alternative": []'
... '}'
... '],'
... ' "source": 2,'
... ' "target": 2,'
... ' "key": "pull_lever"'
... '}'
... ']'
... '},'
... ' "_byEdge": {"^^d": "dict", "items":'
... ' [[0, {"left": 1, "up_left": 1, "down": 2}],'
... ' [1, {"right": 0, "up_right": 0}],'
... ' [2, {"up": 0, "grab_helmet": 2, "pull_lever": 2}]]},'
... ' "zones": {"zoneA":'
... ' {"^^d": "namedtuple", "name": "ZoneInfo",'
... ' "values": {'
... '"level": 0,'
... ' "parents": {"^^d": "set", "values": ["upZone"]},'
... ' "contents": {"^^d": "set", "values": [0, 2]},'
... ' "tags": {},'
... ' "annotations": []'
... '}'
... '},'
... ' "zoneB":'
... ' {"^^d": "namedtuple", "name": "ZoneInfo",'
... ' "values": {'
... '"level": 0,'
... ' "parents": {"^^d": "set", "values": []},'
... ' "contents": {"^^d": "set", "values": [1]},'
... ' "tags": {},'
... ' "annotations": []'
... '}'
... '},'
... ' "upZone":'
... ' {"^^d": "namedtuple", "name": "ZoneInfo",'
... ' "values": {'
... '"level": 1,'
... ' "parents": {"^^d": "set", "values": []},'
... ' "contents": {"^^d": "set", "values": ["zoneA"]},'
... ' "tags": {},'
... ' "annotations": []'
... '}'
... '}'
... '},'
... ' "unknownCount": 0,'
... ' "equivalences": {"^^d": "dict", "items": ['
... '[{"^^d": "tuple", "values": [0, "open"]},'
... ' {"^^d": "set", "values": ['
... '{"^^d": "Requirement", "value": "helmet"}]}]'
... ']},'
... ' "reversionTypes": {},'
... ' "nextMechanismID": 1,'
... ' "mechanisms": {"^^d": "dict", "items": ['
... '[0, {"^^d": "tuple", "values": [0, "grate"]}]]},'
... ' "globalMechanisms": {},'
... ' "nameLookup": {"A": [0], "B": [1], "C": [2]}'
... '}'
... )
>>> for i in range(len(j)):
...     if j[i] != expected[i:i+1]:
...         print(
...             'exp: ' + expected[i-10:i+50] + '\ngot: ' + j[i-10:i+50]
...         )
...         break
>>> j == expected
True
>>> rec = fromJSON(j)
>>> rec.nodes == dg.nodes
True
>>> rec.edges == dg.edges
True
>>> rec.unknownCount == dg.unknownCount
True
>>> rec.equivalences == dg.equivalences
True
>>> rec.reversionTypes == dg.reversionTypes
True
>>> rec._byEdge == dg._byEdge
True
>>> rec.zones == dg.zones
True
>>> for diff in dg.listDifferences(rec):
...     print(diff)
>>> rec == dg
True

base.MetricSpace example:

>>> ms = base.MetricSpace("test")
>>> ms.addPoint([2, 3])
0
>>> ms.addPoint([2, 7, 0])
1
>>> ms.addPoint([2, 7])
2
>>> toJSON(ms) # TODO: ^^d entries here
'{"^^d": "MetricSpace", "name": "test", "points": {"^^d": "dict", "items": [[0, [2, 3]], [1, [2, 7, 0]], [2, [2, 7]]]}, "lastID": 2}'
>>> ms.removePoint(0)
>>> ms.removePoint(1)
>>> ms.removePoint(2)
>>> toJSON(ms)
'{"^^d": "MetricSpace", "name": "test", "points": {}, "lastID": 2}'
>>> ms.addPoint([5, 6])
3
>>> ms.addPoint([7, 8])
4
>>> toJSON(ms)
'{"^^d": "MetricSpace", "name": "test", "points": {"^^d": "dict", "items": [[3, [5, 6]], [4, [7, 8]]]}, "lastID": 4}'

TODO: more examples, including one for a DiscreteExploration
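
Toward that TODO, an untested sketch of a DiscreteExploration round trip (this assumes an empty exploration encodes and decodes cleanly; the decoder below does list DiscreteExploration as supported):

    de = core.DiscreteExploration()
    rec = fromJSON(toJSON(de))
    isinstance(rec, core.DiscreteExploration)  # expected: True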

def default(self, o: Any) -> Any:
6700    def default(self, o: Any) -> Any:
6701        """
6702        Re-writes objects for encoding. We re-write the following
6703        objects:
6704
6705        - `set`
6706        - `dict` (if the keys aren't all strings)
6707        - `tuple`/`namedtuple`
6708        - `ZoneInfo`
6709        - `Requirement`
6710        - `SkillCombination`
6711        - `DecisionGraph`
6712        - `DiscreteExploration`
6713        - `MetricSpace`
6714
6715        TODO: FeatureGraph...
6716        """
6717        if isinstance(o, list):
6718            return [self.default(x) for x in o]
6719
6720        elif isinstance(o, set):
6721            return {
6722                '^^d': 'set',
6723                'values': sorted(
6724                    [self.default(e) for e in o],
6725                    key=lambda x: str(x)
6726                )
6727            }
6728
6729        elif isinstance(o, dict):
6730            if all(isinstance(k, str) for k in o):
6731                return {
6732                    k: self.default(v)
6733                    for k, v in o.items()
6734                }
6735            else:
6736                return {
6737                    '^^d': 'dict',
6738                    'items': [
6739                        [self.default(k), self.default(v)]
6740                        for (k, v) in o.items()
6741                    ]
6742                }
6743
6744        elif isinstance(o, tuple):
6745            if hasattr(o, '_fields') and hasattr(o, '_asdict'):
6746                # Named tuple
6747                return {
6748                    '^^d': 'namedtuple',
6749                    'name': o.__class__.__name__,
6750                    'values': {
6751                        k: self.default(v)
6752                        for k, v in o._asdict().items()
6753                    }
6754                }
6755            else:
6756                # Normal tuple
6757                return {
6758                    "^^d": "tuple",
6759                    "values": [self.default(e) for e in o]
6760                }
6761
6762        elif isinstance(o, base.Requirement):
6763            return {
6764                '^^d': 'Requirement',
6765                'value': o.unparse()
6766            }
6767
6768        elif isinstance(o, base.SkillCombination):
6769            return {
6770                '^^d': 'SkillCombination',
6771                'value': o.unparse()
6772            }
6773
6774        elif isinstance(o, core.DecisionGraph):
6775            return {
6776                '^^d': 'DecisionGraph',
6777                'props': self.default(o.graph),  # type:ignore [attr-defined]
6778                'node_links': self.default(networkx.node_link_data(o)),
6779                '_byEdge': self.default(o._byEdge),
6780                'zones': self.default(o.zones),
6781                'unknownCount': o.unknownCount,
6782                'equivalences': self.default(o.equivalences),
6783                'reversionTypes': self.default(o.reversionTypes),
6784                'nextMechanismID': o.nextMechanismID,
6785                'mechanisms': self.default(o.mechanisms),
6786                'globalMechanisms': self.default(o.globalMechanisms),
6787                'nameLookup': self.default(o.nameLookup)
6788            }
6789
6790        elif isinstance(o, core.DiscreteExploration):
6791            return {
6792                '^^d': 'DiscreteExploration',
6793                'situations': self.default(o.situations)
6794            }
6795
6796        elif isinstance(o, base.MetricSpace):
6797            return {
6798                '^^d': 'MetricSpace',
6799                'name': o.name,
6800                'points': self.default(o.points),
6801                'lastID': o.lastID()
6802            }
6803
6804        else:
6805            return o

Re-writes objects for encoding. We re-write the following objects:

  • set
  • dict (if the keys aren't all strings)
  • tuple/namedtuple
  • ZoneInfo
  • Requirement
  • SkillCombination
  • DecisionGraph
  • DiscreteExploration
  • MetricSpace

TODO: FeatureGraph...
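
A few concrete rewrites, sketched from the code above:

    enc = CustomJSONEncoder()
    enc.default((1, 2))    # tuples become tagged dicts
    # -> {'^^d': 'tuple', 'values': [1, 2]}
    enc.default({'a': 1})  # all-string keys: left as a plain dict
    # -> {'a': 1}
    enc.default({1: 'a'})  # non-string keys: tagged item-pair form
    # -> {'^^d': 'dict', 'items': [[1, 'a']]}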

def encode(self, o: Any) -> str:
6807    def encode(self, o: Any) -> str:
6808        """
6809        Custom encode function since we need to override behavior for
6810        tuples and dicts.
6811        """
6812        if isinstance(o, (tuple, dict, set)):
6813            o = self.default(o)
6814        elif isinstance(o, list):
6815            o = [self.default(x) for x in o]
6816
6817        try:
6818            return super().encode(o)
6819        except TypeError:
6820            return super().encode(self.default(o))

Custom encode function since we need to override behavior for tuples and dicts.
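
The override matters because the stock encoder serializes tuples and dicts natively, without ever consulting default, so the type tags would silently be lost; a sketch:

    json.JSONEncoder().encode((1, 2))   # -> '[1, 2]' (tuple-ness lost)
    CustomJSONEncoder().encode((1, 2))  # -> '{"^^d": "tuple", "values": [1, 2]}'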

def iterencode(self, o: Any, _one_shot: bool = False) -> Generator[str, None, None]:
6822    def iterencode(
6823        self,
6824        o: Any,
6825        _one_shot: bool = False
6826    ) -> Generator[str, None, None]:
6827        """
6828        Custom iterencode function since we need to override behavior for
6829        tuples and dicts.
6830        """
6831        if isinstance(o, (tuple, dict)):
6832            o = self.default(o)
6833
6834        yield from super().iterencode(o, _one_shot=_one_shot)

Custom iterencode function since we need to override behavior for tuples and dicts.

class CustomJSONDecoder(json.decoder.JSONDecoder):
6837class CustomJSONDecoder(json.JSONDecoder):
6838    """
6839    A custom JSON decoder that has special protocols for handling
6840    several types, including:
6841
6842    - `set`
6843    - `tuple` & `namedtuple`
6844    - `dict` (where keys aren't all strings)
6845    - `Requirement`
6846    - `SkillCombination`
6847    - `DecisionGraph`
6848    - `DiscreteExploration`
6849    - `MetricSpace`
6850
6851    Used by `fromJSON`.
6852
6853    When initializing it, you can set a custom parse format by supplying
6854    a 'parseFormat' keyword argument; by default a standard
6855    `ParseFormat` will be used.
6856
6857    Examples:
6858
6859    >>> r = base.ReqAny([
6860    ...     base.ReqCapability('power'),
6861    ...     base.ReqTokens('money', 5)
6862    ... ])
6863    >>> s = toJSON(r)
6864    >>> s
6865    '{"^^d": "Requirement", "value": "(power|money*5)"}'
6866    >>> l = fromJSON(s)
6867    >>> r == l
6868    True
6869    >>> o = {1, 2, 'hi'}
6870    >>> s = toJSON(o)
6871    >>> s
6872    '{"^^d": "set", "values": [1, 2, "hi"]}'
6873    >>> l = fromJSON(s)
6874    >>> o == l
6875    True
6876    >>> zi = base.ZoneInfo(1, set(), set(), {}, [])
6877    >>> s = toJSON(zi)
6878    >>> c = (
6879    ... '{"^^d": "namedtuple", "name": "ZoneInfo", "values": {'
6880    ... '"level": 1,'
6881    ... ' "parents": {"^^d": "set", "values": []},'
6882    ... ' "contents": {"^^d": "set", "values": []},'
6883    ... ' "tags": {},'
6884    ... ' "annotations": []'
6885    ... '}}'
6886    ... )
6887    >>> s == c
6888    True
6889
6890    TODO: SkillCombination example
6891    """
6892    def __init__(self, *args, **kwargs):
6893        if 'object_hook' in kwargs:
6894            outerHook = kwargs['object_hook']
6895            kwargs['object_hook'] = (
6896                lambda o: outerHook(self.unpack(o))
6897            )
6898            # TODO: What if it's a positional argument? :(
6899        else:
6900            kwargs['object_hook'] = lambda o: self.unpack(o)
6901
6902        if 'parseFormat' in kwargs:
6903            self.parseFormat = kwargs['parseFormat']
6904            del kwargs['parseFormat']
6905        else:
6906            self.parseFormat = ParseFormat()
6907
6908        super().__init__(*args, **kwargs)
6909
6910    def unpack(self, obj: Any) -> Any:
6911        """
6912        Unpacks an object; used as the `object_hook` for decoding.
6913        """
6914        if '^^d' in obj:
6915            asType = obj['^^d']
6916            if asType == 'tuple':
6917                return tuple(obj['values'])
6918
6919            elif asType == 'namedtuple':
6920                g = globals()
6921                name = obj['name']
6922                values = obj['values']
6923                # Use an existing global namedtuple class if there is
6924                # one that goes by the specified name, so that we don't
6925                # create too many spurious equivalent namedtuple
6926                # classes. But fall back on creating a new namedtuple
6927                # class if we need to:
6928                ntClass = g.get(name)
6929                if (
6930                    ntClass is None
6931                 or not issubclass(ntClass, tuple)
6932                 or not hasattr(ntClass, '_asdict')
6933                ):
6934                    ntClass = collections.namedtuple(  # type: ignore
6935                        name,
6936                        values.keys()
6937                    )
6938                ntClass = cast(Callable, ntClass)
6939                return ntClass(**values)
6940
6941            elif asType == 'set':
6942                return set(obj['values'])
6943
6944            elif asType == 'dict':
6945                return dict(obj['items'])
6946
6947            elif asType == 'Requirement':
6948                return self.parseFormat.parseRequirement(obj['value'])
6949
6950            elif asType == 'SkillCombination':
6951                return self.parseFormat.parseSkillCombination(obj['value'])
6952
6953            elif asType == 'Effect':
6954                return self.parseFormat.parseEffect(obj['value'])
6955
6956            elif asType == 'Challenge':
6957                return self.parseFormat.parseChallenge(obj['value'])
6958
6959            elif asType == 'Condition':
6960                return self.parseFormat.parseCondition(obj['value'])
6961
6962            elif asType == 'Consequence':
6963                return self.parseFormat.parseConsequence(obj['value'])
6964
6965            # TODO: Consequences here!
6966
6967            elif asType == 'DecisionGraph':
6968                baseGraph: networkx.MultiDiGraph = networkx.node_link_graph(
6969                    obj['node_links']
6970                )
6971                graphResult = core.DecisionGraph()
6972                # Copy over non-internal attributes
6973                for attr in dir(baseGraph):
6974                    if attr == "name":
6975                        continue
6976                    if not attr.startswith('__') or not attr.endswith('__'):
6977                        val = getattr(baseGraph, attr)
6978                        setattr(
6979                            graphResult,
6980                            attr,
6981                            copy.deepcopy(val)
6982                        )
6983
6984                if baseGraph.name != '':
6985                    graphResult.name = baseGraph.name
6986                graphResult.graph.update(obj['props'])  # type:ignore [attr-defined]  # noqa
6987                storedByEdge = obj['_byEdge']
6988                graphResult._byEdge = {
6989                    int(k): storedByEdge[k]
6990                    for k in storedByEdge
6991                }
6992                graphResult.zones = obj['zones']
6993                graphResult.unknownCount = obj['unknownCount']
6994                graphResult.equivalences = obj['equivalences']
6995                graphResult.reversionTypes = obj['reversionTypes']
6996                graphResult.nextMechanismID = obj['nextMechanismID']
6997                graphResult.mechanisms = {
6998                    int(k): v
6999                    for k, v in
7000                    obj['mechanisms'].items()
7001                }
7002                graphResult.globalMechanisms = obj['globalMechanisms']
7003                graphResult.nameLookup = obj['nameLookup']
7004                return graphResult
7005
7006            elif asType == 'DiscreteExploration':
7007                exResult = core.DiscreteExploration()
7008                exResult.situations = obj['situations']
7009                return exResult
7010
7011            elif asType == 'MetricSpace':
7012                msResult = base.MetricSpace(obj['name'])
7013                msResult.points = obj['points']
7014                msResult.nextID = obj['lastID'] + 1
7015                return msResult
7016
7017            else:
7018                raise NotImplementedError(
7019                    f"No special handling has been defined for"
7020                    f" decoding type '{asType}'."
7021                )
7022
7023        else:
7024            return obj

A custom JSON decoder that has special protocols for handling several types, including:

  • set
  • tuple & namedtuple
  • dict (where keys aren't all strings)
  • Requirement
  • SkillCombination
  • DecisionGraph
  • DiscreteExploration
  • MetricSpace

Used by fromJSON.

When initializing it, you can set a custom parse format by supplying a 'parseFormat' keyword argument; by default a standard ParseFormat will be used.
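
Since json.loads forwards unrecognized keyword arguments to the decoder class's constructor, a custom format can also be supplied directly at the call site. A minimal sketch (using the default ParseFormat for illustration):

>>> import json
>>> s = '{"^^d": "set", "values": [1, 2, "hi"]}'
>>> json.loads(s, cls=CustomJSONDecoder, parseFormat=ParseFormat()) == {1, 2, 'hi'}
True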

Examples:

>>> r = base.ReqAny([
...     base.ReqCapability('power'),
...     base.ReqTokens('money', 5)
... ])
>>> s = toJSON(r)
>>> s
'{"^^d": "Requirement", "value": "(power|money*5)"}'
>>> l = fromJSON(s)
>>> r == l
True
>>> o = {1, 2, 'hi'}
>>> s = toJSON(o)
>>> s
'{"^^d": "set", "values": [1, 2, "hi"]}'
>>> l = fromJSON(s)
>>> o == l
True
>>> zi = base.ZoneInfo(1, set(), set(), {}, [])
>>> s = toJSON(zi)
>>> c = (
... '{"^^d": "namedtuple", "name": "ZoneInfo", "values": {'
... '"level": 1,'
... ' "parents": {"^^d": "set", "values": []},'
... ' "contents": {"^^d": "set", "values": []},'
... ' "tags": {},'
... ' "annotations": []'
... '}}'
... )
>>> s == c
True

TODO: SkillCombination example

CustomJSONDecoder(*args, **kwargs)

object_hook, if specified, will be called with the result of every JSON object decoded and its return value will be used in place of the given dict. This can be used to provide custom deserializations (e.g. to support JSON-RPC class hinting).

object_pairs_hook, if specified, will be called with the result of every JSON object decoded with an ordered list of pairs. The return value of object_pairs_hook will be used instead of the dict. This feature can be used to implement custom decoders. If object_hook is also defined, the object_pairs_hook takes priority.

parse_float, if specified, will be called with the string of every JSON float to be decoded. By default this is equivalent to float(num_str). This can be used to use another datatype or parser for JSON floats (e.g. decimal.Decimal).

parse_int, if specified, will be called with the string of every JSON int to be decoded. By default this is equivalent to int(num_str). This can be used to use another datatype or parser for JSON integers (e.g. float).

parse_constant, if specified, will be called with one of the following strings: -Infinity, Infinity, NaN. This can be used to raise an exception if invalid JSON numbers are encountered.

If strict is false (true is the default), then control characters will be allowed inside strings. Control characters in this context are those with character codes in the 0-31 range, including '\t' (tab), '\n', '\r' and '\0'.
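
Note that a caller-supplied object_hook is composed with unpack rather than replacing it: per __init__ above, the custom hook receives each object *after* unpack has already converted it. A minimal sketch:

>>> def audit(obj):
...     print("decoded a", type(obj).__name__)
...     return obj
>>> dec = CustomJSONDecoder(object_hook=audit)
>>> dec.decode('{"^^d": "set", "values": [1, 2]}')
decoded a set
{1, 2}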

def unpack(self, obj: Any) -> Any:

Unpacks an object; used as the object_hook for decoding.
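
The tagged forms it recognizes mirror what the encoder emits; for example (formats taken directly from unpack's branches):

>>> d = CustomJSONDecoder()
>>> d.decode('{"^^d": "tuple", "values": [1, 2]}')
(1, 2)
>>> d.decode('{"^^d": "dict", "items": [[1, "one"], [2, "two"]]}')
{1: 'one', 2: 'two'}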

Inherited Members
json.decoder.JSONDecoder
object_hook
parse_float
parse_int
parse_constant
strict
object_pairs_hook
parse_object
parse_array
parse_string
memo
scan_once
decode
raw_decode