exploration.parsing

  • Authors: Peter Mawhorter
  • Consulted:
  • Date: 2023-12-27
  • Purpose: Common code for parsing things, including conversions to custom string formats and JSON for some types.
   1"""
   2- Authors: Peter Mawhorter
   3- Consulted:
   4- Date: 2023-12-27
   5- Purpose: Common code for parsing things, including conversions to
   6    custom string formats and JSON for some types.
   7"""
   8
   9from typing import (
  10    Union, Dict, Optional, get_args, Tuple, List, cast, Set, TypeVar,
  11    Literal, TypeAlias, Generator, TypedDict, TextIO, Any, Callable,
  12    Type, Sequence
  13)
  14
  15import enum
  16import collections
  17import copy
  18import warnings
  19import json
  20
  21import networkx  # type: ignore
  22
  23from . import base
  24from . import core
  25from . import utils
  26from . import commands
  27
  28
  29#----------------#
  30# Format Details #
  31#----------------#
  32
  33Lexeme = enum.IntEnum(
  34    "Lexeme",
  35    [
  36        'domainSeparator',
  37        'zoneSeparator',
  38        'partSeparator',
  39        'stateOn',
  40        'stateOff',
  41        'tokenCount',
  42        'effectCharges',
  43        'sepOrDelay',
  44        'consequenceSeparator',
  45        'inCommon',
  46        'isHidden',
  47        'skillLevel',
  48        'wigglyLine',
  49        'withDetails',
  50        'reciprocalSeparator',
  51        'mechanismSeparator',
  52        'openCurly',
  53        'closeCurly',
  54        'openParen',
  55        'closeParen',
  56        'angleLeft',
  57        'angleRight',
  58        'doubleQuestionmark',
  59        'ampersand',
  60        'orBar',
  61        'notMarker',
  62    ]
  63)
  64"""
  65These are the different separators, grouping characters, and keywords
  66used as part of parsing. The characters that are actually recognized are
  67defined as part of a `Format`.
  68"""
  69
  70Format = Dict[Lexeme, str]
  71"""
  72A journal format is specified using a dictionary with keys that denote
  73journal marker types and values which are one-to-several-character
  74strings indicating the markup used for that entry/info type.
  75"""
  76
  77DEFAULT_FORMAT: Format = {
  78    # Separators
  79    Lexeme.domainSeparator: '//',
  80    Lexeme.zoneSeparator: '::',
  81    Lexeme.partSeparator: '%%',
  82    Lexeme.stateOn: '=on',  # TODO: Lexing issue!
  83    Lexeme.stateOff: '=off',
  84    Lexeme.tokenCount: '*',
  85    Lexeme.effectCharges: '=',
  86    Lexeme.sepOrDelay: ',',
  87    Lexeme.consequenceSeparator: ';',
  88    Lexeme.inCommon: '+c',
  89    Lexeme.isHidden: '+h',
  90    Lexeme.skillLevel: '^',
  91    Lexeme.wigglyLine: '~',
  92    Lexeme.withDetails: '%',
  93    Lexeme.reciprocalSeparator: '/',
  94    Lexeme.mechanismSeparator: ':',
  95    Lexeme.openCurly: '{',
  96    Lexeme.closeCurly: '}',
  97    Lexeme.openParen: '(',
  98    Lexeme.closeParen: ')',
  99    Lexeme.angleLeft: '<',
 100    Lexeme.angleRight: '>',
 101    Lexeme.doubleQuestionmark: '??',
 102    Lexeme.ampersand: '&',
 103    Lexeme.orBar: '|',
 104    Lexeme.notMarker: '!',
 105}
 106"""
 107The default parsing format.
 108"""
 109
 110DEFAULT_EFFECT_NAMES: Dict[str, base.EffectType] = {
 111    x: x for x in get_args(base.EffectType)
 112}
 113"""
 114Default names for each effect type. Maps names to canonical effect type
 115strings. A different mapping could be used to allow for writing effect
 116names in another language, for example.
 117"""
 118
 119DEFAULT_FOCALIZATION_NAMES: Dict[str, base.DomainFocalization] = {
 120    x: x for x in get_args(base.DomainFocalization)
 121}
 122"""
 123Default names for each domain focalization type. Maps each focalization
 124type string to itself.
 125"""
 126
 127DEFAULT_SF_INDICATORS: Tuple[str, str] = ('s', 'f')
 128"""
 129Default characters used to indicate success/failure when transcribing a
 130`TransitionWithOutcomes`.
 131"""
 132
 133
 134#-------------------#
 135# Errors & Warnings #
 136#-------------------#
 137
 138class ParseWarning(Warning):
 139    """
 140    Represents a warning encountered when parsing something.
 141    """
 142    pass
 143
 144
 145class ParseError(ValueError):
 146    """
 147    Represents an error encountered when parsing.
 148    """
 149    pass
 150
 151
 152class DotParseError(ParseError):
 153    """
 154    An error raised during parsing when incorrectly-formatted graphviz
 155    "dot" data is provided. See `parseDot`.
 156    """
 157    pass
 158
 159
 160class InvalidFeatureSpecifierError(ParseError):
 161    """
 162    An error used when a feature specifier is in the wrong format.
 163    Errors with part specifiers also use this.
 164    """
 165
 166
 167#--------#
 168# Lexing #
 169#--------#
 170
 171LexedTokens: TypeAlias = List[Union[Lexeme, str]]
 172"""
 173When lexing, we pull apart a string into pieces, but when we recognize
 174lexemes, we use their integer IDs in the list instead of strings, so we
 175get a list that's a mix of ints and strings.
 176"""
 177
 178GroupedTokens: TypeAlias = List[Union[Lexeme, str, 'GroupedTokens']]
 179"""
 180Some parsing processes group tokens into sub-lists. This type represents
 181`LexedTokens` which might also contain sub-lists, to arbitrary depth.
 182"""
 183
 184GroupedRequirementParts: TypeAlias = List[
 185    Union[Lexeme, base.Requirement, 'GroupedRequirementParts']
 186]
 187"""
 188Another intermediate parsing result during requirement parsing: a list
 189    of `base.Requirement` objects, possibly with some sub-lists and/or
 190    `Lexeme`s mixed in.
 191"""
 192
 193
 194def lex(
 195    characters: str,
 196    tokenMap: Optional[Dict[str, Lexeme]] = None
 197) -> LexedTokens:
 198    """
 199    Lexes a list of tokens from a string of characters. Recognizes any
 200    special characters you provide in the token map, as well as
 201    collections of non-mapped characters. Recognizes double-quoted
 202    strings which can contain any of those (and which use
 203    backslash-escapes for internal double quotes) and includes quoted
 204    versions of those strings as tokens (any token string starting with a
 205    double quote will be such a string). Breaks tokens on whitespace
 206    outside of quotation marks, and ignores that whitespace.
 207
 208    Examples:
 209
 210    >>> lex('abc')
 211    ['abc']
 212    >>> lex('(abc)', {'(': 0, ')': 1})
 213    [0, 'abc', 1]
 214    >>> lex('{(abc)}', {'(': 0, ')': 1, '{': 2, '}': 3})
 215    [2, 0, 'abc', 1, 3]
 216    >>> lex('abc def')
 217    ['abc', 'def']
 218    >>> lex('abc   def')
 219    ['abc', 'def']
 220    >>> lex('abc \\n def')
 221    ['abc', 'def']
 222    >>> lex ('"quoted"')
 223    ['"quoted"']
 224    >>> lex ('"quoted  pair"')
 225    ['"quoted  pair"']
 226    >>> lex ('  oneWord | "two words"|"three  words words" ', {'|': 0})
 227    ['oneWord', 0, '"two words"', 0, '"three  words words"']
 228    >>> tokenMap = { c: i for (i, c) in enumerate("(){}~:;>,") }
 229    >>> tokenMap['::'] = 9
 230    >>> tokenMap['~~'] = 10
 231    >>> lex(
 232    ...     '{~~2:best(brains, brawn)>{set switch on}'
 233    ...     '{deactivate ,1; bounce}}',
 234    ...     tokenMap
 235    ... )
 236    [2, 10, '2', 5, 'best', 0, 'brains', 8, 'brawn', 1, 7, 2, 'set',\
 237 'switch', 'on', 3, 2, 'deactivate', 8, '1', 6, 'bounce', 3, 3]
 238    >>> lex('set where::mechanism state', tokenMap)
 239    ['set', 'where', 9, 'mechanism', 'state']
 240    >>> # Note r' doesn't take full effect 'cause we're in triple quotes
 241    >>> esc = r'"escape \\\\a"'
 242    >>> result = [ r'"escape \\\\a"' ]  # 'quoted' doubles the backslash
 243    >>> len(esc)
 244    12
 245    >>> len(result[0])
 246    12
 247    >>> lex(esc) == result
 248    True
 249    >>> quoteInQuote = r'before "hello \\\\ \\" goodbye"after'
 250    >>> # Note r' doesn't take full effect 'cause we're in triple quotes
 251    >>> expect = ['before', r'"hello \\\\ \\" goodbye"', 'after']
 252    >>> lex(quoteInQuote) == expect
 253    True
 254    >>> lex('O\\'Neill')
 255    ["O'Neill"]
 256    >>> lex('one "quote ')
 257    ['one', '"quote "']
 258    >>> lex('geo*15', {'*': 0})
 259    ['geo', 0, '15']
 260    """
 261    if tokenMap is None:
 262        tokenMap = {}
 263    tokenStarts: Dict[str, List[str]] = {}
 264    for key in sorted(tokenMap.keys(), key=lambda x: -len(x)):
 265        tokenStarts.setdefault(key[:1], []).append(key)
 266    tokens: LexedTokens = []
 267    sofar = ''
 268    inQuote = False
 269    escaped = False
 270    skip = 0
 271    for i in range(len(characters)):
 272        if skip > 0:
 273            skip -= 1
 274            continue
 275
 276        char = characters[i]
 277        if escaped:
 278            # TODO: Escape sequences?
 279            sofar += char
 280            escaped = False
 281
 282        elif char == '\\':
 283            if inQuote:
 284                escaped = True
 285            else:
 286                sofar += char
 287
 288        elif char == '"':
 289            if sofar != '':
 290                if inQuote:
 291                    tokens.append(utils.quoted(sofar))
 292                else:
 293                    tokens.append(sofar)
 294            sofar = ''
 295            inQuote = not inQuote
 296
 297        elif inQuote:
 298            sofar += char
 299
 300        elif char in tokenStarts:
 301            options = tokenStarts[char]
 302            hit: Optional[str] = None
 303            for possibility in options:
 304                lp = len(possibility)
 305                if (
 306                    (lp == 1 and char == possibility)
 307                or characters[i:i + lp] == possibility
 308                ):
 309                    hit = possibility
 310                    break
 311
 312            if hit is not None:
 313                if sofar != '':
 314                    tokens.append(sofar)
 315                tokens.append(tokenMap[hit])
 316                sofar = ''
 317                skip = len(hit) - 1
 318            else:  # Not actually a recognized token
 319                sofar += char
 320
 321        elif char.isspace():
 322            if sofar != '':
 323                tokens.append(sofar)
 324            sofar = ''
 325
 326        else:
 327            sofar += char
 328
 329    if sofar != '':
 330        if inQuote:
 331            tokens.append(utils.quoted(sofar))
 332        else:
 333            tokens.append(sofar)
 334
 335    return tokens
 336
 337
 338def unLex(
 339    tokens: LexedTokens,
 340    tokenMap: Optional[Dict[str, Lexeme]] = None
 341) -> str:
 342    """
 343    Turns lexed stuff back into a string, substituting strings back into
 344    token spots by reversing the given token map. Adds quotation marks to
 345    complex tokens where necessary to prevent them from re-lexing into
 346    multiple tokens (but `lex` doesn't remove those, so in some cases
 347    there's not a perfect round-trip unLex -> lex).
 348
 349    For example:
 350
 351    >>> unLex(['a', 'b'])
 352    'a b'
 353    >>> tokens = {'(': 0, ')': 1, '{': 2, '}': 3, '::': 4}
 354    >>> unLex([0, 'hi', 1], tokens)
 355    '(hi)'
 356    >>> unLex([0, 'visit', 'zone', 4, 'decision', 1], tokens)
 357    '(visit zone::decision)'
 358    >>> q = unLex(['a complex token', '\\'single\\' and "double" quotes'])
 359    >>> q  # unLex adds quotes
 360    '"a complex token" "\\'single\\' and \\\\"double\\\\" quotes"'
 361    >>> lex(q)  # Not the same as the original list
 362    ['"a complex token"', '"\\'single\\' and \\\\"double\\\\" quotes"']
 363    >>> lex(unLex(lex(q)))  # But further round-trips work
 364    ['"a complex token"', '"\\'single\\' and \\\\"double\\\\" quotes"']
 365
 366    TODO: Fix this:
 367    For now, it generates incorrect results when token combinations can
 368    be ambiguous. These ambiguous token combinations should not ever be
 369    generated by `lex` at least. For example:
 370
 371    >>> ambiguous = {':': 0, '::': 1}
 372    >>> u = unLex(['a', 0, 0, 'b'], ambiguous)
 373    >>> u
 374    'a::b'
 375    >>> l = lex(u, ambiguous)
 376    >>> l
 377    ['a', 1, 'b']
 378    >>> l == u
 379    False
 380    """
 381    if tokenMap is None:
 382        nTokens = 0
 383        revMap = {}
 384    else:
 385        nTokens = len(tokenMap)
 386        revMap = {y: x for (x, y) in tokenMap.items()}
 387
 388    prevRaw = False
 389    # TODO: add spaces where necessary to disambiguate token sequences...
 390    if len(revMap) != nTokens:
 391        warnings.warn(
 392            (
 393                "Irreversible token map! Two or more tokens have the same"
 394                " integer value."
 395            ),
 396            ParseWarning
 397        )
 398
 399    result = ""
 400    for item in tokens:
 401        if isinstance(item, int):
 402            try:
 403                result += revMap[item]
 404            except KeyError:
 405                raise ValueError(
 406                    f"Tokens list contains {item} but the token map"
 407                    f" does not have any entry which maps to {item}."
 408                )
 409            prevRaw = False
 410        elif isinstance(item, str):
 411            if prevRaw:
 412                result += ' '
 413            if len(lex(item)) > 1:
 414                result += utils.quoted(item)
 415            else:
 416                result += item
 417            prevRaw = True
 418        else:
 419            raise TypeError(
 420                f"Token list contained non-int non-str item:"
 421                f" {repr(item)}"
 422            )
 423
 424    return result
 425
 426
 427#-------------------#
 428# ParseFormat class #
 429#-------------------#
 430
 431def normalizeEnds(
 432    tokens: List,
 433    start: int,
 434    end: int
 435) -> Tuple[int, int, int]:
 436    """
 437    Given a tokens list and start & end integers, does some bounds
 438    checking and normalization on the integers: converts negative
 439    indices to positive indices, and raises an `IndexError` if they're
 440    out-of-bounds after conversion. Returns a tuple containing the
 441    normalized start & end indices, along with the number of tokens they
 442    cover.
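
        For example:

        >>> normalizeEnds(['a', 'b', 'c'], 0, -1)
        (0, 2, 3)
        >>> normalizeEnds(['a', 'b', 'c'], -2, -2)
        (1, 1, 1)
        >>> normalizeEnds(['a', 'b', 'c'], 0, 3)
        Traceback (most recent call last):
        ...
        IndexError...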
 443    """
 444    totalTokens = len(tokens)
 445    if start < -len(tokens):
 446        raise IndexError(
 447            f"Negative start index out of bounds (got {start} for"
 448            f" {totalTokens} tokens)."
 449        )
 450    elif start >= totalTokens:
 451        raise IndexError(
 452            f"Start index out of bounds (got {start} for"
 453            f" {totalTokens} tokens)."
 454        )
 455    elif start < 0:
 456        start = totalTokens + start
 457
 458    if end < -len(tokens):
 459        raise IndexError(
 460            f"Negative end index out of bounds (got {end} for"
 461            f" {totalTokens} tokens)."
 462        )
 463    elif end >= totalTokens:
 464        raise IndexError(
 465            f"End index out of bounds (got {end} for"
 466            f" {totalTokens} tokens)."
 467        )
 468    elif end < 0:
 469        end = totalTokens + end
 470
 471    if end >= len(tokens):
 472        end = len(tokens) - 1
 473
 474    return (start, end, (end - start) + 1)
 475
 476
 477def findSeparatedParts(
 478    tokens: LexedTokens,
 479    sep: Union[str, int],
 480    start: int = 0,
 481    end: int = -1,
 482    groupStart: Union[str, int, None] = None,
 483    groupEnd: Union[str, int, None] = None
 484) -> Generator[Tuple[int, int], None, None]:
 485    """
 486    Finds parts separated by a separator lexeme, such as ';' or ',', but
 487    ignoring separators nested within groupStart/groupEnd pairs (if
 488    those arguments are supplied). For each token sequence found, yields
 489    a tuple containing the start index and end index for that part, with
 490    separators not included in the parts.
 491
 492    If two separators appear in a row, the start/end pair will have a
 493    start index one after the end index.
 494
 495    If there are no separators, yields one pair containing the start and
 496    end of the entire tokens sequence.
 497
 498    Raises a `ParseError` if there are unbalanced grouping elements.
 499
 500    For example:
 501
 502    >>> list(findSeparatedParts(
 503    ...     [ 'one' ],
 504    ...     Lexeme.sepOrDelay,
 505    ...     0,
 506    ...     0,
 507    ...     Lexeme.openParen,
 508    ...     Lexeme.closeParen
 509    ... ))
 510    [(0, 0)]
 511    >>> list(findSeparatedParts(
 512    ...     [
 513    ...         'best',
 514    ...         Lexeme.openParen,
 515    ...         'chess',
 516    ...         Lexeme.sepOrDelay,
 517    ...         'checkers',
 518    ...         Lexeme.closeParen
 519    ...     ],
 520    ...     Lexeme.sepOrDelay,
 521    ...     2,
 522    ...     4,
 523    ...     Lexeme.openParen,
 524    ...     Lexeme.closeParen
 525    ... ))
 526    [(2, 2), (4, 4)]
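
        Two separators in a row produce an empty part whose start index
        is one more than its end index:

        >>> list(findSeparatedParts(
        ...     ['a', Lexeme.sepOrDelay, Lexeme.sepOrDelay, 'b'],
        ...     Lexeme.sepOrDelay
        ... ))
        [(0, 0), (2, 1), (3, 3)]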
 527    """
 528    start, end, n = normalizeEnds(tokens, start, end)
 529    level = 0
 530    thisStart = start
 531    for i in range(start, end + 1):
 532        token = tokens[i]
 533        if token == sep and level == 0:
 534            yield (thisStart, i - 1)
 535            thisStart = i + 1
 536        elif token == groupStart:
 537            level += 1
 538        elif token == groupEnd:
 539            level -= 1
 540            if level < 0:
 541                raise ParseError("Unbalanced grouping tokens.")
 542    if level < 0:
 543        raise ParseError("Unbalanced grouping tokens.")
 544    yield (thisStart, end)
 545
 546
 547K = TypeVar('K')
 548"Type variable for dictionary keys."
 549V = TypeVar('V')
 550"Type variable for dictionary values."
 551
 552
 553def checkCompleteness(
 554    name,
 555    mapping: Dict[K, V],
 556    keysSet: Optional[Set[K]] = None,
 557    valuesSet: Optional[Set[V]] = None
 558):
 559    """
 560    Checks that a dictionary has a certain exact set of keys (or
 561    values). Raises a `ValueError` if it finds an extra or missing key
 562    or value.
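
        For example:

        >>> checkCompleteness('M', {'a': 1}, keysSet={'a'})
        >>> checkCompleteness('M', {'a': 1}, keysSet={'a', 'b'})
        Traceback (most recent call last):
        ...
        ValueError: M is missing key 'b'.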
 563    """
 564    if keysSet is not None:
 565        for key in mapping.keys():
 566            if key not in keysSet:
 567                raise ValueError(f"{name} has extra key {repr(key)}.")
 568
 569        for key in keysSet:
 570            if key not in mapping:
 571                raise ValueError(f"{name} is missing key {repr(key)}.")
 572
 573    if valuesSet is not None:
 574        for value in mapping.values():
 575            if value not in valuesSet:
 576                raise ValueError(f"{name} has extra value {repr(value)}.")
 577
 578        checkVals = mapping.values()
 579        for value in valuesSet:
 580            if value not in checkVals:
 581                raise ValueError(f"{name} is missing value {repr(value)}.")
 582
 583
 584class ParseFormat:
 585    """
 586    A ParseFormat manages the mapping from markers to entry types and
 587    vice versa.
 588    """
 589    def __init__(
 590        self,
 591        formatDict: Format = DEFAULT_FORMAT,
 592        effectNames: Dict[str, base.EffectType] = DEFAULT_EFFECT_NAMES,
 593        focalizationNames: Dict[
 594            str,
 595            base.DomainFocalization
 596        ] = DEFAULT_FOCALIZATION_NAMES,
 597        successFailureIndicators: Tuple[str, str] = DEFAULT_SF_INDICATORS
 598    ):
 599        """
 600        Sets up the parsing format. Requires a `Format` dictionary to
 601        define the specifics. Raises a `ValueError` unless the keys of
 602        the `Format` dictionary exactly match the `Lexeme` values.
 603        """
 604        self.formatDict = formatDict
 605        self.effectNames = effectNames
 606        self.focalizationNames = focalizationNames
 607        if (
 608            len(successFailureIndicators) != 2
 609        or any(len(i) != 1 for i in successFailureIndicators)
 610        ):
 611            raise ValueError(
 612                f"Invalid success/failure indicators: must be a pair of"
 613                f" length-1 strings. Got: {successFailureIndicators!r}"
 614            )
 615        self.successIndicator, self.failureIndicator = (
 616            successFailureIndicators
 617        )
 618
 619        # Check completeness for each dictionary
 620        checkCompleteness('formatDict', self.formatDict, set(Lexeme))
 621        checkCompleteness(
 622            'effectNames',
 623            self.effectNames,
 624            valuesSet=set(get_args(base.EffectType))
 625        )
 626        checkCompleteness(
 627            'focalizationNames',
 628            self.focalizationNames,
 629            valuesSet=set(get_args(base.DomainFocalization))
 630        )
 631
 632        # Build some reverse lookup dictionaries for specific
 633        # circumstances:
 634        self.reverseFormat = {y: x for (x, y) in self.formatDict.items()}
 635
 636        self.effectModMap = {
 637            self.formatDict[x]: x
 638            for x in [
 639                Lexeme.effectCharges,
 640                Lexeme.sepOrDelay,
 641                Lexeme.inCommon,
 642                Lexeme.isHidden
 643            ]
 644        }
 645
 646    def lex(self, content: str) -> LexedTokens:
 647        """
 648        Applies `lex` using this format's lexeme mapping.
 649        """
 650        return lex(content, self.reverseFormat)
 651
 652    def onOff(self, word: str) -> Optional[bool]:
 653        """
 654        Parses an on/off indicator and returns a boolean (`True` for on
 655        and `False` for off). Returns `None` if the word isn't either
 656        the 'on' or the 'off' word. Generates a `ParseWarning`
 657        (and still returns `None`) if the word is a case-swapped version
 658        of the 'on' or 'off' word and is not equal to either of them.
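
            For example (using the default '=on' and '=off' words):

            >>> pf = ParseFormat()
            >>> pf.onOff('=on')
            True
            >>> pf.onOff('=off')
            False
            >>> pf.onOff('neither') is None
            True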
 659        """
 660        onWord = self.formatDict[Lexeme.stateOn]
 661        offWord = self.formatDict[Lexeme.stateOff]
 662
 663        # Generate warning if we suspect a case error
 664        if (
 665            word.casefold() in (onWord.casefold(), offWord.casefold())
 666        and word not in (onWord, offWord)
 667        ):
 668            warnings.warn(
 669                (
 670                    f"Word '{word}' cannot be interpreted as an on/off"
 671                    f" value, although it is almost one (the correct"
 672                    f" values are '{onWord}' and '{offWord}')."
 673                ),
 674                ParseWarning
 675            )
 676
 677        # return the appropriate value
 678        if word == onWord:
 679            return True
 680        elif word == offWord:
 681            return False
 682        else:
 683            return None
 684
 685    def matchingBrace(
 686        self,
 687        tokens: LexedTokens,
 688        where: int,
 689        opener: int = Lexeme.openCurly,
 690        closer: int = Lexeme.closeCurly
 691    ) -> int:
 692        """
 693        Returns the index within the given tokens list of the closing
 694        curly brace which matches the open brace at the specified index.
 695        You can specify custom `opener` and/or `closer` lexemes to find
 696        matching pairs of other things. Raises a `ParseError` if there
 697        is no opening brace at the specified index, or if there isn't a
 698        matching closing brace. Handles nested braces of the specified
 699        type.
 700
 701        Examples:
 702        >>> pf = ParseFormat()
 703        >>> ob = Lexeme.openCurly
 704        >>> cb = Lexeme.closeCurly
 705        >>> pf.matchingBrace([ob, cb], 0)
 706        1
 707        >>> pf.matchingBrace([ob, cb], 1)
 708        Traceback (most recent call last):
 709          ...
 710        exploration.parsing.ParseError: ...
 711        >>> pf.matchingBrace(['hi', ob, cb], 0)
 712        Traceback (most recent call last):
 713          ...
 714        exploration.parsing.ParseError: ...
 715        >>> pf.matchingBrace(['hi', ob, cb], 1)
 716        2
 717        >>> pf.matchingBrace(['hi', ob, 'lo', cb], 1)
 718        3
 719        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 1)
 720        Traceback (most recent call last):
 721          ...
 722        exploration.parsing.ParseError: ...
 723        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 0)
 724        3
 725        >>> pf.matchingBrace([ob, ob, cb, cb], 0)
 726        3
 727        >>> pf.matchingBrace([ob, ob, cb, cb], 1)
 728        2
 729        >>> pf.matchingBrace([ob, cb, ob, cb], 0)
 730        1
 731        >>> pf.matchingBrace([ob, cb, ob, cb], 2)
 732        3
 733        >>> pf.matchingBrace([ob, cb, cb, cb], 0)
 734        1
 735        >>> pf.matchingBrace([ob, ob, ob, cb], 0)
 736        Traceback (most recent call last):
 737          ...
 738        exploration.parsing.ParseError: ...
 739        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 0)
 740        7
 741        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 1)
 742        6
 743        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 2)
 744        Traceback (most recent call last):
 745          ...
 746        exploration.parsing.ParseError: ...
 747        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 3)
 748        4
 749        >>> op = Lexeme.openParen
 750        >>> cp = Lexeme.closeParen
 751        >>> pf.matchingBrace([ob, op, ob, cp], 1, op, cp)
 752        3
 753        """
 754        if where >= len(tokens):
 755            raise ParseError(
 756                f"Out-of-bounds brace start: index {where} with"
 757                f" {len(tokens)} tokens."
 758            )
 759        if tokens[where] != opener:
 760            raise ParseError(
 761                f"Can't find matching brace for token"
 762                f" {repr(tokens[where])} at index {where} because it's"
 763                f" not an open brace."
 764            )
 765
 766        level = 1
 767        for i in range(where + 1, len(tokens)):
 768            token = tokens[i]
 769            if token == opener:
 770                level += 1
 771            elif token == closer:
 772                level -= 1
 773                if level == 0:
 774                    return i
 775
 776        raise ParseError(
 777            f"Failed to find matching brace from index {where}."
 778        )
 779
 780    def parseFocalization(self, word: str) -> base.DomainFocalization:
 781        """
 782        Parses a focalization type for a domain, recognizing
 783        'domainFocalizationSingular', 'domainFocalizationPlural', and
 784        'domainFocalizationSpreading'.
 785        """
 786        try:
 787            return self.focalizationNames[word]
 788        except KeyError:
 789            raise ParseError(
 790                f"Invalid domain focalization name {repr(word)}. Valid"
 791                f" names are: {repr(list(self.focalizationNames))}."
 792            )
 793
 794    def parseTagValue(self, value: str) -> base.TagValue:
 795        """
 796        Converts a string to a tag value, following these rules:
 797
 798        1. If the string is exactly one of 'None', 'True', or 'False', we
 799            convert it to the corresponding Python value.
 800        2. If the string can be converted to an integer without raising a
 801            ValueError, we use that integer.
 802        3. If the string can be converted to a float without raising a
 803            ValueError, we use that float.
 804        4. Otherwise, it remains a string.
 805
 806        Note that there is currently no syntax for using list, dictionary,
 807        Requirement, or Consequence tag values.
 808        TODO: Support those types?
 809
 810        Examples:
 811
 812        >>> pf = ParseFormat()
 813        >>> pf.parseTagValue('hi')
 814        'hi'
 815        >>> pf.parseTagValue('3')
 816        3
 817        >>> pf.parseTagValue('3.0')
 818        3.0
 819        >>> pf.parseTagValue('True')
 820        True
 821        >>> pf.parseTagValue('False')
 822        False
 823        >>> pf.parseTagValue('None') is None
 824        True
 825        >>> pf.parseTagValue('none')
 826        'none'
 827        """
 828        # TODO: Allow these keywords to be redefined?
 829        if value == 'True':
 830            return True
 831        elif value == 'False':
 832            return False
 833        elif value == 'None':
 834            return None
 835        else:
 836            try:
 837                return int(value)
 838            except ValueError:
 839                try:
 840                    return float(value)
 841                except ValueError:
 842                    return value
 843
 844    def unparseTagValue(self, value: base.TagValue) -> str:
 845        """
 846        Converts a tag value into a string that would be parsed back into a
 847        tag value via `parseTagValue`. Currently does not work for list,
 848        dictionary, Requirement, or Consequence values.
 849        TODO: Those
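
            For example:

            >>> pf = ParseFormat()
            >>> pf.unparseTagValue(5)
            '5'
            >>> pf.parseTagValue(pf.unparseTagValue(None)) is None
            True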
 850        """
 851        return str(value)
 852
 853    def hasZoneParts(self, name: str) -> bool:
 854        """
 855        Returns `True` if the specified name contains zone parts (using
 856        the `zoneSeparator`).
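
            For example (using the default '::' zone separator):

            >>> pf = ParseFormat()
            >>> pf.hasZoneParts('zone::decision')
            True
            >>> pf.hasZoneParts('decision')
            False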
 857        """
 858        return self.formatDict[Lexeme.zoneSeparator] in name
 859
 860    def splitZone(
 861        self,
 862        name: str
 863    ) -> Tuple[List[base.Zone], base.DecisionName]:
 864        """
 865        Splits a decision name that includes zone information into the
 866        list-of-zones part and the decision part. If there is no zone
 867        information in the name, the list-of-zones will be an empty
 868        list.
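
            For example (using the default '::' zone separator):

            >>> pf = ParseFormat()
            >>> pf.splitZone('zone::where')
            (['zone'], 'where')
            >>> pf.splitZone('a::b::c')
            (['a', 'b'], 'c')
            >>> pf.splitZone('decision')
            ([], 'decision')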
 869        """
 870        sep = self.formatDict[Lexeme.zoneSeparator]
 871        parts = name.split(sep)
 872        return (list(parts[:-1]), parts[-1])
 873
 874    def prefixWithZone(
 875        self,
 876        name: base.DecisionName,
 877        zone: base.Zone
 878    ) -> base.DecisionName:
 879        """
 880        Returns the given decision name, prefixed with the given zone
 881        name. Does NOT check whether the decision name already includes
 882        a prefix or not.
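
            For example:

            >>> pf = ParseFormat()
            >>> pf.prefixWithZone('where', 'zone')
            'zone::where'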
 883        """
 884        return zone + self.formatDict[Lexeme.zoneSeparator] + name
 885
 886    def parseAnyTransitionFromTokens(
 887        self,
 888        tokens: LexedTokens,
 889        start: int = 0
 890    ) -> Tuple[base.TransitionWithOutcomes, int]:
 891        """
 892        Parses a `base.TransitionWithOutcomes` from a tokens list,
 893        accepting either a transition name or a transition name followed
 894        by a `Lexeme.withDetails` followed by a string of success and
 895        failure indicator characters. Returns a tuple containing a
 896        `base.TransitionWithOutcomes` and an integer indicating the end
 897        index of the parsed item within the tokens.
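
            For example:

            >>> pf = ParseFormat()
            >>> pf.parseAnyTransitionFromTokens(['forward'])
            (('forward', []), 0)
            >>> pf.parseAnyTransitionFromTokens(
            ...     ['forward', Lexeme.withDetails, 'sf']
            ... )
            (('forward', [True, False]), 2)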
 898        """
 899        # Normalize start index so we can do index math
 900        if start < 0:
 901            useIndex = len(tokens) + start
 902        else:
 903            useIndex = start
 904
 905        try:
 906            first = tokens[useIndex]
 907        except IndexError:
 908            raise ParseError(
 909                f"Invalid token index: {start!r} among {len(tokens)}"
 910                f" tokens."
 911            )
 912
 913        if isinstance(first, Lexeme):
 914            raise ParseError(
 915                f"Expecting a transition name (possibly with a"
 916                f" success/failure indicator string) but first token is"
 917                f" {first!r}."
 918            )
 919
 920        try:
 921            second = tokens[useIndex + 1]
 922            third = tokens[useIndex + 2]
 923        except IndexError:
 924            return ((first, []), useIndex)
 925
 926        if second != Lexeme.withDetails or isinstance(third, Lexeme):
 927            return ((first, []), useIndex)
 928
 929        outcomes = []
 930        for char in third:
 931            if char == self.successIndicator:
 932                outcomes.append(True)
 933            elif char == self.failureIndicator:
 934                outcomes.append(False)
 935            else:
 936                return ((first, []), useIndex)
 937
 938        return ((first, outcomes), useIndex + 2)
 939
 940    def parseTransitionWithOutcomes(
 941        self,
 942        content: str
 943    ) -> base.TransitionWithOutcomes:
 944        """
 945        Takes a transition that may have outcomes listed as a series of
 946        s/f indicator characters after a `Lexeme.withDetails` marker and
 947        returns the corresponding `TransitionWithOutcomes` tuple. Calls
 948        `lex` and then `parseAnyTransitionFromTokens`.
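
            For example (using the default '%' marker):

            >>> pf = ParseFormat()
            >>> pf.parseTransitionWithOutcomes('forward%sf')
            ('forward', [True, False])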
 949        """
 950        return self.parseAnyTransitionFromTokens(self.lex(content))[0]
 951
 952    def unparseTransitionWithOutocmes(
 953        self,
 954        transition: base.AnyTransition
 955    ) -> str:
 956        """
 957        Turns a `base.AnyTransition` back into a string that would parse
 958        to an equivalent `base.TransitionWithOutcomes` via
 959        `parseTransitionWithOutcomes`. If a bare `base.Transition` is
 960        given, returns a string that would result in a
 961        `base.TransitionWithOutcomes` that has an empty outcomes
 962        sequence.
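
            For example (using the default format):

            >>> pf = ParseFormat()
            >>> pf.unparseTransitionWithOutocmes('forward')
            'forward'
            >>> pf.unparseTransitionWithOutocmes(('forward', [True, False]))
            'forward%sf'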
 963        """
 964        if isinstance(transition, base.Transition):
 965            return transition
 966        elif (
 967            isinstance(transition, tuple)
 968        and len(transition) == 2
 969        and isinstance(transition[0], base.Transition)
 970        and isinstance(transition[1], list)
 971        and all(isinstance(sfi, bool) for sfi in transition[1])
 972        ):
 973            if len(transition[1]) == 0:
 974                return transition[0]
 975            else:
 976                result = transition[0] + self.formatDict[Lexeme.withDetails]
 977                for outcome in transition[1]:
 978                    if outcome:
 979                        result += self.successIndicator
 980                    else:
 981                        result += self.failureIndicator
 982                return result
 983        else:
 984            raise TypeError(
 985                f"Invalid AnyTransition: neither a string, nor a"
 986                f" length-2 tuple consisting of a string followed by a"
 987                f" list of booleans. Got: {transition!r}"
 988            )
 989
 990    def parseSpecificTransition(
 991        self,
 992        content: str
 993    ) -> Tuple[base.DecisionName, base.Transition]:
 994        """
 995        Splits a decision/transition pair into its decision and
 996        transition parts, using the `Lexeme.withDetails` separator.
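
            For example (using the default '%' separator):

            >>> pf = ParseFormat()
            >>> pf.parseSpecificTransition('decision%transition')
            ('decision', 'transition')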
 997        """
 998        sep = self.formatDict[Lexeme.withDetails]
 999        n = content.count(sep)
1000        if n == 0:
1001            raise ParseError(
1002                f"Cannot split '{content}' into a decision name and a"
1003                f" transition name (no separator '{sep}' found)."
1004            )
1005        elif n > 1:
1006            raise ParseError(
1007                f"Cannot split '{content}' into a decision name and a"
1008                f" transition name (too many ({n}) '{sep}' separators"
1009                f" found)."
1010            )
1011        else:
1012            return cast(
1013                Tuple[base.DecisionName, base.Transition],
1014                tuple(content.split(sep))
1015            )
1016
1017    def splitDirections(
1018        self,
1019        content: str
1020    ) -> Tuple[Optional[str], Optional[str]]:
1021        """
1022        Splits a piece of text using the `Lexeme.reciprocalSeparator`
1023        into two pieces. If there is no separator, the second piece will
1024        be `None`; if either side of the separator is blank, that side
1025        will be `None`, and if there is more than one separator, a
1026        `ParseError` will be raised. Whitespace will be stripped from
1027        both sides of each result.
1028
1029        Examples:
1030
1031        >>> pf = ParseFormat()
1032        >>> pf.splitDirections('abc / def')
1033        ('abc', 'def')
1034        >>> pf.splitDirections('abc def ')
1035        ('abc def', None)
1036        >>> pf.splitDirections('abc def /')
1037        ('abc def', None)
1038        >>> pf.splitDirections('/abc def')
1039        (None, 'abc def')
1040        >>> pf.splitDirections('a/b/c') # doctest: +IGNORE_EXCEPTION_DETAIL
1041        Traceback (most recent call last):
1042          ...
1043        ParseError: ...
1044        """
1045        sep = self.formatDict[Lexeme.reciprocalSeparator]
1046        count = content.count(sep)
1047        if count > 1:
1048            raise ParseError(
1049                f"Too many split points ('{sep}') in content:"
1050                f" '{content}' (only one is allowed)."
1051            )
1052
1053        elif count == 1:
1054            before, after = content.split(sep)
1055            before = before.strip()
1056            after = after.strip()
1057            return (before or None, after or None)
1058
1059        else: # no split points
1060            stripped = content.strip()
1061            if stripped:
1062                return stripped, None
1063            else:
1064                return None, None
1065
1066    def parseItem(
1067        self,
1068        item: str
1069    ) -> Union[
1070        base.Capability,
1071        Tuple[base.Token, int],
1072        Tuple[base.MechanismName, base.MechanismState]
1073    ]:
1074        """
1075        Parses an item, which is a capability (just a string), a
1076        token-type*number pair (returned as a tuple with the number
1077        converted to an integer), or a mechanism-name:state pair
1078        (returned as a tuple with the state as a string). The
1079        `Lexeme.tokenCount` and `Lexeme.mechanismSeparator` format
1080        values determine the separators that this looks for.
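
            For example (using the default format):

            >>> pf = ParseFormat()
            >>> pf.parseItem('dash')
            'dash'
            >>> pf.parseItem('coin*3')
            ('coin', 3)
            >>> pf.parseItem('door:open')
            ('door', 'open')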
1081        """
1082        tsep = self.formatDict[Lexeme.tokenCount]
1083        msep = self.formatDict[Lexeme.mechanismSeparator]
1084        if tsep in item:
1085            # It's a token w/ an associated count
1086            parts = item.split(tsep)
1087            if len(parts) != 2:
1088                raise ParseError(
1089                    f"Item '{item}' has a '{tsep}' but doesn't separate"
1090                    f" into a token type and a count."
1091                )
1092            typ, count = parts
1093            try:
1094                num = int(count)
1095            except ValueError:
1096                raise ParseError(
1097                    f"Item '{item}' has invalid token count '{count}'."
1098                )
1099
1100            return (typ, num)
1101        elif msep in item:
1102            parts = item.split(msep)
1103            mechanism = msep.join(parts[:-1])
1104            state = parts[-1]
1105            if mechanism.endswith(msep):
1106                # Just a zone-qualified name...
1107                return item
1108            else:
1109                return (mechanism, state)
1110        else:
1111            # It's just a capability
1112            return item
1113
1114    def unparseDecisionSpecifier(self, spec: base.DecisionSpecifier) -> str:
1115        """
1116        Turns a decision specifier back into a string, which would be
1117        parsed as a decision specifier as part of various different
1118        things.
1119
1120        For example:
1121
1122        >>> pf = ParseFormat()
1123        >>> pf.unparseDecisionSpecifier(
1124        ...     base.DecisionSpecifier(None, None, 'where')
1125        ... )
1126        'where'
1127        >>> pf.unparseDecisionSpecifier(
1128        ...     base.DecisionSpecifier(None, 'zone', 'where')
1129        ... )
1130        'zone::where'
1131        >>> pf.unparseDecisionSpecifier(
1132        ...     base.DecisionSpecifier('domain', 'zone', 'where')
1133        ... )
1134        'domain//zone::where'
1135        >>> pf.unparseDecisionSpecifier(
1136        ...     base.DecisionSpecifier('domain', None, 'where')
1137        ... )
1138        'domain//where'
1139        """
1140        result = spec.name
1141        if spec.zone is not None:
1142            result = (
1143                spec.zone
1144              + self.formatDict[Lexeme.zoneSeparator]
1145              + result
1146            )
1147        if spec.domain is not None:
1148            result = (
1149                spec.domain
1150              + self.formatDict[Lexeme.domainSeparator]
1151              + result
1152            )
1153        return result
1154
1155    def unparseMechanismSpecifier(
1156        self,
1157        spec: base.MechanismSpecifier
1158    ) -> str:
1159        """
1160        Turns a mechanism specifier back into a string, which would be
1161        parsed as a mechanism specifier as part of various different
1162        things. Note that a mechanism specifier with a zone part but no
1163        decision part is not valid, since it would parse as a decision
1164        part instead.
1165
1166        For example:
1167
1168        >>> pf = ParseFormat()
1169        >>> pf.unparseMechanismSpecifier(
1170        ...     base.MechanismSpecifier(None, None, None, 'lever')
1171        ... )
1172        'lever'
1173        >>> pf.unparseMechanismSpecifier(
1174        ...     base.MechanismSpecifier('domain', 'zone', 'decision', 'door')
1175        ... )
1176        'domain//zone::decision::door'
1177        >>> pf.unparseMechanismSpecifier(
1178        ...     base.MechanismSpecifier('domain', None, None, 'door')
1179        ... )
1180        'domain//door'
1181        >>> pf.unparseMechanismSpecifier(
1182        ...     base.MechanismSpecifier(None, 'a', 'b', 'door')
1183        ... )
1184        'a::b::door'
1185        >>> pf.unparseMechanismSpecifier(
1186        ...     base.MechanismSpecifier(None, 'a', None, 'door')
1187        ... )
1188        Traceback (most recent call last):
1189        ...
1190        exploration.base.InvalidMechanismSpecifierError...
1191        >>> pf.unparseMechanismSpecifier(
1192        ...     base.MechanismSpecifier(None, None, 'a', 'door')
1193        ... )
1194        'a::door'
1195        """
1196        if spec.decision is None and spec.zone is not None:
1197            raise base.InvalidMechanismSpecifierError(
1198                f"Mechanism specifier has a zone part but no decision"
1199                f" part; it cannot be unparsed since it would parse"
1200                f" differently:\n{spec}"
1201            )
1202        result = spec.name
1203        if spec.decision is not None:
1204            result = (
1205                spec.decision
1206              + self.formatDict[Lexeme.zoneSeparator]
1207              + result
1208            )
1209        if spec.zone is not None:
1210            result = (
1211                spec.zone
1212              + self.formatDict[Lexeme.zoneSeparator]
1213              + result
1214            )
1215        if spec.domain is not None:
1216            result = (
1217                spec.domain
1218              + self.formatDict[Lexeme.domainSeparator]
1219              + result
1220            )
1221        return result
1222
1223    def effectType(self, effectMarker: str) -> Optional[base.EffectType]:
1224        """
1225        Returns the `base.EffectType` string corresponding to the
1226        given effect marker string. Returns `None` for an unrecognized
1227        marker.
1228        """
1229        return self.effectNames.get(effectMarker)
1230
1231    def parseCommandFromTokens(
1232        self,
1233        tokens: LexedTokens,
1234        start: int = 0,
1235        end: int = -1
1236    ) -> commands.Command:
1237        """
1238        Given tokens that specify a `commands.Command`, parses that
1239        command and returns it. Really just turns the tokens back into
1240        strings and calls `commands.command`.
1241
1242        For example:
1243
1244        >>> pf = ParseFormat()
1245        >>> t = ['val', '5']
1246        >>> c = commands.command(*t)
1247        >>> pf.parseCommandFromTokens(t) == c
1248        True
1249        >>> t = ['op', Lexeme.tokenCount, '$val', '$val']
1250        >>> c = commands.command('op', '*', '$val', '$val')
1251        >>> pf.parseCommandFromTokens(t) == c
1252        True
1253        """
1254        start, end, nTokens = normalizeEnds(tokens, start, end)
1255        args: List[str] = []
1256        for token in tokens[start:end + 1]:
1257            if isinstance(token, Lexeme):
1258                args.append(self.formatDict[token])
1259            else:
1260                args.append(token)
1261
1262        if len(args) == 0:
1263            raise ParseError(
1264                f"No arguments for command:\n{tokens[start:end + 1]}"
1265            )
1266        return commands.command(*args)
1267
1268    def unparseCommand(self, command: commands.Command) -> str:
1269        """
1270        Turns a `Command` back into the string that would produce that
1271        command when parsed using `parseCommandList`.
1272
1273        Note that the results will be more explicit in some cases than what
1274        `parseCommandList` would accept as input.
1275
1276        For example:
1277
1278        >>> pf = ParseFormat()
1279        >>> pf.unparseCommand(
1280        ...     commands.LiteralValue(command='val', value='5')
1281        ... )
1282        'val 5'
1283        >>> pf.unparseCommand(
1284        ...     commands.LiteralValue(command='val', value='"5"')
1285        ... )
1286        'val "5"'
1287        >>> pf.unparseCommand(
1288        ...     commands.EstablishCollection(
1289        ...         command='empty',
1290        ...         collection='list'
1291        ...     )
1292        ... )
1293        'empty list'
1294        >>> pf.unparseCommand(
1295        ...     commands.AppendValue(command='append', value='$_')
1296        ... )
1297        'append $_'
1298        """
1299        candidate = None
1300        for k, v in commands.COMMAND_SETUP.items():
1301            if v[0] == type(command):
1302                if candidate is None:
1303                    candidate = k
1304                else:
1305                    raise ValueError(
1306                        f"COMMAND_SETUP includes multiple keys with"
1307                        f" {type(command)} as their value type:"
1308                        f" '{candidate}' and '{k}'."
1309                    )
1310
1311        if candidate is None:
1312            raise ValueError(
1313                f"COMMAND_SETUP has no key with {type(command)} as its"
1314                f" value type."
1315            )
1316
1317        result = candidate
1318        for x in command[1:]:
1319            # TODO: Is this hack good enough?
1320            result += ' ' + str(x)
1321        return result
1322
1323    def unparseCommandList(self, commands: List[commands.Command]) -> str:
1324        """
1325        Takes a list of commands and returns a string that would parse
1326        into them using `parseOneEffectArg`. The result contains
1327        newlines and indentation to make it easier to read.
1328
1329        For example:
1330
1331        >>> pf = ParseFormat()
1332        >>> pf.unparseCommandList(
1333        ...     [commands.command('val', '5'), commands.command('pop')]
1334        ... )
1335        '{\\n  val 5;\\n  pop;\\n}'
1336        """
1337        result = self.formatDict[Lexeme.openCurly]
1338        for cmd in commands:
1339            result += f'\n  {self.unparseCommand(cmd)};'
1340        if len(commands) > 0:
1341            result += '\n'
1342        return result + self.formatDict[Lexeme.closeCurly]
1343
1344    def parseCommandListFromTokens(
1345        self,
1346        tokens: LexedTokens,
1347        start: int = 0
1348    ) -> Tuple[List[commands.Command], int]:
1349        """
1350        Parses a command list from a list of lexed tokens, which must
1351        start with `Lexeme.openCurly`. Returns the parsed command list
1352        as a list of `commands.Command` objects, along with the end
1353        index of that command list (which will be the matching curly
1354        brace).
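
            For example:

            >>> pf = ParseFormat()
            >>> pf.parseCommandListFromTokens(
            ...     [Lexeme.openCurly, 'val', '5', Lexeme.closeCurly]
            ... ) == ([commands.command('val', '5')], 3)
            True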
1355        """
1356        end = self.matchingBrace(
1357            tokens,
1358            start,
1359            Lexeme.openCurly,
1360            Lexeme.closeCurly
1361        )
1362        parts = list(
1363            findSeparatedParts(
1364                tokens,
1365                Lexeme.consequenceSeparator,
1366                start + 1,
1367                end - 1,
1368                Lexeme.openCurly,
1369                Lexeme.closeCurly,
1370            )
1371        )
1372        return (
1373            [
1374                self.parseCommandFromTokens(tokens, fromIndex, toIndex)
1375                for fromIndex, toIndex in parts
1376                if fromIndex <= toIndex  # ignore empty parts
1377            ],
1378            end
1379        )
1380
1381    def parseOneEffectArg(
1382        self,
1383        tokens: LexedTokens,
1384        start: int = 0,
1385        limit: Optional[int] = None
1386    ) -> Tuple[
1387        Union[
1388            base.Capability,  # covers 'str' possibility
1389            Tuple[base.Token, base.TokenCount],
1390            Tuple[Literal['skill'], base.Skill, base.Level],
1391            Tuple[base.MechanismSpecifier, base.MechanismState],
1392            base.DecisionSpecifier,
1393            base.DecisionID,
1394            Literal[Lexeme.inCommon, Lexeme.isHidden],
1395            Tuple[Literal[Lexeme.sepOrDelay, Lexeme.effectCharges], int],
1396            List[commands.Command]
1397        ],
1398        int
1399    ]:
1400        """
1401        Looks at tokens starting at the specified position and parses
1402        one or more of them as an effect argument (an argument that
1403        could be given to `base.effect`). Looks at various key `Lexeme`s
1404        to determine which type to use.
1405
1406        Items in the tokens list beyond the specified limit will not be
1407        considered, even when in theory they could be grouped with items
1408        up to the limit into a more complex argument.
1409
1410        For example:
1411
1412        >>> pf = ParseFormat()
1413        >>> pf.parseOneEffectArg(['hi'])
1414        ('hi', 0)
1415        >>> pf.parseOneEffectArg(['hi'], 1)
1416        Traceback (most recent call last):
1417        ...
1418        IndexError...
1419        >>> pf.parseOneEffectArg(['hi', 'bye'])
1420        ('hi', 0)
1421        >>> pf.parseOneEffectArg(['hi', 'bye'], 1)
1422        ('bye', 1)
1423        >>> pf.parseOneEffectArg(
1424        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1425        ...     0
1426        ... )
1427        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1428 name='gate'), 'open'), 2)
1429        >>> pf.parseOneEffectArg(
1430        ...     ['set', 'gate', Lexeme.mechanismSeparator, 'open'],
1431        ...     1
1432        ... )
1433        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1434 name='gate'), 'open'), 3)
1435        >>> pf.parseOneEffectArg(
1436        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1437        ...     1
1438        ... )
1439        Traceback (most recent call last):
1440        ...
1441        exploration.parsing.ParseError...
1442        >>> pf.parseOneEffectArg(
1443        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1444        ...     2
1445        ... )
1446        ('open', 2)
1447        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, '10'], 0)
1448        (('gold', 10), 2)
1449        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, 'ten'], 0)
1450        Traceback (most recent call last):
1451        ...
1452        exploration.parsing.ParseError...
1453        >>> pf.parseOneEffectArg([Lexeme.inCommon], 0)
1454        (<Lexeme.inCommon: ...>, 0)
1455        >>> pf.parseOneEffectArg([Lexeme.isHidden], 0)
1456        (<Lexeme.isHidden: ...>, 0)
1457        >>> pf.parseOneEffectArg([Lexeme.tokenCount, '3'], 0)
1458        Traceback (most recent call last):
1459        ...
1460        exploration.parsing.ParseError...
1461        >>> pf.parseOneEffectArg([Lexeme.effectCharges, '3'], 0)
1462        ((<Lexeme.effectCharges: ...>, 3), 1)
1463        >>> pf.parseOneEffectArg([Lexeme.tokenCount, 3], 0)  # int is a lexeme
1464        Traceback (most recent call last):
1465        ...
1466        exploration.parsing.ParseError...
1467        >>> pf.parseOneEffectArg([Lexeme.sepOrDelay, '-2'], 0)
1468        ((<Lexeme.sepOrDelay: ...>, -2), 1)
1469        >>> pf.parseOneEffectArg(['agility', Lexeme.skillLevel, '3'], 0)
1470        (('skill', 'agility', 3), 2)
1471        >>> pf.parseOneEffectArg(
1472        ...     [
1473        ...         'main',
1474        ...         Lexeme.domainSeparator,
1475        ...         'zone',
1476        ...         Lexeme.zoneSeparator,
1477        ...         'decision',
1478        ...         Lexeme.zoneSeparator,
1479        ...         'compass',
1480        ...         Lexeme.mechanismSeparator,
1481        ...         'north',
1482        ...         'south',
1483        ...         'east',
1484        ...         'west'
1485        ...     ],
1486        ...     0
1487        ... )
1488        ((MechanismSpecifier(domain='main', zone='zone',\
1489 decision='decision', name='compass'), 'north'), 8)
1490        >>> pf.parseOneEffectArg(
1491        ...     [
1492        ...         'before',
1493        ...         'main',
1494        ...         Lexeme.domainSeparator,
1495        ...         'zone',
1496        ...         Lexeme.zoneSeparator,
1497        ...         'decision',
1498        ...         Lexeme.zoneSeparator,
1499        ...         'compass',
1500        ...         'north',
1501        ...         'south',
1502        ...         'east',
1503        ...         'west'
1504        ...     ],
1505        ...     1
1506        ... )  # a mechanism specifier without a state will become a
1507        ...    # decision specifier
1508        (DecisionSpecifier(domain='main', zone='zone',\
1509 name='decision'), 5)
1510        >>> tokens = [
1511        ...     'set',
1512        ...     'main',
1513        ...     Lexeme.domainSeparator,
1514        ...     'zone',
1515        ...     Lexeme.zoneSeparator,
1516        ...     'compass',
1517        ...     'north',
1518        ...     'bounce',
1519        ... ]
1520        >>> pf.parseOneEffectArg(tokens, 0)
1521        ('set', 0)
1522        >>> pf.parseDecisionSpecifierFromTokens(tokens, 1)
1523        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1524        >>> pf.parseOneEffectArg(tokens, 1)
1525        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1526        >>> pf.parseOneEffectArg(tokens, 6)
1527        ('north', 6)
1528        >>> pf.parseOneEffectArg(tokens, 7)
1529        ('bounce', 7)
1530        >>> pf.parseOneEffectArg(
1531        ...     [
1532        ...         "fort", Lexeme.zoneSeparator, "gate",
1533        ...             Lexeme.mechanismSeparator, "open",
1534        ...     ],
1535        ...     0
1536        ... )
1537        ((MechanismSpecifier(domain=None, zone=None, decision='fort',\
1538 name='gate'), 'open'), 4)
1539        >>> pf.parseOneEffectArg(
1540        ...     [Lexeme.openCurly, 'val', '5', Lexeme.closeCurly],
1541        ...     0
1542        ... ) == ([commands.command('val', '5')], 3)
1543        True
1544        >>> a = [
1545        ...     Lexeme.openCurly, 'val', '5', Lexeme.closeCurly,
1546        ...     Lexeme.openCurly, 'append', Lexeme.consequenceSeparator,
1547        ...     'pop', Lexeme.closeCurly
1548        ... ]
1549        >>> cl = [
1550        ...     [commands.command('val', '5')],
1551        ...     [commands.command('append'), commands.command('pop')]
1552        ... ]
1553        >>> pf.parseOneEffectArg(a, 0) == (cl[0], 3)
1554        True
1555        >>> pf.parseOneEffectArg(a, 4) == (cl[1], 8)
1556        True
1557        >>> pf.parseOneEffectArg(a, 1)
1558        ('val', 1)
1559        >>> pf.parseOneEffectArg(a, 2)
1560        ('5', 2)
1561        >>> pf.parseOneEffectArg(a, 3)
1562        Traceback (most recent call last):
1563        ...
1564        exploration.parsing.ParseError...
1565        """
1566        start, limit, nTokens = normalizeEnds(
1567            tokens,
1568            start,
1569            limit if limit is not None else -1
1570        )
1571        if nTokens == 0:
1572            raise ParseError("No effect arguments available.")
1573
1574        first = tokens[start]
1575
1576        if nTokens == 1:
1577            if first in (Lexeme.inCommon, Lexeme.isHidden):
1578                return (first, start)
1579            elif not isinstance(first, str):
1580                raise ParseError(
1581                    f"Only one token and it's a special character"
1582                    f" ({first} = {repr(self.formatDict[first])})"
1583                )
1584            else:
1585                return (cast(base.Capability, first), start)
1586
1587        assert (nTokens > 1)
1588
1589        second = tokens[start + 1]
1590
1591        # Command lists start with an open curly brace, and effect
1592        # modifiers start with a Lexeme; nothing else may start with one
1593        if first == Lexeme.openCurly:
1594            return self.parseCommandListFromTokens(tokens, start)
1595        elif first in (Lexeme.inCommon, Lexeme.isHidden):
1596            return (first, start)
1597        elif first in (Lexeme.sepOrDelay, Lexeme.effectCharges):
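                # A delay or charges modifier: the token right after it
                # must be a string convertible to an integer count.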
1598            if not isinstance(second, str):
1599                raise ParseError(
1600                    f"Token following a modifier that needs a count"
1601                    f" must be a string in tokens:"
1602                    f"\n{tokens[start:limit + 1]}"
1603                )
1604            try:
1605                val = int(second)
1606            except ValueError:
1607                raise ParseError(
1608                    f"Token following a modifier that needs a count"
1609                    f" must be convertible to an int:"
1610                    f"\n{tokens[start:limit + 1]}"
1611                )
1612
1613            first = cast(
1614                Literal[Lexeme.sepOrDelay, Lexeme.effectCharges],
1615                first
1616            )
1617            return ((first, val), start + 1)
1618        elif not isinstance(first, str):
1619            raise ParseError(
1620                f"First token must be a string unless it's a modifier"
1621                f" lexeme or command/reversion-set opener. Got:"
1622                f"\n{tokens[start:limit + 1]}"
1623            )
1624
1625        # If we have two strings in a row, then the first is our parsed
1626        # value alone and we'll parse the second separately.
1627        if isinstance(second, str):
1628            return (first, start)
1629        elif second in (Lexeme.inCommon, Lexeme.isHidden):
1630            return (first, start)
1631
1632        # From here we need at least 3 tokens to parse any longer
1633        # combination; with only 2, the first token stands alone.
1634        if nTokens < 3:
1635            return (first, start)
1636
1637        third = tokens[start + 2]
1638        if not isinstance(third, str):
1639            return (first, start)
1640
1641        second = cast(Lexeme, second)
1642        third = cast(str, third)
1643
1644        if second in (Lexeme.tokenCount, Lexeme.skillLevel):
1645            try:
1646                num = int(third)
1647            except ValueError:
1648                raise ParseError(
1649                    f"Invalid effect tokens: count for Tokens or level"
1650                    f" for Skill must be convertible to an integer."
1651                    f"\n{tokens[start:limit + 1]}"
1652                )
1653            if second == Lexeme.tokenCount:
1654                return ((first, num), start + 2)  # token/count pair
1655            else:
1656                return (('skill', first, num), start + 2)  # skill/level triple
1657
1658        elif second == Lexeme.mechanismSeparator:  # bare mechanism
1659            return (
1660                (
1661                    base.MechanismSpecifier(
1662                        domain=None,
1663                        zone=None,
1664                        decision=None,
1665                        name=first
1666                    ),
1667                    third
1668                ),
1669                start + 2
1670            )
1671
1672        elif second in (Lexeme.domainSeparator, Lexeme.zoneSeparator):
1673            try:
1674                mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
1675                    tokens,
1676                    start
1677                )  # works whether it's a mechanism or decision specifier...
1678            except ParseError:
1679                return self.parseDecisionSpecifierFromTokens(tokens, start)
1680            if mEnd + 2 > limit:
1681                # No room for following mechanism separator + state
1682                return self.parseDecisionSpecifierFromTokens(tokens, start)
1683            sep = tokens[mEnd + 1]
1684            after = tokens[mEnd + 2]
1685            if sep == Lexeme.mechanismSeparator:
1686                if not isinstance(after, str):
1687                    raise ParseError(
1688                        f"Mechanism separator not followed by state:"
1689                        f"\n{tokens[start]}"
1690                    )
1691                return ((mSpec, after), mEnd + 2)
1692            else:
1693                # No mechanism separator afterwards
1694                return self.parseDecisionSpecifierFromTokens(tokens, start)
1695
1696        else:  # unrecognized as a longer combo
1697            return (first, start)
1698
1699    def coalesceEffectArgs(
1700        self,
1701        tokens: LexedTokens,
1702        start: int = 0,
1703        end: int = -1
1704    ) -> Tuple[
1705        List[  # List of effect args
1706            Union[
1707                base.Capability,  # covers 'str' possibility
1708                Tuple[base.Token, base.TokenCount],
1709                Tuple[Literal['skill'], base.Skill, base.Level],
1710                Tuple[base.MechanismSpecifier, base.MechanismState],
1711                base.DecisionSpecifier,
1712                List[commands.Command],
1713                Set[str]
1714            ]
1715        ],
1716        Tuple[  # Slots for modifiers: common/hidden/charges/delay
1717            Optional[bool],
1718            Optional[bool],
1719            Optional[int],
1720            Optional[int],
1721        ]
1722    ]:
1723        """
1724        Given a region of a lexed tokens list which contains one or more
1725        effect arguments, combines token sequences representing things
1726        like capabilities, mechanism states, token counts, and skill
1727        levels, representing these using the tuples that would be passed
1728        to `base.effect`. Returns a tuple with two elements:
1729
1730        - First, a list that contains several different kinds of
1731            objects, each of which is distinguishable by its type or
1732            part of its value.
1733        - Next, a tuple with four entries for common, hidden, charges,
1734            and/or delay values based on the presence of modifier
1735            sequences. Any or all of these may be `None` if the relevant
1736            modifier was not present (the usual case).
1737
1738        For example:
1739
1740        >>> pf = ParseFormat()
1741        >>> pf.coalesceEffectArgs(["jump"])
1742        (['jump'], (None, None, None, None))
1743        >>> pf.coalesceEffectArgs(["coin", Lexeme.tokenCount, "3", "fly"])
1744        ([('coin', 3), 'fly'], (None, None, None, None))
1745        >>> pf.coalesceEffectArgs(
1746        ...     [
1747        ...         "fort", Lexeme.zoneSeparator, "gate",
1748        ...             Lexeme.mechanismSeparator, "open"
1749        ...     ]
1750        ... )
1751        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1752 name='gate'), 'open')], (None, None, None, None))
1753        >>> pf.coalesceEffectArgs(
1754        ...     [
1755        ...         "main", Lexeme.domainSeparator, "cliff"
1756        ...     ]
1757        ... )
1758        ([DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1759 (None, None, None, None))
1760        >>> pf.coalesceEffectArgs(
1761        ...     [
1762        ...         "door", Lexeme.mechanismSeparator, "open"
1763        ...     ]
1764        ... )
1765        ([(MechanismSpecifier(domain=None, zone=None, decision=None,\
1766 name='door'), 'open')], (None, None, None, None))
1767        >>> pf.coalesceEffectArgs(
1768        ...     [
1769        ...         "fort", Lexeme.zoneSeparator, "gate",
1770        ...             Lexeme.mechanismSeparator, "open",
1771        ...         "canJump",
1772        ...         "coins", Lexeme.tokenCount, "3",
1773        ...         Lexeme.inCommon,
1774        ...         "agility", Lexeme.skillLevel, "-1",
1775        ...         Lexeme.sepOrDelay, "0",
1776        ...         "main", Lexeme.domainSeparator, "cliff"
1777        ...     ]
1778        ... )
1779        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1780 name='gate'), 'open'), 'canJump', ('coins', 3), ('skill', 'agility', -1),\
1781 DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1782 (True, None, None, 0))
1783        >>> pf.coalesceEffectArgs(["bounce", Lexeme.isHidden])
1784        (['bounce'], (None, True, None, None))
1785        >>> pf.coalesceEffectArgs(
1786        ...     ["goto", "3", Lexeme.inCommon, Lexeme.isHidden]
1787        ... )
1788        (['goto', '3'], (True, True, None, None))
1789        """
1790        start, end, nTokens = normalizeEnds(tokens, start, end)
1791        where = start
1792        result: List[  # List of effect args
1793            Union[
1794                base.Capability,  # covers 'str' possibility
1795                Tuple[base.Token, base.TokenCount],
1796                Tuple[Literal['skill'], base.Skill, base.Level],
1797                Tuple[base.MechanismSpecifier, base.MechanismState],
1798                base.DecisionSpecifier,
1799                List[commands.Command],
1800                Set[str]
1801            ]
1802        ] = []
1803        inCommon: Optional[bool] = None
1804        isHidden: Optional[bool] = None
1805        charges: Optional[int] = None
1806        delay: Optional[int] = None
1807        while where <= end:
1808            following, thisEnd = self.parseOneEffectArg(tokens, where, end)
1809            if following == Lexeme.inCommon:
1810                if inCommon is not None:
1811                    raise ParseError(
1812                        f"In-common effect modifier specified more than"
1813                        f" once in effect args:"
1814                        f"\n{tokens[start:end + 1]}"
1815                    )
1816                inCommon = True
1817            elif following == Lexeme.isHidden:
1818                if isHidden is not None:
1819                    raise ParseError(
1820                        f"Is-hidden effect modifier specified more than"
1821                        f" once in effect args:"
1822                        f"\n{tokens[start:end + 1]}"
1823                    )
1824                isHidden = True
1825            elif (
1826                isinstance(following, tuple)
1827            and len(following) == 2
1828            and following[0] in (Lexeme.effectCharges, Lexeme.sepOrDelay)
1829            and isinstance(following[1], int)
1830            ):
1831                if following[0] == Lexeme.effectCharges:
1832                    if charges is not None:
1833                        raise ParseError(
1834                            f"Charges effect modifier specified more than"
1835                            f" once in effect args:"
1836                            f"\n{tokens[start:end + 1]}"
1837                        )
1838                    charges = following[1]
1839                else:
1840                    if delay is not None:
1841                        raise ParseError(
1842                            f"Delay effect modifier specified more than"
1843                            f" once in effect args:"
1844                            f"\n{tokens[start:end + 1]}"
1845                        )
1846                    delay = following[1]
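                # Otherwise, accept only the valid effect-argument
                # shapes from our return type: a capability string, a
                # (token, count) pair, a ('skill', skill, level) triple,
                # a (mechanism, state) pair, a decision specifier, a
                # command list, or a set of strings.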
1847            elif (
1848                    isinstance(following, base.Capability)
1849                 or (
1850                    isinstance(following, tuple)
1851                and len(following) == 2
1852                and isinstance(following[0], base.Token)
1853                and isinstance(following[1], base.TokenCount)
1854                ) or (
1855                    isinstance(following, tuple)
1856                and len(following) == 3
1857                and following[0] == 'skill'
1858                and isinstance(following[1], base.Skill)
1859                and isinstance(following[2], base.Level)
1860                ) or (
1861                    isinstance(following, tuple)
1862                and len(following) == 2
1863                and isinstance(following[0], base.MechanismSpecifier)
1864                and isinstance(following[1], base.MechanismState)
1865                ) or (
1866                    isinstance(following, base.DecisionSpecifier)
1867                ) or (
1868                    isinstance(following, list)
1869                and all(isinstance(item, tuple) for item in following)
1870                    # TODO: Stricter command list check here?
1871                ) or (
1872                    isinstance(following, set)
1873                and all(isinstance(item, str) for item in following)
1874                )
1875            ):
1876                result.append(following)
1877            else:
1878                raise ParseError(f"Invalid coalesced argument: {following}")
1879            where = thisEnd + 1
1880
1881        return (result, (inCommon, isHidden, charges, delay))
1882
1883    def parseEffectFromTokens(
1884        self,
1885        tokens: LexedTokens,
1886        start: int = 0,
1887        end: int = -1
1888    ) -> base.Effect:
1889        """
1890        Given a region of a list of lexed tokens specifying an effect,
1891        returns the `Effect` object that those tokens specify.
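
            For example (mirroring the doctests for `parseEffect`, which
            just lexes a string and delegates to this method):

            >>> pf = ParseFormat()
            >>> pf.parseEffectFromTokens(['gain', 'jump']) == base.effect(
            ...     gain='jump'
            ... )
            True
            >>> pf.parseEffectFromTokens(
            ...     ['set', 'door', Lexeme.mechanismSeparator, 'open']
            ... ) == base.effect(
            ...     set=(base.MechanismSpecifier(None, None, None, 'door'), 'open')
            ... )
            True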
1892        """
1893        start, end, nTokens = normalizeEnds(tokens, start, end)
1894
1895        # Check for empty list
1896        if nTokens == 0:
1897            raise ParseError(
1898                "Effect must include at least a type."
1899            )
1900
1901        firstPart = tokens[start]
1902
1903        if isinstance(firstPart, Lexeme):
1904            raise ParseError(
1905                f"First part of effect must be an effect type. Got"
1906                f" {firstPart} ({repr(self.formatDict[firstPart])})."
1907            )
1908
1909        firstPart = cast(str, firstPart)
1910
1911        # Get the effect type
1912        fType = self.effectType(firstPart)
1913
1914        if fType is None:
1915            raise ParseError(
1916                f"Unrecognized effect type {firstPart!r}. Check the"
1917                f" EffectType entries in the effect names dictionary."
1918            )
1919
1920        if start + 1 > end:  # No tokens left: set empty args
1921            groupedArgs: List[
1922                Union[
1923                    base.Capability,  # covers 'str' possibility
1924                    Tuple[base.Token, base.TokenCount],
1925                    Tuple[Literal['skill'], base.Skill, base.Level],
1926                    Tuple[base.MechanismSpecifier, base.MechanismState],
1927                    base.DecisionSpecifier,
1928                    List[commands.Command],
1929                    Set[str]
1930                ]
1931            ] = []
1932            modifiers: Tuple[
1933                Optional[bool],
1934                Optional[bool],
1935                Optional[int],
1936                Optional[int]
1937            ] = (None, None, None, None)
1938        else:  # Coalesce remaining tokens if there are any
1939            groupedArgs, modifiers = self.coalesceEffectArgs(
1940                tokens,
1941                start + 1,
1942                end
1943            )
1944
1945        # Set up arguments for base.effect and handle modifiers first
1946        args: Dict[
1947            str,
1948            Union[
1949                None,
1950                base.ContextSpecifier,
1951                base.Capability,
1952                Tuple[base.Token, base.TokenCount],
1953                Tuple[Literal['skill'], base.Skill, base.Level],
1954                Tuple[base.MechanismSpecifier, base.MechanismState],
1955                Tuple[base.MechanismSpecifier, List[base.MechanismState]],
1956                List[base.Capability],
1957                base.AnyDecisionSpecifier,
1958                Tuple[base.AnyDecisionSpecifier, base.FocalPointName],
1959                bool,
1960                int,
1961                base.SaveSlot,
1962                Tuple[base.SaveSlot, Set[str]]
1963            ]
1964        ] = {}
1965        if modifiers[0]:
1966            args['applyTo'] = 'common'
1967        if modifiers[1]:
1968            args['hidden'] = True
1969        else:
1970            args['hidden'] = False
1971        if modifiers[2] is not None:
1972            args['charges'] = modifiers[2]
1973        if modifiers[3] is not None:
1974            args['delay'] = modifiers[3]
1975
1976        # Now handle the main effect-type-based argument
1977        if fType in ("gain", "lose"):
1978            if len(groupedArgs) != 1:
1979                raise ParseError(
1980                    f"'{fType}' effect must have exactly one grouped"
1981                    f" argument (got {len(groupedArgs)}):\n{groupedArgs}"
1982                )
1983            thing = groupedArgs[0]
1984            if isinstance(thing, tuple):
1985                if len(thing) == 2:
1986                    if (
1987                        not isinstance(thing[0], base.Token)
1988                     or not isinstance(thing[1], base.TokenCount)
1989                    ):
1990                        raise ParseError(
1991                            f"'{fType}' effect grouped arg pair must be a"
1992                            f" (token, amount) pair. Got:\n{thing}"
1993                        )
1994                elif len(thing) == 3:
1995                    if (
1996                        thing[0] != 'skill'
1997                     or not isinstance(thing[1], base.Skill)
1998                     or not isinstance(thing[2], base.Level)
1999                    ):
2000                        raise ParseError(
2001                            f"'{fType}' effect grouped arg pair must be a"
2002                            f" (token, amount) pair. Got:\n{thing}"
2003                        )
2004                else:
2005                    raise ParseError(
2006                        f"'{fType}' effect grouped arg tuple must have"
2007                        f" length 2 or 3. Got (length {len(thing)}):\n{thing}"
2008                    )
2009            elif not isinstance(thing, base.Capability):
2010                raise ParseError(
2011                    f"'{fType}' effect grouped arg must be a capability"
2012                    f" or a (token, amount) tuple. Got:\n{thing}"
2013                )
2014            args[fType] = thing
2015            return base.effect(**args)  # type:ignore
2016
2017        elif fType == "set":
2018            if len(groupedArgs) != 1:
2019                raise ParseError(
2020                    f"'{fType}' effect must have exactly one grouped"
2021                    f" argument (got {len(groupedArgs)}):\n{groupedArgs}"
2022                )
2023            setVal = groupedArgs[0]
2024            if not isinstance(
2025                setVal,
2026                tuple
2027            ):
2028                raise ParseError(
2029                    f"'{fType}' effect grouped arg must be a tuple. Got:"
2030                    f"\n{setVal}"
2031                )
2032            if len(setVal) == 2:
2033                setWhat, setTo = setVal
2034                if (
2035                    isinstance(setWhat, base.Token)
2036                and isinstance(setTo, base.TokenCount)
2037                ) or (
2038                    isinstance(setWhat, base.MechanismSpecifier)
2039                and isinstance(setTo, base.MechanismState)
2040                ):
2041                    args[fType] = setVal
2042                    return base.effect(**args)  # type:ignore
2043                else:
2044                    raise ParseError(
2045                        f"Invalid '{fType}' effect grouped args:"
2046                        f"\n{groupedArgs}"
2047                    )
2048            elif len(setVal) == 3:
2049                indicator, whichSkill, setTo = setVal
2050                if (
2051                    indicator == 'skill'
2052                and isinstance(whichSkill, base.Skill)
2053                and isinstance(setTo, base.Level)
2054                ):
2055                    args[fType] = setVal
2056                    return base.effect(**args)  # type:ignore
2057                else:
2058                    raise ParseError(
2059                        f"Invalid '{fType}' effect grouped args (not a"
2060                        f" skill):\n{groupedArgs}"
2061                    )
2062            else:
2063                raise ParseError(
2064                    f"Invalid '{fType}' effect grouped args (wrong"
2065                    f" length tuple):\n{groupedArgs}"
2066                )
2067
2068        elif fType == "toggle":
2069            if len(groupedArgs) == 0:
2070                raise ParseError(
2071                    f"'{fType}' effect must have at least one grouped"
2072                    f" argument. Got:\n{groupedArgs}"
2073                )
2074            if (
2075                isinstance(groupedArgs[0], tuple)
2076            and len(groupedArgs[0]) == 2
2077            and isinstance(groupedArgs[0][0], base.MechanismSpecifier)
2078            and isinstance(groupedArgs[0][1], base.MechanismState)
2079            and all(
2080                    isinstance(a, base.MechanismState)
2081                    for a in groupedArgs[1:]
2082                )
2083            ):  # a mechanism toggle
2084                args[fType] = (
2085                    groupedArgs[0][0],
2086                    cast(
2087                        List[base.MechanismState],
2088                        [groupedArgs[0][1]] + groupedArgs[1:]
2089                    )
2090                )
2091                return base.effect(**args)  # type:ignore
2092            elif all(isinstance(a, base.Capability) for a in groupedArgs):
2093                # a capability toggle
2094                args[fType] = cast(List[base.Capability], groupedArgs)
2095                return base.effect(**args)  # type:ignore
2096            else:
2097                raise ParseError(
2098                    f"Invalid arguments for '{fType}' effect. Got:"
2099                    f"\n{groupedArgs}"
2100                )
2101
2102        elif fType in ("bounce", "deactivate"):
2103            if len(groupedArgs) != 0:
2104                raise ParseError(
2105                    f"'{fType}' effect may not include any"
2106                    f" arguments. Got {len(groupedArgs)}:"
2107                    f"\n{groupedArgs}"
2108                )
2109            args[fType] = True
2110            return base.effect(**args)  # type:ignore
2111
2112        elif fType == "follow":
2113            if len(groupedArgs) != 1:
2114                raise ParseError(
2115                    f"'{fType}' effect must include exactly one"
2116                    f" argument. Got {len(groupedArgs)}:"
2117                    f"\n{groupedArgs}"
2118                )
2119
2120            transition = groupedArgs[0]
2121            if not isinstance(transition, base.Transition):
2122                raise ParseError(
2123                    f"Invalid argument for '{fType}' effect. Needed a"
2124                    f" transition but got:\n{groupedArgs}"
2125                )
2126            args[fType] = transition
2127            return base.effect(**args)  # type:ignore
2128
2129        elif fType == "edit":
2130            if len(groupedArgs) == 0:
2131                raise ParseError(
2132                    "An 'edit' effect requires at least one argument."
2133                )
2134            for i, arg in enumerate(groupedArgs):
2135                if not isinstance(arg, list):
2136                    raise ParseError(
2137                        f"'edit' effect argument {i} is not a sub-list:"
2138                        f"\n  {arg!r}"
2139                        f"\nAmong arguments:"
2140                        f"\n  {groupedArgs}"
2141                    )
2142                for j, cmd in enumerate(arg):
2143                    if not isinstance(cmd, tuple):
2144                        raise ParseError(
2145                            f"'edit' effect argument {i} contains"
2146                            f" non-tuple part {j}:"
2147                            f"\n  {cmd!r}"
2148                            f"\nAmong arguments:"
2149                            f"\n  {groupedArgs}"
2150                        )
2151
2152            args[fType] = groupedArgs  # type:ignore
2153            return base.effect(**args)  # type:ignore
2154
2155        elif fType == "goto":
2156            if len(groupedArgs) not in (1, 2):
2157                raise ParseError(
2158                    f"A 'goto' effect must include either one or two"
2159                    f" grouped arguments. Got {len(groupedArgs)}:"
2160                    f"\n{groupedArgs}"
2161                )
2162
2163            first = groupedArgs[0]
2164            if not isinstance(
2165                first,
2166                (base.DecisionName, base.DecisionSpecifier)
2167            ):
2168                raise ParseError(
2169                    f"'{fType}' effect must first specify a destination"
2170                    f" decision. Got:\n{groupedArgs}"
2171                )
2172
2173            # Check if it's really a decision ID
2174            dSpec: base.AnyDecisionSpecifier
2175            if isinstance(first, base.DecisionName):
2176                try:
2177                    dSpec = int(first)
2178                except ValueError:
2179                    dSpec = first
2180            else:
2181                dSpec = first
2182
2183            if len(groupedArgs) == 2:
2184                second = groupedArgs[1]
2185                if not isinstance(second, base.FocalPointName):
2186                    raise ParseError(
2187                        f"'{fType}' effect must have a focal point name"
2188                        f" if it has a second part. Got:\n{groupedArgs}"
2189                    )
2190                args[fType] = (dSpec, second)
2191            else:
2192                args[fType] = dSpec
2193
2194            return base.effect(**args)  # type:ignore
2195
2196        elif fType == "save":
2197            if len(groupedArgs) not in (0, 1):
2198                raise ParseError(
2199                    f"'{fType}' effect must include exactly zero or one"
2200                    f" argument(s). Got {len(groupedArgs)}:"
2201                    f"\n{groupedArgs}"
2202                )
2203
2204            if len(groupedArgs) == 1:
2205                slot = groupedArgs[0]
2206            else:
2207                slot = base.DEFAULT_SAVE_SLOT
2208            if not isinstance(slot, base.SaveSlot):
2209                raise ParseError(
2210                    f"Invalid argument for '{fType}' effect. Needed a"
2211                    f" save slot but got:\n{groupedArgs}"
2212                )
2213            args[fType] = slot
2214            return base.effect(**args)  # type:ignore
2215
2216        else:
2217            raise ParseError(f"Invalid effect type: '{fType}'.")
2218
2219    def parseEffect(self, effectStr: str) -> base.Effect:
2220        """
2221        Works like `parseEffectFromTokens` but starts with a raw string.
2222        For example:
2223
2224        >>> pf = ParseFormat()
2225        >>> pf.parseEffect("gain jump") == base.effect(gain='jump')
2226        True
2227        >>> pf.parseEffect("set door:open") == base.effect(
2228        ...     set=(
2229        ...         base.MechanismSpecifier(None, None, None, 'door'),
2230        ...         'open'
2231        ...     )
2232        ... )
2233        True
2234        >>> pf.parseEffect("set coins*10") == base.effect(set=('coins', 10))
2235        True
2236        >>> pf.parseEffect("set agility^3") == base.effect(
2237        ...     set=('skill', 'agility', 3)
2238        ... )
2239        True
2240        """
2241        return self.parseEffectFromTokens(self.lex(effectStr))
2242
2243    def unparseEffect(self, effect: base.Effect) -> str:
2244        """
2245        The opposite of `parseEffect`; turns an effect back into a
2246        string representation.
2247
2248        For example:
2249
2250        >>> pf = ParseFormat()
2251        >>> e = {
2252        ...     "type": "gain",
2253        ...     "applyTo": "active",
2254        ...     "value": "flight",
2255        ...     "delay": None,
2256        ...     "charges": None,
2257        ...     "hidden": False
2258        ... }
2259        >>> pf.unparseEffect(e)
2260        'gain flight'
2261        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2262        True
2263        >>> s = 'gain flight'
2264        >>> pf.unparseEffect(pf.parseEffect(s)) == s
2265        True
2266        >>> s2 = '  gain\\nflight'
2267        >>> pf.unparseEffect(pf.parseEffect(s2)) == s
2268        True
2269        >>> e = {
2270        ...     "type": "gain",
2271        ...     "applyTo": "active",
2272        ...     "value": ("gold", 5),
2273        ...     "delay": 1,
2274        ...     "charges": 2,
2275        ...     "hidden": False
2276        ... }
2277        >>> pf.unparseEffect(e)
2278        'gain gold*5 ,1 =2'
2279        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2280        True
2281        >>> e = {
2282        ...     "type": "set",
2283        ...     "applyTo": "active",
2284        ...     "value": (
2285        ...         base.MechanismSpecifier(None, None, None, "gears"),
2286        ...         "on"
2287        ...     ),
2288        ...     "delay": None,
2289        ...     "charges": 1,
2290        ...     "hidden": False
2291        ... }
2292        >>> pf.unparseEffect(e)
2293        'set gears:on =1'
2294        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2295        True
2296        >>> e = {
2297        ...     "type": "toggle",
2298        ...     "applyTo": "active",
2299        ...     "value": ["red", "blue"],
2300        ...     "delay": None,
2301        ...     "charges": None,
2302        ...     "hidden": False
2303        ... }
2304        >>> pf.unparseEffect(e)
2305        'toggle red blue'
2306        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2307        True
2308        >>> e = {
2309        ...     "type": "toggle",
2310        ...     "applyTo": "active",
2311        ...     "value": (
2312        ...         base.MechanismSpecifier(None, None, None, "switch"),
2313        ...         ["on", "off"]
2314        ...     ),
2315        ...     "delay": None,
2316        ...     "charges": None,
2317        ...     "hidden": False
2318        ... }
2319        >>> pf.unparseEffect(e)
2320        'toggle switch:on off'
2321        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2322        True
2323        >>> e = {
2324        ...     "type": "deactivate",
2325        ...     "applyTo": "active",
2326        ...     "value": None,
2327        ...     "delay": 2,
2328        ...     "charges": None,
2329        ...     "hidden": False
2330        ... }
2331        >>> pf.unparseEffect(e)
2332        'deactivate ,2'
2333        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2334        True
2335        >>> e = {
2336        ...     "type": "goto",
2337        ...     "applyTo": "common",
2338        ...     "value": 3,
2339        ...     "delay": None,
2340        ...     "charges": None,
2341        ...     "hidden": False
2342        ... }
2343        >>> pf.unparseEffect(e)
2344        'goto 3 +c'
2345        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2346        True
2347        >>> e = {
2348        ...     "type": "goto",
2349        ...     "applyTo": "common",
2350        ...     "value": 3,
2351        ...     "delay": None,
2352        ...     "charges": None,
2353        ...     "hidden": True
2354        ... }
2355        >>> pf.unparseEffect(e)
2356        'goto 3 +c +h'
2357        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2358        True
2359        >>> e = {
2360        ...     "type": "goto",
2361        ...     "applyTo": "active",
2362        ...     "value": 'home',
2363        ...     "delay": None,
2364        ...     "charges": None,
2365        ...     "hidden": False
2366        ... }
2367        >>> pf.unparseEffect(e)
2368        'goto home'
2369        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2370        True
2371        >>> e = base.effect(edit=[
2372        ...     [
2373        ...         commands.command('val', '5'),
2374        ...         commands.command('empty', 'list'),
2375        ...         commands.command('append', '$_')
2376        ...     ],
2377        ...     [
2378        ...         commands.command('val', '11'),
2379        ...         commands.command('assign', 'var', '$_'),
2380        ...         commands.command('op', '+', '$var', '$var')
2381        ...     ],
2382        ... ])
2383        >>> pf.unparseEffect(e)
2384        'edit {\\n  val 5;\\n  empty list;\\n  append $_;\\n}\
2385 {\\n  val 11;\\n  assign var $_;\\n  op + $var $var;\\n}'
2386        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2387        True
2388        """
2389        result: List[str] = []
2390
2391        # Reverse the effect type into a marker
2392        eType = effect['type']
2393        for key, val in self.effectNames.items():
2394            if val == eType:
2395                if len(result) != 0:
2396                    raise ParseError(
2397                        f"Effect map contains multiple matching entries"
2398                        f" for effect type '{effect['type']}':"
2399                        f" '{result[0]}' and '{key}'"
2400                    )
2401                result.append(key)
2402                # Don't break, so that we can detect duplicate entries
2403
2404        eVal = effect['value']
2405        if eType in ('gain', 'lose'):
2406            eVal = cast(Union[base.Capability, Tuple[base.Token, int]], eVal)
2407            if isinstance(eVal, str):  # a capability
2408                result.append(eVal)
2409            else:  # a token
2410                result.append(
2411                    eVal[0]
2412                  + self.formatDict[Lexeme.tokenCount]
2413                  + str(eVal[1])
2414                )
2415        elif eType == 'set':
2416            eVal = cast(
2417                # TODO: Add skill level setting here & elsewhere
2418                Union[
2419                    Tuple[base.Token, base.TokenCount],
2420                    Tuple[base.MechanismSpecifier, base.MechanismState]
2421                ],
2422                eVal
2423            )
2424            if len(eVal) != 2:
2425                raise ValueError(
2426                    f"'set' effect has non-length-2 value:"
2427                    f"\n  {repr(effect)}"
2428                )
2429            if isinstance(eVal[1], int):  # a token count
2430                result.append(eVal[0])
2431                result.append(self.formatDict[Lexeme.tokenCount])
2432                result.append(str(eVal[1]))
2433            else:  # a mechanism
2434                if isinstance(eVal[0], base.MechanismSpecifier):
2435                    mSpec = self.unparseMechanismSpecifier(eVal[0])
2436                else:
2437                    # Otherwise it must be a bare mechanism name
2438                    assert isinstance(eVal[0], base.MechanismName)
2439                    mSpec = eVal[0]
2440                result.append(
2441                    mSpec
2442                  + self.formatDict[Lexeme.mechanismSeparator]
2443                  + eVal[1]
2444                )
2445        elif eType == 'toggle':
2446            if isinstance(eVal, tuple):  # mechanism states
2447                tSpec, states = cast(
2448                    Tuple[
2449                        base.AnyMechanismSpecifier,
2450                        List[base.MechanismState]
2451                    ],
2452                    eVal
2453                )
2454                firstState = states[0]
2455                restStates = states[1:]
2456                if isinstance(tSpec, base.MechanismSpecifier):
2457                    mStr = self.unparseMechanismSpecifier(tSpec)
2458                else:
2459                    mStr = str(tSpec)
2460                result.append(
2461                    mStr
2462                  + self.formatDict[Lexeme.mechanismSeparator]
2463                  + firstState
2464                )
2465                result.extend(restStates)
2466            else:  # capabilities
2467                assert isinstance(eVal, list)
2468                eVal = cast(List[base.Capability], eVal)
2469                result.extend(eVal)
2470        elif eType in ('deactivate', 'bounce'):
2471            if eVal is not None:
2472                raise ValueError(
2473                    f"'{eType}' effect has non-None value:"
2474                    f"\n  {repr(effect)}"
2475                )
2476        elif eType == 'follow':
2477            eVal = cast(base.Transition, eVal)
2478            result.append(eVal)
2479        elif eType == 'edit':
2480            eVal = cast(List[List[commands.Command]], eVal)
2481            if len(eVal) == 0:
2482                result.append('{}')
2483            else:
2484                for cmdList in eVal:
2485                    result.append(
2486                        self.unparseCommandList(cmdList)
2487                    )
2488        elif eType == 'goto':
2489            if isinstance(eVal, base.DecisionSpecifier):
2490                result.append(self.unparseDecisionSpecifier(eVal))
2491            elif isinstance(eVal, (base.DecisionID, base.DecisionName)):
2492                result.append(str(eVal))
2493            elif (
2494                isinstance(eVal, tuple)
2495            and len(eVal) == 2
2496            and isinstance(eVal[1], base.FocalPointName)
2497            ):
2498                if isinstance(eVal[0], base.DecisionSpecifier):
2499                    result.append(self.unparseDecisionSpecifier(eVal[0]))
2500                else:
2501                    result.append(str(eVal[0]))
2502                result.append(eVal[1])
2503            else:
2504                raise ValueError(
2505                    f"'{eType}' effect has invalid value {eVal}"
2506                )
2507        elif eType == 'save':
2508            # It's just a string naming the save slot
2509            result.append(eVal)
2510        else:
2511            raise ValueError(
2512                f"Unrecognized effect type '{eType}' in effect:"
2513                f"\n  {repr(effect)}"
2514            )
2515
2516        # Add modifier strings
2517        if effect['applyTo'] == 'common':
2518            result.append(self.formatDict[Lexeme.inCommon])
2519
2520        if effect['hidden']:
2521            result.append(self.formatDict[Lexeme.isHidden])
2522
2523        dVal = effect['delay']
2524        if dVal is not None:
2525            result.append(
2526                self.formatDict[Lexeme.sepOrDelay] + str(dVal)
2527            )
2528
2529        cVal = effect['charges']
2530        if cVal is not None:
2531            result.append(
2532                self.formatDict[Lexeme.effectCharges] + str(cVal)
2533            )
2534
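            # Join the pieces with single spaces, except that pieces
            # which already begin or end with whitespace (e.g.,
            # multi-line command lists) are attached without extra
            # spacing.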
2535        joined = ''
2536        before = False
2537        for r in result:
2538            if (
2539                r.startswith(' ')
2540             or r.startswith('\n')
2541             or r.endswith(' ')
2542             or r.endswith('\n')
2543            ):
2544                joined += r
2545                before = False
2546            else:
2547                joined += (' ' if before else '') + r
2548                before = True
2549        return joined
2550
2551    def parseDecisionSpecifierFromTokens(
2552        self,
2553        tokens: LexedTokens,
2554        start: int = 0
2555    ) -> Tuple[Union[base.DecisionSpecifier, int], int]:
2556        """
2557        Parses a decision specifier starting at the specified position
2558        in the given tokens list. No ending position is specified, but
2559        instead this function returns a tuple containing the parsed
2560        `base.DecisionSpecifier` along with an index in the tokens list
2561        where the end of the specifier was found.
2562
2563        For example:
2564
2565        >>> pf = ParseFormat()
2566        >>> pf.parseDecisionSpecifierFromTokens(['m'])
2567        (DecisionSpecifier(domain=None, zone=None, name='m'), 0)
2568        >>> pf.parseDecisionSpecifierFromTokens(['12'])  # ID specifier
2569        (12, 0)
2570        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'])
2571        (DecisionSpecifier(domain=None, zone=None, name='a'), 0)
2572        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'], 1)
2573        (DecisionSpecifier(domain=None, zone=None, name='m'), 1)
2574        >>> pf.parseDecisionSpecifierFromTokens(
2575        ...     ['a', Lexeme.domainSeparator, 'm']
2576        ... )
2577        (DecisionSpecifier(domain='a', zone=None, name='m'), 2)
2578        >>> pf.parseDecisionSpecifierFromTokens(
2579        ...     ['a', Lexeme.zoneSeparator, 'm']
2580        ... )
2581        (DecisionSpecifier(domain=None, zone='a', name='m'), 2)
2582        >>> pf.parseDecisionSpecifierFromTokens(
2583        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
2584        ... )
2585        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2586        >>> pf.parseDecisionSpecifierFromTokens(
2587        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
2588        ... )
2589        (DecisionSpecifier(domain='a', zone='b', name='m'), 4)
2590        >>> pf.parseDecisionSpecifierFromTokens(
2591        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
2592        ... )
2593        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2594        >>> pf.parseDecisionSpecifierFromTokens(  # ID-style name w/ zone
2595        ...     ['a', Lexeme.zoneSeparator, '5'],
2596        ... )
2597        Traceback (most recent call last):
2598        ...
2599        exploration.base.InvalidDecisionSpecifierError...
2600        >>> pf.parseDecisionSpecifierFromTokens(
2601        ...     ['d', Lexeme.domainSeparator, '123']
2602        ... )
2603        Traceback (most recent call last):
2604        ...
2605        exploration.base.InvalidDecisionSpecifierError...
2606        >>> pf.parseDecisionSpecifierFromTokens(
2607        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2608        ...     1
2609        ... )
2610        Traceback (most recent call last):
2611        ...
2612        exploration.parsing.ParseError...
2613        >>> pf.parseDecisionSpecifierFromTokens(
2614        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2615        ...     2
2616        ... )
2617        (DecisionSpecifier(domain='b', zone=None, name='m'), 4)
2618        >>> pf.parseDecisionSpecifierFromTokens(
2619        ...     [
2620        ...         'a',
2621        ...         Lexeme.domainSeparator,
2622        ...         'b',
2623        ...         Lexeme.zoneSeparator,
2624        ...         'c',
2625        ...         Lexeme.zoneSeparator,
2626        ...         'm'
2627        ...     ]
2628        ... )
2629        (DecisionSpecifier(domain='a', zone='b', name='c'), 4)
2630        >>> pf.parseDecisionSpecifierFromTokens(
2631        ...     [
2632        ...         'a',
2633        ...         Lexeme.domainSeparator,
2634        ...         'b',
2635        ...         Lexeme.zoneSeparator,
2636        ...         'c',
2637        ...         Lexeme.zoneSeparator,
2638        ...         'm'
2639        ...     ],
2640        ...     2
2641        ... )
2642        (DecisionSpecifier(domain=None, zone='b', name='c'), 4)
2643        >>> pf.parseDecisionSpecifierFromTokens(
2644        ...     [
2645        ...         'a',
2646        ...         Lexeme.domainSeparator,
2647        ...         'b',
2648        ...         Lexeme.zoneSeparator,
2649        ...         'c',
2650        ...         Lexeme.zoneSeparator,
2651        ...         'm'
2652        ...     ],
2653        ...     4
2654        ... )
2655        (DecisionSpecifier(domain=None, zone='c', name='m'), 6)
2656        >>> pf.parseDecisionSpecifierFromTokens(
2657        ...     [
2658        ...         'set',
2659        ...         'main',
2660        ...         Lexeme.domainSeparator,
2661        ...         'zone',
2662        ...         Lexeme.zoneSeparator,
2663        ...         'compass',
2664        ...         'north',
2665        ...         'bounce',
2666        ...     ],
2667        ...     1
2668        ... )
2669        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
2670        """
2671        # Check bounds & normalize start index
2672        nTokens = len(tokens)
2673        if start < -nTokens:
2674            raise IndexError(
2675                f"Invalid start index {start} for {nTokens} tokens (too"
2676                f" negative)."
2677            )
2678        elif start >= nTokens:
2679            raise IndexError(
2680                f"Invalid start index {start} for {nTokens} tokens (too"
2681                f" big)."
2682            )
2683        elif start < 0:
2684            start = nTokens + start
2685
2686        assert (start < nTokens)
2687
2688        first = tokens[start]
2689        if not isinstance(first, str):
2690            raise ParseError(
2691                f"Invalid decision specifier (must start with a name or"
2692                f" id; got: {first} = {self.formatDict[first]})."
2693            )
2694
2695        ds = base.DecisionSpecifier(None, None, first)
2696        result = (base.idOrDecisionSpecifier(ds), start)
2697
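            # Scan forward greedily: each time a separator is followed
            # by another name, extend the parse; as soon as the pattern
            # breaks, return the best (longest) parse found so far.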
2698        domain = None
2699        zoneOrDecision = None
2700
2701        if start + 1 >= nTokens:  # at end of tokens
2702            return result
2703
2704        firstSep = tokens[start + 1]
2705        if firstSep == Lexeme.domainSeparator:
2706            domain = first
2707        elif firstSep == Lexeme.zoneSeparator:
2708            zoneOrDecision = first
2709        else:
2710            return result
2711
2712        if start + 2 >= nTokens:
2713            return result
2714
2715        second = tokens[start + 2]
2716        if isinstance(second, Lexeme):
2717            return result
2718
2719        ds = base.DecisionSpecifier(domain, zoneOrDecision, second)
2720        result = (base.idOrDecisionSpecifier(ds), start + 2)
2721
2722        if start + 3 >= nTokens:
2723            return result
2724
2725        secondSep = tokens[start + 3]
2726        if start + 4 >= nTokens:
2727            return result
2728
2729        third = tokens[start + 4]
2730        if secondSep == Lexeme.zoneSeparator:
2731            if zoneOrDecision is not None:  # two in a row
2732                return result
2733            else:
2734                if not isinstance(third, base.DecisionName):
2735                    return result
2736                else:
2737                    zoneOrDecision = second
2738        else:
2739            return result
2740
2741        if isinstance(third, Lexeme):
2742            return result
2743
2744        ds = base.DecisionSpecifier(domain, zoneOrDecision, third)
2745        return (base.idOrDecisionSpecifier(ds), start + 4)
2746
2747    def parseDecisionSpecifier(
2748        self,
2749        specString: str
2750    ) -> Union[base.DecisionID, base.DecisionSpecifier]:
2751        """
2752        Parses a full `DecisionSpecifier` from a single string. Can
2753        parse integer decision IDs in string form, and returns a
2754        `DecisionID` in that case, otherwise returns a
2755        `DecisionSpecifier`. Assumes that all int-convertible strings
2756        are decision IDs, so it cannot deal with feature names which are
2757        just numbers.
2758
2759        For example:
2760
2761        >>> pf = ParseFormat()
2762        >>> pf.parseDecisionSpecifier('example')
2763        DecisionSpecifier(domain=None, zone=None, name='example')
2764        >>> pf.parseDecisionSpecifier('outer::example')
2765        DecisionSpecifier(domain=None, zone='outer', name='example')
2766        >>> pf.parseDecisionSpecifier('domain//region::feature')
2767        DecisionSpecifier(domain='domain', zone='region', name='feature')
2768        >>> pf.parseDecisionSpecifier('123')
2769        123
2770        >>> pf.parseDecisionSpecifier('region::domain//feature')
2771        Traceback (most recent call last):
2772        ...
2773        exploration.base.InvalidDecisionSpecifierError...
2774        >>> pf.parseDecisionSpecifier('domain1//domain2//feature')
2775        Traceback (most recent call last):
2776        ...
2777        exploration.base.InvalidDecisionSpecifierError...
2778        >>> pf.parseDecisionSpecifier('domain//123')
2779        Traceback (most recent call last):
2780        ...
2781        exploration.base.InvalidDecisionSpecifierError...
2782        >>> pf.parseDecisionSpecifier('region::123')
2783        Traceback (most recent call last):
2784        ...
2785        exploration.base.InvalidDecisionSpecifierError...
2786        """
2787        try:
2788            return int(specString)
2789        except ValueError:
2790            tokens = self.lex(specString)
2791            result, end = self.parseDecisionSpecifierFromTokens(tokens)
2792            if end != len(tokens) - 1:
2793                raise base.InvalidDecisionSpecifierError(
2794                    f"Junk after end of decision specifier:"
2795                    f"\n{tokens[end + 1:]}"
2796                )
2797            return result
2798
2799    def parseFeatureSpecifierFromTokens(
2800        self,
2801        tokens: LexedTokens,
2802        start: int = 0,
2803        limit: int = -1
2804    ) -> Tuple[base.FeatureSpecifier, int]:
2805        """
2806        Parses a `FeatureSpecifier` starting from the specified part of
2807        a tokens list. Returns a tuple containing the feature specifier
2808        and the end position of the end of the feature specifier.
2809
2810        Can parse integer feature IDs in string form, as well as nested
2811        feature specifiers and plain feature specifiers. Assumes that
2812        all int-convertible strings are feature IDs, so it cannot deal
2813        with feature names which are just numbers.
2814
2815        For example:
2816
2817        >>> pf = ParseFormat()
2818        >>> pf.parseFeatureSpecifierFromTokens(['example'])
2819        (FeatureSpecifier(domain=None, within=[], feature='example',\
2820 part=None), 0)
2821        >>> pf.parseFeatureSpecifierFromTokens(['example1', 'example2'], 1)
2822        (FeatureSpecifier(domain=None, within=[], feature='example2',\
2823 part=None), 1)
2824        >>> pf.parseFeatureSpecifierFromTokens(
2825        ...     [
2826        ...         'domain',
2827        ...         Lexeme.domainSeparator,
2828        ...         'region',
2829        ...         Lexeme.zoneSeparator,
2830        ...         'feature',
2831        ...         Lexeme.partSeparator,
2832        ...         'part'
2833        ...     ]
2834        ... )
2835        (FeatureSpecifier(domain='domain', within=['region'],\
2836 feature='feature', part='part'), 6)
2837        >>> pf.parseFeatureSpecifierFromTokens(
2838        ...     [
2839        ...         'outerRegion',
2840        ...         Lexeme.zoneSeparator,
2841        ...         'midRegion',
2842        ...         Lexeme.zoneSeparator,
2843        ...         'innerRegion',
2844        ...         Lexeme.zoneSeparator,
2845        ...         'feature'
2846        ...     ]
2847        ... )
2848        (FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
2849 'innerRegion'], feature='feature', part=None), 6)
2850        >>> pf.parseFeatureSpecifierFromTokens(
2851        ...     [
2852        ...         'outerRegion',
2853        ...         Lexeme.zoneSeparator,
2854        ...         'midRegion',
2855        ...         Lexeme.zoneSeparator,
2856        ...         'innerRegion',
2857        ...         Lexeme.zoneSeparator,
2858        ...         'feature'
2859        ...     ],
2860        ...     1
2861        ... )
2862        Traceback (most recent call last):
2863        ...
2864        exploration.parsing.InvalidFeatureSpecifierError...
2865        >>> pf.parseFeatureSpecifierFromTokens(
2866        ...     [
2867        ...         'outerRegion',
2868        ...         Lexeme.zoneSeparator,
2869        ...         'midRegion',
2870        ...         Lexeme.zoneSeparator,
2871        ...         'innerRegion',
2872        ...         Lexeme.zoneSeparator,
2873        ...         'feature'
2874        ...     ],
2875        ...     2
2876        ... )
2877        (FeatureSpecifier(domain=None, within=['midRegion', 'innerRegion'],\
2878 feature='feature', part=None), 6)
2879        >>> pf.parseFeatureSpecifierFromTokens(
2880        ...     [
2881        ...         'outerRegion',
2882        ...         Lexeme.zoneSeparator,
2883        ...         'feature',
2884        ...         Lexeme.domainSeparator,
2885        ...         'after',
2886        ...     ]
2887        ... )
2888        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2889 feature='feature', part=None), 2)
2890        >>> pf.parseFeatureSpecifierFromTokens(
2891        ...     [
2892        ...         'outerRegion',
2893        ...         Lexeme.zoneSeparator,
2894        ...         'feature',
2895        ...         Lexeme.domainSeparator,
2896        ...         'after',
2897        ...     ],
2898        ...     2
2899        ... )
2900        (FeatureSpecifier(domain='feature', within=[], feature='after',\
2901 part=None), 4)
2902        >>> # Including a limit:
2903        >>> pf.parseFeatureSpecifierFromTokens(
2904        ...     [
2905        ...         'outerRegion',
2906        ...         Lexeme.zoneSeparator,
2907        ...         'midRegion',
2908        ...         Lexeme.zoneSeparator,
2909        ...         'feature',
2910        ...     ],
2911        ...     0,
2912        ...     2
2913        ... )
2914        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2915 feature='midRegion', part=None), 2)
2916        >>> pf.parseFeatureSpecifierFromTokens(
2917        ...     [
2918        ...         'outerRegion',
2919        ...         Lexeme.zoneSeparator,
2920        ...         'midRegion',
2921        ...         Lexeme.zoneSeparator,
2922        ...         'feature',
2923        ...     ],
2924        ...     0,
2925        ...     0
2926        ... )
2927        (FeatureSpecifier(domain=None, within=[], feature='outerRegion',\
2928 part=None), 0)
2929        >>> pf.parseFeatureSpecifierFromTokens(
2930        ...     [
2931        ...         'region',
2932        ...         Lexeme.zoneSeparator,
2933        ...         Lexeme.zoneSeparator,
2934        ...         'feature',
2935        ...     ]
2936        ... )
2937        (FeatureSpecifier(domain=None, within=[], feature='region',\
2938 part=None), 0)
2939        """
2940        start, limit, nTokens = normalizeEnds(tokens, start, limit)
2941
2942        if nTokens == 0:
2943            raise InvalidFeatureSpecifierError(
2944                "Can't parse a feature specifier from 0 tokens."
2945            )
2946        first = tokens[start]
2947        if isinstance(first, Lexeme):
2948            raise InvalidFeatureSpecifierError(
2949                f"Feature specifier can't begin with a special token."
2950                f"Got:\n{tokens[start:limit + 1]}"
2951            )
2952
2953        if nTokens in (1, 2):
2954            # 2 tokens isn't enough for a second part
2955            fs = base.FeatureSpecifier(
2956                domain=None,
2957                within=[],
2958                feature=first,
2959                part=None
2960            )
2961            return (base.normalizeFeatureSpecifier(fs), start)
2962
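        # Look ahead at the first separator and the token after it to
        # decide how to interpret the first token.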
2963        firstSep = tokens[start + 1]
2964        secondPart = tokens[start + 2]
2965
2966        if (
2967            firstSep not in (
2968                Lexeme.domainSeparator,
2969                Lexeme.zoneSeparator,
2970                Lexeme.partSeparator
2971            )
2972         or not isinstance(secondPart, str)
2973        ):
2974            # Following tokens won't work out
2975            fs = base.FeatureSpecifier(
2976                domain=None,
2977                within=[],
2978                feature=first,
2979                part=None
2980            )
2981            return (base.normalizeFeatureSpecifier(fs), start)
2982
2983        if firstSep == Lexeme.domainSeparator:
2984            if start + 2 > limit:
2985                return (
2986                    base.FeatureSpecifier(
2987                        domain=first,
2988                        within=[],
2989                        feature=secondPart,
2990                        part=None
2991                    ),
2992                    start + 2
2993                )
2994            else:
2995                rest, restEnd = self.parseFeatureSpecifierFromTokens(
2996                    tokens,
2997                    start + 2,
2998                    limit
2999                )
3000                if rest.domain is not None:  # two domainSeparators in a row
3001                    fs = base.FeatureSpecifier(
3002                        domain=first,
3003                        within=[],
3004                        feature=rest.domain,
3005                        part=None
3006                    )
3007                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3008                else:
3009                    fs = base.FeatureSpecifier(
3010                        domain=first,
3011                        within=rest.within,
3012                        feature=rest.feature,
3013                        part=rest.part
3014                    )
3015                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3016
3017        elif firstSep == Lexeme.zoneSeparator:
3018            if start + 2 > limit:
3019                fs = base.FeatureSpecifier(
3020                    domain=None,
3021                    within=[first],
3022                    feature=secondPart,
3023                    part=None
3024                )
3025                return (base.normalizeFeatureSpecifier(fs), start + 2)
3026            else:
3027                rest, restEnd = self.parseFeatureSpecifierFromTokens(
3028                    tokens,
3029                    start + 2,
3030                    limit
3031                )
3032                if rest.domain is not None:  # domain sep after zone sep
3033                    fs = base.FeatureSpecifier(
3034                        domain=None,
3035                        within=[first],
3036                        feature=rest.domain,
3037                        part=None
3038                    )
3039                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3040                else:
3041                    within = [first]
3042                    within.extend(rest.within)
3043                    fs = base.FeatureSpecifier(
3044                        domain=None,
3045                        within=within,
3046                        feature=rest.feature,
3047                        part=rest.part
3048                    )
3049                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3050
3051        else:  # must be partSeparator
3052            fs = base.FeatureSpecifier(
3053                domain=None,
3054                within=[],
3055                feature=first,
3056                part=secondPart
3057            )
3058            return (base.normalizeFeatureSpecifier(fs), start + 2)
3059
3060    def parseFeatureSpecifier(self, specString: str) -> base.FeatureSpecifier:
3061        """
3062        Parses a full `FeatureSpecifier` from a single string. See
3063        `parseFeatureSpecifierFromTokens`.
3064
3065        >>> pf = ParseFormat()
3066        >>> pf.parseFeatureSpecifier('example')
3067        FeatureSpecifier(domain=None, within=[], feature='example', part=None)
3068        >>> pf.parseFeatureSpecifier('outer::example')
3069        FeatureSpecifier(domain=None, within=['outer'], feature='example',\
3070 part=None)
3071        >>> pf.parseFeatureSpecifier('example%%middle')
3072        FeatureSpecifier(domain=None, within=[], feature='example',\
3073 part='middle')
3074        >>> pf.parseFeatureSpecifier('domain//region::feature%%part')
3075        FeatureSpecifier(domain='domain', within=['region'],\
3076 feature='feature', part='part')
3077        >>> pf.parseFeatureSpecifier(
3078        ...     'outerRegion::midRegion::innerRegion::feature'
3079        ... )
3080        FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
3081 'innerRegion'], feature='feature', part=None)
3082        >>> pf.parseFeatureSpecifier('region::domain//feature')
3083        Traceback (most recent call last):
3084        ...
3085        exploration.parsing.InvalidFeatureSpecifierError...
3086        >>> pf.parseFeatureSpecifier('feature%%part1%%part2')
3087        Traceback (most recent call last):
3088        ...
3089        exploration.parsing.InvalidFeatureSpecifierError...
3090        >>> pf.parseFeatureSpecifier('domain1//domain2//feature')
3091        Traceback (most recent call last):
3092        ...
3093        exploration.parsing.InvalidFeatureSpecifierError...
3094        >>> # TODO: Issue warnings for these...
3095        >>> pf.parseFeatureSpecifier('domain//123')  # domain discarded
3096        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3097        >>> pf.parseFeatureSpecifier('region::123')  # zone discarded
3098        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3099        >>> pf.parseFeatureSpecifier('123%%part')
3100        FeatureSpecifier(domain=None, within=[], feature=123, part='part')
3101        """
3102        tokens = self.lex(specString)
3103        result, rEnd = self.parseFeatureSpecifierFromTokens(tokens)
3104        if rEnd != len(tokens) - 1:
3105            raise InvalidFeatureSpecifierError(
3106                f"Feature specifier has extra stuff at end:"
3107                f" {tokens[rEnd + 1:]}"
3108            )
3109        else:
3110            return result
3111
3112    def normalizeFeatureSpecifier(
3113        self,
3114        spec: base.AnyFeatureSpecifier
3115    ) -> base.FeatureSpecifier:
3116        """
3117        Normalizes any kind of feature specifier into an official
3118        `FeatureSpecifier` tuple.
3119
3120        For example:
3121
3122        >>> pf = ParseFormat()
3123        >>> pf.normalizeFeatureSpecifier('town')
3124        FeatureSpecifier(domain=None, within=[], feature='town', part=None)
3125        >>> pf.normalizeFeatureSpecifier(5)
3126        FeatureSpecifier(domain=None, within=[], feature=5, part=None)
3127        >>> pf.parseFeatureSpecifierFromTokens(
3128        ...     [
3129        ...         'domain',
3130        ...         Lexeme.domainSeparator,
3131        ...         'region',
3132        ...         Lexeme.zoneSeparator,
3133        ...         'feature',
3134        ...         Lexeme.partSeparator,
3135        ...         'part'
3136        ...     ]
3137        ... )
3138        (FeatureSpecifier(domain='domain', within=['region'],\
3139 feature='feature', part='part'), 6)
3140        >>> pf.normalizeFeatureSpecifier('dom//one::two::three%%middle')
3141        FeatureSpecifier(domain='dom', within=['one', 'two'],\
3142 feature='three', part='middle')
3143        >>> pf.normalizeFeatureSpecifier(
3144        ...   base.FeatureSpecifier(None, ['region'], 'place', None)
3145        ... )
3146        FeatureSpecifier(domain=None, within=['region'], feature='place',\
3147 part=None)
3148        >>> fs = base.FeatureSpecifier(None, [], 'place', None)
3149        >>> ns = pf.normalizeFeatureSpecifier(fs)
3150        >>> ns is fs  # Doesn't create unnecessary clones
3151        True
3152        """
3153        if isinstance(spec, base.FeatureSpecifier):
3154            return spec
3155        elif isinstance(spec, base.FeatureID):
3156            return base.FeatureSpecifier(None, [], spec, None)
3157        elif isinstance(spec, str):
3158            return self.parseFeatureSpecifier(spec)
3159        else:
3160            raise TypeError(f"Invalid feature specifier type: '{type(spec)}'")
3161
3162    def unparseChallenge(self, challenge: base.Challenge) -> str:
3163        """
3164        Turns a `base.Challenge` into a string that can be turned back
3165        into an equivalent challenge by `parseChallenge`. For example:
3166
3167        >>> pf = ParseFormat()
3168        >>> c = base.challenge(
3169        ...     skills=base.BestSkill('brains', 'brawn'),
3170        ...     level=2,
3171        ...     success=[base.effect(set=('switch', 'on'))],
3172        ...     failure=[
3173        ...         base.effect(deactivate=True, delay=1),
3174        ...         base.effect(bounce=True)
3175        ...     ],
3176        ...     outcome=True
3177        ... )
3178        >>> r = pf.unparseChallenge(c)
3179        >>> r
3180        '<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}'
3181        >>> pf.parseChallenge(r) == c
3182        True
3183        >>> c2 = base.challenge(
3184        ...     skills=base.CombinedSkill(
3185        ...         -2,
3186        ...         base.ConditionalSkill(
3187        ...             base.ReqCapability('tough'),
3188        ...             base.BestSkill(1),
3189        ...             base.BestSkill(-1)
3190        ...         )
3191        ...     ),
3192        ...     level=-2,
3193        ...     success=[base.effect(gain='orb')],
3194        ...     failure=[],
3195        ...     outcome=None
3196        ... )
3197        >>> r2 = pf.unparseChallenge(c2)
3198        >>> r2
3199        '<-2>sum(-2, if(tough, best(1), best(-1))){gain orb}{}'
3200        >>> # TODO: let this parse through without BestSkills...
3201        >>> pf.parseChallenge(r2) == c2
3202        True
3203        """
3204        lt = self.formatDict[Lexeme.angleLeft]
3205        gt = self.formatDict[Lexeme.angleRight]
3206        result = (
3207            lt + str(challenge['level']) + gt
3208          + challenge['skills'].unparse()
3209        )
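        # A known outcome is encoded by the position of the extra
        # 'angleRight' marker: before the success consequence for a
        # success outcome, before the failure consequence for a failure
        # outcome, and omitted entirely when the outcome is None.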
3210        if challenge['outcome'] is True:
3211            result += gt
3212        result += self.unparseConsequence(challenge['success'])
3213        if challenge['outcome'] is False:
3214            result += gt
3215        result += self.unparseConsequence(challenge['failure'])
3216        return result
3217
3218    def unparseCondition(self, condition: base.Condition) -> str:
3219        """
3220        Given a `base.Condition` returns a string that would result in
3221        that condition if given to `parseCondition`. For example:
3222
3223        >>> pf = ParseFormat()
3224        >>> c = base.condition(
3225        ...     condition=base.ReqAny([
3226        ...         base.ReqCapability('brawny'),
3227        ...         base.ReqNot(base.ReqTokens('weights', 3))
3228        ...     ]),
3229        ...     consequence=[base.effect(gain='power')]
3230        ... )
3231        >>> r = pf.unparseCondition(c)
3232        >>> r
3233        '??((brawny|!(weights*3))){gain power}{}'
3234        >>> pf.parseCondition(r) == c
3235        True
3236        """
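        # Format: doubleQuestionmark, then a parenthesized requirement,
        # then the consequence block followed by the alternative block.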
3237        return (
3238            self.formatDict[Lexeme.doubleQuestionmark]
3239          + self.formatDict[Lexeme.openParen]
3240          + condition['condition'].unparse()
3241          + self.formatDict[Lexeme.closeParen]
3242          + self.unparseConsequence(condition['consequence'])
3243          + self.unparseConsequence(condition['alternative'])
3244        )
3245
3246    def unparseConsequence(self, consequence: base.Consequence) -> str:
3247        """
3248        Given a `base.Consequence`, returns a string encoding of it,
3249        using the same format that `parseConsequence` will parse. Uses
3250        function-call-like syntax and curly braces to denote different
3251        sub-consequences. See also `SkillCombination.unparse` and
3252        `Requirement.unparse`. For example:
3253
3254        >>> pf = ParseFormat()
3255        >>> c = [base.effect(gain='one'), base.effect(lose='one')]
3256        >>> pf.unparseConsequence(c)
3257        '{gain one; lose one}'
3258        >>> c = [
3259        ...     base.challenge(
3260        ...         skills=base.BestSkill('brains', 'brawn'),
3261        ...         level=2,
3262        ...         success=[base.effect(set=('switch', 'on'))],
3263        ...         failure=[
3264        ...             base.effect(deactivate=True, delay=1),
3265        ...             base.effect(bounce=True)
3266        ...         ],
3267        ...         outcome=True
3268        ...     )
3269        ... ]
3270        >>> pf.unparseConsequence(c)
3271        '{<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}}'
3272        >>> c[0]['outcome'] = False
3273        >>> pf.unparseConsequence(c)
3274        '{<2>best(brains, brawn){set switch:on}>{deactivate ,1; bounce}}'
3275        >>> c[0]['outcome'] = None
3276        >>> pf.unparseConsequence(c)
3277        '{<2>best(brains, brawn){set switch:on}{deactivate ,1; bounce}}'
3278        >>> c = [
3279        ...     base.condition(
3280        ...         condition=base.ReqAny([
3281        ...             base.ReqCapability('brawny'),
3282        ...             base.ReqNot(base.ReqTokens('weights', 3))
3283        ...         ]),
3284        ...         consequence=[
3285        ...             base.challenge(
3286        ...                 skills=base.CombinedSkill('brains', 'brawn'),
3287        ...                 level=3,
3288        ...                 success=[base.effect(goto='home')],
3289        ...                 failure=[base.effect(bounce=True)],
3290        ...                 outcome=None
3291        ...             )
3292        ...         ]  # no alternative -> empty list
3293        ...     )
3294        ... ]
3295        >>> pf.unparseConsequence(c)
3296        '{??((brawny|!(weights*3))){\
3297<3>sum(brains, brawn){goto home}{bounce}}{}}'
3298        >>> c = [base.effect(gain='if(power){gain "mimic"}')]
3299        >>> # TODO: Make this work!
3300        >>> # pf.unparseConsequence(c)
3301
3302        '{gain "if(power){gain \\\\"mimic\\\\"}"}'
3303        """
3304        result = self.formatDict[Lexeme.openCurly]
3305        for item in consequence:
3306            if 'skills' in item:  # a Challenge
3307                item = cast(base.Challenge, item)
3308                result += self.unparseChallenge(item)
3309
3310            elif 'value' in item:  # an Effect
3311                item = cast(base.Effect, item)
3312                result += self.unparseEffect(item)
3313
3314            elif 'condition' in item:  # a Condition
3315                item = cast(base.Condition, item)
3316                result += self.unparseCondition(item)
3317
3318            else:  # bad dict
3319                raise TypeError(
3320                    f"Invalid consequence: items in the list must be"
3321                    f" Effects, Challenges, or Conditions (got a dictionary"
3322                    f" without 'skills', 'value', or 'condition' keys)."
3323                    f"\nGot item: {repr(item)}"
3324                )
3325            result += '; '
3326
3327        if result.endswith('; '):
3328            result = result[:-2]
3329
3330        return result + self.formatDict[Lexeme.closeCurly]
3331
3332    def parseMechanismSpecifierFromTokens(
3333        self,
3334        tokens: LexedTokens,
3335        start: int = 0
3336    ) -> Tuple[base.MechanismSpecifier, int]:
3337        """
3338        Parses a mechanism specifier starting at the specified position
3339        in the given tokens list. No ending position can be given;
3340        instead, this function returns a tuple containing the parsed
3341        `base.MechanismSpecifier` along with the index in the tokens list
3342        where the end of the specifier was found.
3343
3344        For example:
3345
3346        >>> pf = ParseFormat()
3347        >>> pf.parseMechanismSpecifierFromTokens(['m'])
3348        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3349 name='m'), 0)
3350        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'])
3351        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3352 name='a'), 0)
3353        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'], 1)
3354        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3355 name='m'), 1)
3356        >>> pf.parseMechanismSpecifierFromTokens(
3357        ...     ['a', Lexeme.domainSeparator, 'm']
3358        ... )
3359        (MechanismSpecifier(domain='a', zone=None, decision=None,\
3360 name='m'), 2)
3361        >>> pf.parseMechanismSpecifierFromTokens(
3362        ...     ['a', Lexeme.zoneSeparator, 'm']
3363        ... )
3364        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3365 name='m'), 2)
3366        >>> pf.parseMechanismSpecifierFromTokens(
3367        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
3368        ... )
3369        (MechanismSpecifier(domain=None, zone='a', decision='b',\
3370 name='m'), 4)
3371        >>> pf.parseMechanismSpecifierFromTokens(
3372        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
3373        ... )
3374        (MechanismSpecifier(domain='a', zone=None, decision='b',\
3375 name='m'), 4)
3376        >>> pf.parseMechanismSpecifierFromTokens(
3377        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
3378        ... )
3379        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3380 name='b'), 2)
3381        >>> pf.parseMechanismSpecifierFromTokens(
3382        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3383        ...     1
3384        ... )
3385        Traceback (most recent call last):
3386        ...
3387        exploration.parsing.ParseError...
3388        >>> pf.parseMechanismSpecifierFromTokens(
3389        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3390        ...     2
3391        ... )
3392        (MechanismSpecifier(domain='b', zone=None, decision=None,\
3393 name='m'), 4)
3394        >>> pf.parseMechanismSpecifierFromTokens(
3395        ...     [
3396        ...         'a',
3397        ...         Lexeme.domainSeparator,
3398        ...         'b',
3399        ...         Lexeme.zoneSeparator,
3400        ...         'c',
3401        ...         Lexeme.zoneSeparator,
3402        ...         'm'
3403        ...     ]
3404        ... )
3405        (MechanismSpecifier(domain='a', zone='b', decision='c', name='m'), 6)
3406        >>> pf.parseMechanismSpecifierFromTokens(
3407        ...     [
3408        ...         'a',
3409        ...         Lexeme.domainSeparator,
3410        ...         'b',
3411        ...         Lexeme.zoneSeparator,
3412        ...         'c',
3413        ...         Lexeme.zoneSeparator,
3414        ...         'm'
3415        ...     ],
3416        ...     2
3417        ... )
3418        (MechanismSpecifier(domain=None, zone='b', decision='c',\
3419 name='m'), 6)
3420        >>> pf.parseMechanismSpecifierFromTokens(
3421        ...     [
3422        ...         'a',
3423        ...         Lexeme.domainSeparator,
3424        ...         'b',
3425        ...         Lexeme.zoneSeparator,
3426        ...         'c',
3427        ...         Lexeme.zoneSeparator,
3428        ...         'm'
3429        ...     ],
3430        ...     4
3431        ... )
3432        (MechanismSpecifier(domain=None, zone=None, decision='c',\
3433 name='m'), 6)
3434        >>> pf.parseMechanismSpecifierFromTokens(
3435        ...     [
3436        ...         'roomB',
3437        ...         Lexeme.zoneSeparator,
3438        ...         'switch',
3439        ...         Lexeme.mechanismSeparator,
3440        ...         'on'
3441        ...     ]
3442        ... )
3443        (MechanismSpecifier(domain=None, zone=None, decision='roomB',\
3444 name='switch'), 2)
3445        """
3446        start, tEnd, nLeft = normalizeEnds(tokens, start, -1)
3447
3448        try:
3449            dSpec, dEnd = self.parseDecisionSpecifierFromTokens(
3450                tokens,
3451                start
3452            )
3453        except ParseError:
3454            raise ParseError(
3455                "Failed to parse mechanism specifier: couldn't parse"
3456                " initial mechanism name."
3457            )
3458
3459        if isinstance(dSpec, int):
3460            raise ParseError(
3461                f"Invalid mechanism specifier: cannot use a decision ID"
3462                f" as the decision part. Got: {tokens[start:]}"
3463            )
3464            # TODO: Allow that?
3465
3466        mDomain = dSpec.domain
3467        if dEnd == tEnd or dEnd == tEnd - 1:
3468            return (
3469                base.MechanismSpecifier(
3470                    domain=mDomain,
3471                    zone=None,
3472                    decision=dSpec.zone,
3473                    name=dSpec.name
3474                ),
3475                dEnd
3476            )
3477
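        # Look ahead two tokens: a zoneSeparator followed by a plain
        # name means the decision specifier's zone/name slots shift to
        # zone/decision, and the trailing name is the mechanism name.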
3478        sep = tokens[dEnd + 1]
3479        after = tokens[dEnd + 2]
3480
3481        if sep == Lexeme.zoneSeparator:
3482            if isinstance(after, Lexeme):
3483                return (
3484                    base.MechanismSpecifier(
3485                        domain=mDomain,
3486                        zone=None,
3487                        decision=dSpec.zone,
3488                        name=dSpec.name
3489                    ),
3490                    dEnd
3491                )
3492            else:
3493                return (
3494                    base.MechanismSpecifier(
3495                        domain=mDomain,
3496                        zone=dSpec.zone,
3497                        decision=dSpec.name,
3498                        name=after
3499                    ),
3500                    dEnd + 2
3501                )
3502        else:
3503            return (
3504                base.MechanismSpecifier(
3505                    domain=mDomain,
3506                    zone=None,
3507                    decision=dSpec.zone,
3508                    name=dSpec.name
3509                ),
3510                dEnd
3511            )
3512
3513    def groupReqTokens(
3514        self,
3515        tokens: LexedTokens,
3516        start: int = 0,
3517        end: int = -1
3518    ) -> GroupedTokens:
3519        """
3520        Groups tokens for a requirement, stripping out all parentheses
3521        but replacing parenthesized expressions with sub-lists of tokens.
3522
3523        For example:
3524
3525        >>> pf = ParseFormat()
3526        >>> pf.groupReqTokens(['jump'])
3527        ['jump']
3528        >>> pf.groupReqTokens([Lexeme.openParen, 'jump'])
3529        Traceback (most recent call last):
3530        ...
3531        exploration.parsing.ParseError...
3532        >>> pf.groupReqTokens([Lexeme.closeParen, 'jump'])
3533        Traceback (most recent call last):
3534        ...
3535        exploration.parsing.ParseError...
3536        >>> pf.groupReqTokens(['jump', Lexeme.closeParen])
3537        Traceback (most recent call last):
3538        ...
3539        exploration.parsing.ParseError...
3540        >>> pf.groupReqTokens([Lexeme.openParen, 'jump', Lexeme.closeParen])
3541        [['jump']]
3542        >>> pf.groupReqTokens(
3543        ...     [
3544        ...         Lexeme.openParen,
3545        ...         'jump',
3546        ...         Lexeme.orBar,
3547        ...         'climb',
3548        ...         Lexeme.closeParen,
3549        ...         Lexeme.ampersand,
3550        ...         'crawl',
3551        ...     ]
3552        ... )
3553        [['jump', <Lexeme.orBar: ...>, 'climb'], <Lexeme.ampersand: ...>,\
3554 'crawl']
3555        """
3556        start, end, nTokens = normalizeEnds(tokens, start, end)
3557        if nTokens == 0:
3558            raise ParseError("Ran out of tokens.")
3559
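        # Each open paren pushes a fresh group onto the stack and each
        # close paren pops the top group into the one below it; the
        # bottom group accumulates the top-level result.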
3560        resultsStack: List[GroupedTokens] = [[]]
3561        here = start
3562        while here <= end:
3563            token = tokens[here]
3564            here += 1
3565            if token == Lexeme.closeParen:
3566                if len(resultsStack) == 1:
3567                    raise ParseError(
3568                        f"Too many closing parens at index {here - 1}"
3569                        f" in:\n{tokens[start:end + 1]}"
3570                    )
3571                else:
3572                    closed = resultsStack.pop()
3573                    resultsStack[-1].append(closed)
3574            elif token == Lexeme.openParen:
3575                resultsStack.append([])
3576            else:
3577                resultsStack[-1].append(token)
3578        if len(resultsStack) != 1:
3579            raise ParseError(
3580                f"Mismatched parentheses in tokens:"
3581                f"\n{tokens[start:end + 1]}"
3582            )
3583        return resultsStack[0]
3584
3585    def groupReqTokensByPrecedence(
3586        self,
3587        tokenGroups: GroupedTokens
3588    ) -> GroupedRequirementParts:
3589        """
3590        Re-groups requirement tokens that have been grouped using
3591        `groupReqTokens` according to operator precedence, effectively
3592        creating an equivalent result which would have been obtained by
3593        `groupReqTokens` if all possible non-redundant explicit
3594        parentheses had been included.
3595
3596        Also turns each leaf part into a `Requirement`.
3597
3598        TODO: Make this actually reasonably efficient T_T
3599
3600        Examples:
3601
3602        >>> pf = ParseFormat()
3603        >>> r = pf.parseRequirement('capability&roomB::switch:on')
3604        >>> pf.groupReqTokensByPrecedence(
3605        ...     [
3606        ...         ['jump', Lexeme.orBar, 'climb'],
3607        ...         Lexeme.ampersand,
3608        ...         Lexeme.notMarker,
3609        ...         'coin',
3610        ...         Lexeme.tokenCount,
3611        ...         '3'
3612        ...     ]
3613        ... )
3614        [\
3615[\
3616[[ReqCapability('jump'), <Lexeme.orBar: ...>, ReqCapability('climb')]],\
3617 <Lexeme.ampersand: ...>,\
3618 [<Lexeme.notMarker: ...>, ReqTokens('coin', 3)]\
3619]\
3620]
3621        """
3622        subgrouped: List[Union[Lexeme, str, GroupedRequirementParts]] = []
3623        # First recursively group all parenthesized expressions
3624        for item in tokenGroups:
3625            if isinstance(item, list):
3626                subgrouped.append(self.groupReqTokensByPrecedence(item))
3627            else:
3628                subgrouped.append(item)
3629
3630        # Now process all leaf requirements
3631        leavesConverted: GroupedRequirementParts = []
3632        i = 0
3633        while i < len(subgrouped):
3634            gItem = subgrouped[i]
3635
3636            if isinstance(gItem, list):
3637                leavesConverted.append(gItem)
3638            elif isinstance(gItem, Lexeme):
3639                leavesConverted.append(gItem)
3640            elif i == len(subgrouped) - 1:
3641                if isinstance(gItem, Lexeme):
3642                    raise ParseError(
3643                        f"Lexeme at end of requirement. Grouped tokens:"
3644                        f"\n{tokenGroups}"
3645                    )
3646                else:
3647                    assert isinstance(gItem, str)
3648                    if gItem == 'X':
3649                        leavesConverted.append(base.ReqImpossible())
3650                    elif gItem == 'O':
3651                        leavesConverted.append(base.ReqNothing())
3652                    else:
3653                        leavesConverted.append(base.ReqCapability(gItem))
3654            else:
3655                assert isinstance(gItem, str)
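                # Try a multi-token mechanism requirement first; if
                # that fails, the except clause below falls back to
                # token/mechanism/tag/skill pairs or a bare capability.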
3656                try:
3657                    # TODO: Avoid list copy here...
3658                    couldBeMechanismSpecifier: LexedTokens = []
3659                    for ii in range(i, len(subgrouped)):
3660                        lexemeOrStr = subgrouped[ii]
3661                        if isinstance(lexemeOrStr, (Lexeme, str)):
3662                            couldBeMechanismSpecifier.append(lexemeOrStr)
3663                        else:
3664                            break
3665                    mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
3666                        couldBeMechanismSpecifier
3667                    )
3668                    mEnd += i
3669                    if (
3670                        mEnd >= len(subgrouped) - 2
3671                     or subgrouped[mEnd + 1] != Lexeme.mechanismSeparator
3672                    ):
3673                        raise ParseError("Not a mechanism requirement.")
3674
3675                    mState = subgrouped[mEnd + 2]
3676                    if not isinstance(mState, base.MechanismState):
3677                        raise ParseError("Not a mechanism requirement.")
3678                    leavesConverted.append(base.ReqMechanism(mSpec, mState))
3679                    i = mEnd + 2  # + 1 will happen automatically below
3680                except ParseError:
3681                    following = subgrouped[i + 1]
3682                    if following in (
3683                        Lexeme.tokenCount,
3684                        Lexeme.mechanismSeparator,
3685                        Lexeme.wigglyLine,
3686                        Lexeme.skillLevel
3687                    ):
3688                        if (
3689                            i == len(subgrouped) - 2
3690                         or isinstance(subgrouped[i + 2], Lexeme)
3691                        ):
3692                            if following == Lexeme.wigglyLine:
3693                                # Default tag value is 1
3694                                leavesConverted.append(base.ReqTag(gItem, 1))
3695                                i += 1  # another +1 automatic below
3696                            else:
3697                                raise ParseError(
3698                                    f"Lexeme at end of requirement. Grouped"
3699                                    f" tokens:\n{tokenGroups}"
3700                                )
3701                        else:
3702                            afterwards = subgrouped[i + 2]
3703                            if not isinstance(afterwards, str):
3704                                raise ParseError(
3705                                    f"Lexeme after token/mechanism/tag/skill"
3706                                    f" separator at index {i}."
3707                                    f" Grouped tokens:\n{tokenGroups}"
3708                                )
3709                            i += 2  # another +1 automatic below
3710                            if following == Lexeme.tokenCount:
3711                                try:
3712                                    tCount = int(afterwards)
3713                                except ValueError:
3714                                    raise ParseError(
3715                                        f"Token count could not be"
3716                                        f" parsed as an integer:"
3717                                        f" {afterwards!r}. Grouped"
3718                                        f" tokens:\n{tokenGroups}"
3719                                    )
3720                                leavesConverted.append(
3721                                    base.ReqTokens(gItem, tCount)
3722                                )
3723                            elif following == Lexeme.mechanismSeparator:
3724                                leavesConverted.append(
3725                                    base.ReqMechanism(gItem, afterwards)
3726                                )
3727                            elif following == Lexeme.wigglyLine:
3728                                tVal = self.parseTagValue(afterwards)
3729                                leavesConverted.append(
3730                                    base.ReqTag(gItem, tVal)
3731                                )
3732                            else:
3733                                assert following == Lexeme.skillLevel
3734                                try:
3735                                    sLevel = int(afterwards)
3736                                except ValueError:
3737                                    raise ParseError(
3738                                        f"Skill level could not be"
3739                                        f" parsed as an integer:"
3740                                        f" {afterwards!r}. Grouped"
3741                                        f" tokens:\n{tokenGroups}"
3742                                    )
3743                                leavesConverted.append(
3744                                    base.ReqLevel(gItem, sLevel)
3745                                )
3746                    else:
3747                        if gItem == 'X':
3748                            leavesConverted.append(base.ReqImpossible())
3749                        elif gItem == 'O':
3750                            leavesConverted.append(base.ReqNothing())
3751                        else:
3752                            leavesConverted.append(
3753                                base.ReqCapability(gItem)
3754                            )
3755
3756            # Finally, increment our index:
3757            i += 1
3758
3759        # Now group all NOT operators
3760        i = 0
3761        notsGrouped: GroupedRequirementParts = []
3762        while i < len(leavesConverted):
3763            leafItem = leavesConverted[i]
3764            group = []
3765            while leafItem == Lexeme.notMarker:
3766                group.append(leafItem)
3767                i += 1
3768                if i >= len(leavesConverted):
3769                    raise ParseError(
3770                        f"NOT at end of tokens:\n{leavesConverted}"
3771                    )
3772                leafItem = leavesConverted[i]
3773            if group == []:
3774                notsGrouped.append(leafItem)
3775                i += 1
3776            else:
3777                group.append(leafItem)
3778                i += 1
3779                notsGrouped.append(group)
3780
3781        # Next group all AND operators
3782        i = 0
3783        andsGrouped: GroupedRequirementParts = []
3784        while i < len(notsGrouped):
3785            notGroupItem = notsGrouped[i]
3786            if notGroupItem == Lexeme.ampersand:
3787                if i == len(notsGrouped) - 1:
3788                    raise ParseError(
3789                        f"AND at end of group in tokens:"
3790                        f"\n{tokenGroups}"
3791                        f"\nWhich had been grouped into:"
3792                        f"\n{notsGrouped}"
3793                    )
3794                itemAfter = notsGrouped[i + 1]
3795                if isinstance(itemAfter, Lexeme):
3796                    raise ParseError(
3797                        f"Lexeme after AND in group in tokens:"
3798                        f"\n{tokenGroups}"
3799                        f"\nWhich had been grouped into:"
3800                        f"\n{notsGrouped}"
3801                    )
3802                assert isinstance(itemAfter, (base.Requirement, list))
                if not andsGrouped:
                    raise ParseError(
                        f"AND at start of group in tokens:"
                        f"\n{tokenGroups}"
                        f"\nWhich had been grouped into:"
                        f"\n{notsGrouped}"
                    )
3803                prev = andsGrouped[-1]
3804                if (
3805                    isinstance(prev, list)
3806                and len(prev) > 2
3807                and prev[1] == Lexeme.ampersand
3808                ):
3809                    prev.extend(notsGrouped[i:i + 2])
3810                    i += 1  # with an extra +1 below
3811                else:
3812                    andsGrouped.append(
3813                        [andsGrouped.pop()] + notsGrouped[i:i + 2]
3814                    )
3815                    i += 1  # extra +1 below
3816            else:
3817                andsGrouped.append(notGroupItem)
3818            i += 1
3819
3820        # Finally check that we only have OR operators left over
3821        i = 0
3822        finalResult: GroupedRequirementParts = []
3823        while i < len(andsGrouped):
3824            andGroupItem = andsGrouped[i]
3825            if andGroupItem == Lexeme.orBar:
3826                if i == len(andsGrouped) - 1:
3827                    raise ParseError(
3828                        f"OR at end of group in tokens:"
3829                        f"\n{tokenGroups}"
3830                        f"\nWhich had been grouped into:"
3831                        f"\n{andsGrouped}"
3832                    )
3833                itemAfter = andsGrouped[i + 1]
3834                if isinstance(itemAfter, Lexeme):
3835                    raise ParseError(
3836                        f"Lexeme after OR in group in tokens:"
3837                        f"\n{tokenGroups}"
3838                        f"\nWhich had been grouped into:"
3839                        f"\n{andsGrouped}"
3840                    )
3841                assert isinstance(itemAfter, (base.Requirement, list))
                if not finalResult:
                    raise ParseError(
                        f"OR at start of group in tokens:"
                        f"\n{tokenGroups}"
                        f"\nWhich had been grouped into:"
                        f"\n{andsGrouped}"
                    )
3842                prev = finalResult[-1]
3843                if (
3844                    isinstance(prev, list)
3845                and len(prev) > 2
3846                and prev[1] == Lexeme.orBar
3847                ):
3848                    prev.extend(andsGrouped[i:i + 2])
3849                    i += 1  # with an extra +1 below
3850                else:
3851                    finalResult.append(
3852                        [finalResult.pop()] + andsGrouped[i:i + 2]
3853                    )
3854                    i += 1  # extra +1 below
3855            elif isinstance(andGroupItem, Lexeme):
3856                raise ParseError(
3857                    f"Leftover lexeme when grouping ORs at index {i}"
3858                    f" in grouped tokens:\n{andsGrouped}"
3859                    f"\nOriginal tokens were:\n{tokenGroups}"
3860                )
3861            else:
3862                finalResult.append(andGroupItem)
3863            i += 1
3864
3865        return finalResult
3866
3867    def parseRequirementFromRegroupedTokens(
3868        self,
3869        reqGroups: GroupedRequirementParts
3870    ) -> base.Requirement:
3871        """
3872        Recursive parser that works once tokens have been turned into
3873        requirements at the leaves and grouped by operator precedence
3874        otherwise (see `groupReqTokensByPrecedence`).
3875
3876        TODO: Simplify by just doing this while grouping...?
3877        """
3878        if len(reqGroups) == 0:
3879            raise ParseError("Ran out of tokens.")
3880
3881        elif len(reqGroups) == 1:
3882            only = reqGroups[0]
3883            if isinstance(only, list):
3884                return self.parseRequirementFromRegroupedTokens(only)
3885            elif isinstance(only, base.Requirement):
3886                return only
3887            else:
3888                raise ParseError(f"Invalid singleton group:\n{only}")
3889        elif reqGroups[0] == Lexeme.notMarker:
3890            if (
3891                not all(x == Lexeme.notMarker for x in reqGroups[:-1])
3892             or not isinstance(reqGroups[-1], (list, base.Requirement))
3893            ):
3894                raise ParseError(f"Invalid negation group:\n{reqGroups}")
3895            result = reqGroups[-1]
3896            if isinstance(result, list):
3897                result = self.parseRequirementFromRegroupedTokens(result)
3898            assert isinstance(result, base.Requirement)
3899            for i in range(len(reqGroups) - 1):
3900                result = base.ReqNot(result)
3901            return result
3902        elif len(reqGroups) % 2 == 0:
3903            raise ParseError(f"Even-length non-negation group:\n{reqGroups}")
3904        else:
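            # Alternating operand/operator sequence: every operator
            # must be the same ('&' or '|'); operands are recursively
            # parsed and combined into a single ReqAll or ReqAny.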
3905            if (
3906                reqGroups[1] not in (Lexeme.ampersand, Lexeme.orBar)
3907             or not all(
3908                    reqGroups[i] == reqGroups[1]
3909                    for i in range(1, len(reqGroups), 2)
3910                )
3911            ):
3912                raise ParseError(
3913                    f"Inconsistent operator(s) in group:\n{reqGroups}"
3914                )
3915            op = reqGroups[1]
3916            operands = [
3917                (
3918                    self.parseRequirementFromRegroupedTokens(x)
3919                    if isinstance(x, list)
3920                    else x
3921                )
3922                for x in reqGroups[::2]
3923            ]
3924            if not all(isinstance(x, base.Requirement) for x in operands):
3925                raise ParseError(
3926                    f"Item not reducible to Requirement in group:"
3927                    f"\n{reqGroups}"
3928                )
3929            reqSequence = cast(Sequence[base.Requirement], operands)
3930            if op == Lexeme.ampersand:
3931                return base.ReqAll(reqSequence).flatten()
3932            else:
3933                assert op == Lexeme.orBar
3934                return base.ReqAny(reqSequence).flatten()
3935
3936    def parseRequirementFromGroupedTokens(
3937        self,
3938        tokenGroups: GroupedTokens
3939    ) -> base.Requirement:
3940        """
3941        Parses a `base.Requirement` from a pre-grouped tokens list (see
3942        `groupReqTokens`). Uses the 'orBar', 'ampersand', 'notMarker',
3943        'tokenCount', and 'mechanismSeparator' `Lexeme`s to provide
3944        'or', 'and', and 'not' operators along with distinguishing
3945        between capabilities, tokens, and mechanisms.
3946
3947        Precedence ordering is 'not', then 'and', then 'or', but you
3948        are encouraged to use parentheses for explicit grouping (via
3949        the 'openParen' and 'closeParen' `Lexeme`s, although these are
3950        handled by `groupReqTokens`, so this function won't see them
3951        directly).
3952
3953        You can also use 'X' (without quotes) for a never-satisfied
3954        requirement, and 'O' (without quotes) for an always-satisfied
3955        requirement.
3956
3957        Note that when '!' is applied to a token requirement it flips
3958        the sense of the integer from 'must have at least this many' to
3959        'must have strictly less than this many'.
3960
3961        Raises a `ParseError` if the grouped tokens it is given cannot
3962        be parsed as a `Requirement`.
3963
3964        Examples:
3965
3966        >>> pf = ParseFormat()
3967        >>> pf.parseRequirementFromGroupedTokens(['capability'])
3968        ReqCapability('capability')
3969        >>> pf.parseRequirementFromGroupedTokens(
3970        ...     ['token', Lexeme.tokenCount, '3']
3971        ... )
3972        ReqTokens('token', 3)
3973        >>> pf.parseRequirementFromGroupedTokens(
3974        ...     ['mechanism', Lexeme.mechanismSeparator, 'state']
3975        ... )
3976        ReqMechanism('mechanism', 'state')
3977        >>> pf.parseRequirementFromGroupedTokens(
3978        ...     ['capability', Lexeme.orBar, 'token',
3979        ...      Lexeme.tokenCount, '3']
3980        ... )
3981        ReqAny([ReqCapability('capability'), ReqTokens('token', 3)])
3982        >>> pf.parseRequirementFromGroupedTokens(
3983        ...     ['one', Lexeme.ampersand, 'two', Lexeme.orBar, 'three']
3984        ... )
3985        ReqAny([ReqAll([ReqCapability('one'), ReqCapability('two')]),\
3986 ReqCapability('three')])
3987        >>> pf.parseRequirementFromGroupedTokens(
3988        ...     [
3989        ...         'one',
3990        ...         Lexeme.ampersand,
3991        ...         [
3992        ...              'two',
3993        ...              Lexeme.orBar,
3994        ...              'three'
3995        ...         ]
3996        ...     ]
3997        ... )
3998        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
3999 ReqCapability('three')])])
4000        >>> pf.parseRequirementFromTokens(['X'])
4001        ReqImpossible()
4002        >>> pf.parseRequirementFromTokens(['O'])
4003        ReqNothing()
4004        >>> pf.parseRequirementFromTokens(
4005        ...     [Lexeme.openParen, 'O', Lexeme.closeParen]
4006        ... )
4007        ReqNothing()
4008        """
4009        if len(tokenGroups) == 0:
4010            raise ParseError("Ran out of tokens.")
4011
4012        reGrouped = self.groupReqTokensByPrecedence(tokenGroups)
4013
4014        return self.parseRequirementFromRegroupedTokens(reGrouped)
4015
4016    def parseRequirementFromTokens(
4017        self,
4018        tokens: LexedTokens,
4019        start: int = 0,
4020        end: int = -1
4021    ) -> base.Requirement:
4022        """
4023        Parses a requirement from `LexedTokens` by grouping them first
4024        and then using `parseRequirementFromGroupedTokens`.
4025
4026        For example:
4027
4028        >>> pf = ParseFormat()
4029        >>> pf.parseRequirementFromTokens(
4030        ...     [
4031        ...         'one',
4032        ...         Lexeme.ampersand,
4033        ...         Lexeme.openParen,
4034        ...         'two',
4035        ...         Lexeme.orBar,
4036        ...         'three',
4037        ...         Lexeme.closeParen
4038        ...     ]
4039        ... )
4040        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
4041 ReqCapability('three')])])
4042        """
4043        grouped = self.groupReqTokens(tokens, start, end)
4044        return self.parseRequirementFromGroupedTokens(grouped)
4045
4046    def parseRequirement(self, encoded: str) -> base.Requirement:
4047        """
4048        Parses a `base.Requirement` from a string by calling `lex` and
4049        then feeding it into `ParseFormat.parseRequirementFromTokens`.
4050        As stated in `parseRequirementFromGroupedTokens`, the precedence
4051        binding order is NOT, then AND, then OR.
4052
4053        For example:
4054
4055        >>> pf = ParseFormat()
4056        >>> pf.parseRequirement('! coin * 3')
4057        ReqNot(ReqTokens('coin', 3))
4058        >>> pf.parseRequirement(
4059        ...     '  oneWord | "two words"|"three  words words" '
4060        ... )
4061        ReqAny([ReqCapability('oneWord'), ReqCapability('"two words"'),\
4062 ReqCapability('"three  words words"')])
4063        >>> pf.parseRequirement('words-with-dashes')
4064        ReqCapability('words-with-dashes')
4065        >>> r = pf.parseRequirement('capability&roomB::switch:on')
4066        >>> r
4067        ReqAll([ReqCapability('capability'),\
4068 ReqMechanism(MechanismSpecifier(domain=None, zone=None, decision='roomB',\
4069 name='switch'), 'on')])
4070        >>> r.unparse()
4071        '(capability&roomB::switch:on)'
4072        >>> pf.parseRequirement('!!!one')
4073        ReqNot(ReqNot(ReqNot(ReqCapability('one'))))
4074        >>> pf.parseRequirement('domain//zone::where::mechanism:state')
4075        ReqMechanism(MechanismSpecifier(domain='domain', zone='zone',\
4076 decision='where', name='mechanism'), 'state')
4077        >>> pf.parseRequirement('domain//mechanism:state')
4078        ReqMechanism(MechanismSpecifier(domain='domain', zone=None,\
4079 decision=None, name='mechanism'), 'state')
4080        >>> pf.parseRequirement('where::mechanism:state')
4081        ReqMechanism(MechanismSpecifier(domain=None, zone=None,\
4082 decision='where', name='mechanism'), 'state')
4083        >>> pf.parseRequirement('zone::where::mechanism:state')
4084        ReqMechanism(MechanismSpecifier(domain=None, zone='zone',\
4085 decision='where', name='mechanism'), 'state')
4086        >>> pf.parseRequirement('tag~')
4087        ReqTag('tag', 1)
4088        >>> pf.parseRequirement('tag~&tag2~')
4089        ReqAll([ReqTag('tag', 1), ReqTag('tag2', 1)])
4090        >>> pf.parseRequirement('tag~value|tag~3|tag~3.5|skill^3')
4091        ReqAny([ReqTag('tag', 'value'), ReqTag('tag', 3),\
4092 ReqTag('tag', 3.5), ReqLevel('skill', 3)])
4093        >>> pf.parseRequirement('tag~True|tag~False|tag~None')
4094        ReqAny([ReqTag('tag', True), ReqTag('tag', False), ReqTag('tag', None)])
4095
4096        Precedence examples:
4097
4098        >>> pf.parseRequirement('A|B&C')
4099        ReqAny([ReqCapability('A'), ReqAll([ReqCapability('B'),\
4100 ReqCapability('C')])])
4101        >>> pf.parseRequirement('A&B|C')
4102        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4103 ReqCapability('C')])
4104        >>> pf.parseRequirement('(A&B)|C')
4105        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4106 ReqCapability('C')])
4107        >>> pf.parseRequirement('(A&B|C)&D')
4108        ReqAll([ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4109 ReqCapability('C')]), ReqCapability('D')])
4110
4111        Error examples:
4112
4113        >>> pf.parseRequirement('one ! Word')
4114        Traceback (most recent call last):
4115        ...
4116        exploration.parsing.ParseError...
4117        >>> pf.parseRequirement('a|')
4118        Traceback (most recent call last):
4119        ...
4120        exploration.parsing.ParseError...
4121        >>> pf.parseRequirement('b!')
4122        Traceback (most recent call last):
4123        ...
4124        exploration.parsing.ParseError...
4125        >>> pf.parseRequirement('*emph*')
4126        Traceback (most recent call last):
4127        ...
4128        exploration.parsing.ParseError...
4129        >>> pf.parseRequirement('one&&two')
4130        Traceback (most recent call last):
4131        ...
4132        exploration.parsing.ParseError...
4133        >>> pf.parseRequirement('one!|two')
4134        Traceback (most recent call last):
4135        ...
4136        exploration.parsing.ParseError...
4137        >>> pf.parseRequirement('one*two')
4138        Traceback (most recent call last):
4139        ...
4140        exploration.parsing.ParseError...
4141        >>> pf.parseRequirement('one*')
4142        Traceback (most recent call last):
4143        ...
4144        exploration.parsing.ParseError...
4145        >>> pf.parseRequirement('()')
4146        Traceback (most recent call last):
4147        ...
4148        exploration.parsing.ParseError...
4149        >>> pf.parseRequirement('(one)*3')
4150        Traceback (most recent call last):
4151        ...
4152        exploration.parsing.ParseError...
4153        >>> pf.parseRequirement('a:')
4154        Traceback (most recent call last):
4155        ...
4156        exploration.parsing.ParseError...
4157        >>> pf.parseRequirement('a:b:c')
4158        Traceback (most recent call last):
4159        ...
4160        exploration.parsing.ParseError...
4161        >>> pf.parseRequirement('where::capability')
4162        Traceback (most recent call last):
4163        ...
4164        exploration.parsing.ParseError...
4165        """
4166        return self.parseRequirementFromTokens(
4167            lex(encoded, self.reverseFormat)
4168        )
4169
4170    def parseSkillCombinationFromTokens(
4171        self,
4172        tokens: LexedTokens,
4173        start: int = 0,
4174        end: int = -1
4175    ) -> Union[base.Skill, base.SkillCombination]:
4176        """
4177        Parses a skill combination from the specified range within the
4178        given tokens list. If just a single string token is selected, it
4179        will be returned as a `base.BestSkill` with just that skill
4180        inside.
4181
4182        For example:
4183
4184        >>> pf = ParseFormat()
4185        >>> pf.parseSkillCombinationFromTokens(['climbing'])
4186        BestSkill('climbing')
4187        >>> tokens = [
4188        ...     'best',
4189        ...     Lexeme.openParen,
4190        ...     'brains',
4191        ...     Lexeme.sepOrDelay,
4192        ...     'brawn',
4193        ...     Lexeme.closeParen,
4194        ... ]
4195        >>> pf.parseSkillCombinationFromTokens(tokens)
4196        BestSkill('brains', 'brawn')
4197        >>> tokens[2] = '3'  # not a lexeme so it's a string
4198        >>> pf.parseSkillCombinationFromTokens(tokens)
4199        BestSkill(3, 'brawn')
4200        >>> tokens = [
4201        ...     Lexeme.wigglyLine,
4202        ...     Lexeme.wigglyLine,
4203        ...     'yes',
4204        ... ]
4205        >>> pf.parseSkillCombinationFromTokens(tokens)
4206        InverseSkill(InverseSkill('yes'))
4207        """
4208        start, end, nTokens = normalizeEnds(tokens, start, end)
4209
4210        first = tokens[start]
4211        if nTokens == 1:
4212            if isinstance(first, base.Skill):
4213                try:
4214                    level = int(first)
4215                    return base.BestSkill(level)
4216                except ValueError:
4217                    return base.BestSkill(first)
4218            else:
4219                raise ParseError(
4220                    f"Invalid SkillCombination:\n{tokens[start:end + 1]}"
4221                )
4222
4223        if first == Lexeme.wigglyLine:
4224            inv = self.parseSkillCombinationFromTokens(
4225                tokens,
4226                start + 1,
4227                end
4228            )
4229            if isinstance(inv, base.BestSkill) and len(inv.skills) == 1:
4230                return base.InverseSkill(inv.skills[0])
4231            else:
4232                return base.InverseSkill(inv)
4233
4234        second = tokens[start + 1]
4235        if second != Lexeme.openParen:
4236            raise ParseError(
4237                f"Invalid SkillCombination (missing paren):"
4238                f"\n{tokens[start:end + 1]}"
4239            )
4240
4241        parenEnd = self.matchingBrace(
4242            tokens,
4243            start + 1,
4244            Lexeme.openParen,
4245            Lexeme.closeParen
4246        )
4247        if parenEnd != end:
4248            raise ParseError(
4249                f"Extra junk after SkillCombination:"
4250                f"\n{tokens[parenEnd + 1:end + 1]}"
4251            )
4252
4253        if first == 'if':
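            # A conditional skill combination has the form:
            # if(requirement, skillIfSatisfied, skillOtherwise)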
4254            parts = list(
4255                findSeparatedParts(
4256                    tokens,
4257                    Lexeme.sepOrDelay,
4258                    start + 2,
4259                    end - 1,
4260                    Lexeme.openParen,
4261                    Lexeme.closeParen
4262                )
4263            )
4264            if len(parts) != 3:
4265                raise ParseError(
4266                    f"Wrong number of parts for ConditionalSkill (needs"
4267                    f" 3, got {len(parts)}):"
4268                    f"\n{tokens[start + 2:end]}"
4269                )
4270            reqStart, reqEnd = parts[0]
4271            ifStart, ifEnd = parts[1]
4272            elseStart, elseEnd = parts[2]
4273            return base.ConditionalSkill(
4274                self.parseRequirementFromTokens(tokens, reqStart, reqEnd),
4275                self.parseSkillCombinationFromTokens(tokens, ifStart, ifEnd),
4276                self.parseSkillCombinationFromTokens(
4277                    tokens,
4278                    elseStart,
4279                    elseEnd
4280                ),
4281            )
4282        elif first in ('sum', 'best', 'worst'):
4283            make: type[base.SkillCombination]
4284            if first == 'sum':
4285                make = base.CombinedSkill
4286            elif first == 'best':
4287                make = base.BestSkill
4288            else:
4289                make = base.WorstSkill
4290
4291            subs = []
4292            for partStart, partEnd in findSeparatedParts(
4293                tokens,
4294                Lexeme.sepOrDelay,
4295                start + 2,
4296                end - 1,
4297                Lexeme.openParen,
4298                Lexeme.closeParen
4299            ):
4300                sub = self.parseSkillCombinationFromTokens(
4301                    tokens,
4302                    partStart,
4303                    partEnd
4304                )
4305                if (
4306                    isinstance(sub, base.BestSkill)
4307                and len(sub.skills) == 1
4308                ):
4309                    subs.append(sub.skills[0])
4310                else:
4311                    subs.append(sub)
4312
4313            return make(*subs)
4314        else:
4315            raise ParseError(
4316                f"Invalid SkillCombination:\n{tokens[start:end + 1]}"
4317            )
4318
4319    def parseSkillCombination(
4320        self,
4321        encoded: str
4322    ) -> base.SkillCombination:
4323        """
4324        Parses a `SkillCombination` from a string. Calls `lex` and then
4325        `parseSkillCombinationFromTokens`.
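
            For example (this mirrors the 'best' token-list example
            used in the `parseChallengeFromTokens` doctest):

            >>> pf = ParseFormat()
            >>> pf.parseSkillCombination(
            ...     'best(chess, checkers)'
            ... ) == base.BestSkill('chess', 'checkers')
            True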
4326        """
4327        result = self.parseSkillCombinationFromTokens(
4328            lex(encoded, self.reverseFormat)
4329        )
4330        if not isinstance(result, base.SkillCombination):
4331            return base.BestSkill(result)
4332        else:
4333            return result
4334
4335    def parseConditionFromTokens(
4336        self,
4337        tokens: LexedTokens,
4338        start: int = 0,
4339        end: int = -1
4340    ) -> base.Condition:
4341        """
4342        Parses a `base.Condition` from a lexed tokens list. For example:
4343
4344        >>> pf = ParseFormat()
4345        >>> tokens = [
4346        ...     Lexeme.doubleQuestionmark,
4347        ...     Lexeme.openParen,
4348        ...     "fire",
4349        ...     Lexeme.ampersand,
4350        ...     "water",
4351        ...     Lexeme.closeParen,
4352        ...     Lexeme.openCurly,
4353        ...     "gain",
4354        ...     "wind",
4355        ...     Lexeme.closeCurly,
4356        ...     Lexeme.openCurly,
4357        ...     Lexeme.closeCurly,
4358        ... ]
4359        >>> pf.parseConditionFromTokens(tokens) == base.condition(
4360        ...     condition=base.ReqAll([
4361        ...         base.ReqCapability('fire'),
4362        ...         base.ReqCapability('water')
4363        ...     ]),
4364        ...     consequence=[base.effect(gain='wind')]
4365        ... )
4366        True
4367        """
4368        start, end, nTokens = normalizeEnds(tokens, start, end)
4369        if nTokens < 8:
4370            raise ParseError(
4371                f"A Condition requires at least 8 tokens (got {nTokens})."
4372            )
4373        if tokens[start] != Lexeme.doubleQuestionmark:
4374            raise ParseError(
4375                f"A Condition must start with"
4376                f" {repr(self.formatDict[Lexeme.doubleQuestionmark])}"
4377            )
4378        try:
4379            consequenceStart = tokens.index(Lexeme.openCurly, start)
4380        except ValueError:
4381            raise ParseError("A condition must include a consequence block.")
4382        consequenceEnd = self.matchingBrace(tokens, consequenceStart)
4383        altStart = consequenceEnd + 1
4384        altEnd = self.matchingBrace(tokens, altStart)
4385
4386        if altEnd != end:
4387            raise ParseError(
4388                f"Junk after condition:\n{tokens[altEnd + 1: end + 1]}"
4389            )
4390
4391        return base.condition(
4392            condition=self.parseRequirementFromTokens(
4393                tokens,
4394                start + 1,
4395                consequenceStart - 1
4396            ),
4397            consequence=self.parseConsequenceFromTokens(
4398                tokens,
4399                consequenceStart,
4400                consequenceEnd
4401            ),
4402            alternative=self.parseConsequenceFromTokens(
4403                tokens,
4404                altStart,
4405                altEnd
4406            )
4407        )
4408
4409    def parseCondition(
4410        self,
4411        encoded: str
4412    ) -> base.Condition:
4413        """
4414        Lexes the given string and then calls `parseConditionFromTokens`
4415        to return a `base.Condition`.
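
            For example (the string form of the token list from the
            `parseConditionFromTokens` doctest):

            >>> pf = ParseFormat()
            >>> pf.parseCondition(
            ...     '??(fire&water){gain wind}{}'
            ... ) == base.condition(
            ...     condition=base.ReqAll([
            ...         base.ReqCapability('fire'),
            ...         base.ReqCapability('water')
            ...     ]),
            ...     consequence=[base.effect(gain='wind')]
            ... )
            True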
4416        """
4417        return self.parseConditionFromTokens(
4418            lex(encoded, self.reverseFormat)
4419        )
4420
4421    def parseChallengeFromTokens(
4422        self,
4423        tokens: LexedTokens,
4424        start: int = 0,
4425        end: int = -1
4426    ) -> base.Challenge:
4427        """
4428        Parses a `base.Challenge` from a lexed tokens list.
4429
4430        For example:
4431
4432        >>> pf = ParseFormat()
4433        >>> tokens = [
4434        ...     Lexeme.angleLeft,
4435        ...     '2',
4436        ...     Lexeme.angleRight,
4437        ...     'best',
4438        ...     Lexeme.openParen,
4439        ...     "chess",
4440        ...     Lexeme.sepOrDelay,
4441        ...     "checkers",
4442        ...     Lexeme.closeParen,
4443        ...     Lexeme.openCurly,
4444        ...     "gain",
4445        ...     "coin",
4446        ...     Lexeme.tokenCount,
4447        ...     "5",
4448        ...     Lexeme.closeCurly,
4449        ...     Lexeme.angleRight,
4450        ...     Lexeme.openCurly,
4451        ...     "lose",
4452        ...     "coin",
4453        ...     Lexeme.tokenCount,
4454        ...     "5",
4455        ...     Lexeme.closeCurly,
4456        ... ]
4457        >>> c = pf.parseChallengeFromTokens(tokens)
4458        >>> c['skills'] == base.BestSkill('chess', 'checkers')
4459        True
4460        >>> c['level']
4461        2
4462        >>> c['success'] == [base.effect(gain=('coin', 5))]
4463        True
4464        >>> c['failure'] == [base.effect(lose=('coin', 5))]
4465        True
4466        >>> c['outcome']
4467        False
4468        >>> c == base.challenge(
4469        ...     skills=base.BestSkill('chess', 'checkers'),
4470        ...     level=2,
4471        ...     success=[base.effect(gain=('coin', 5))],
4472        ...     failure=[base.effect(lose=('coin', 5))],
4473        ...     outcome=False
4474        ... )
4475        True
4476        >>> t2 = ['hi'] + tokens + ['bye']  # parsing only part of the list
4477        >>> c == pf.parseChallengeFromTokens(t2, 1, -2)
4478        True
4479        """
4480        start, end, nTokens = normalizeEnds(tokens, start, end)
4481        if nTokens < 8:
4482            raise ParseError(
4483                f"Not enough tokens for a challenge: {nTokens}"
4484            )
4485        if tokens[start] != Lexeme.angleLeft:
4486            raise ParseError(
4487                f"Challenge must start with"
4488                f" {repr(self.formatDict[Lexeme.angleLeft])}"
4489            )
4490        levelStr = tokens[start + 1]
4491        if isinstance(levelStr, Lexeme):
4492            raise ParseError(
4493                f"Challenge must start with a level in angle brackets"
4494                f" (got {repr(self.formatDict[levelStr])})."
4495            )
4496        if tokens[start + 2] != Lexeme.angleRight:
4497            raise ParseError(
4498                f"Challenge must include"
4499                f" {repr(self.formatDict[Lexeme.angleRight])} after"
4500                f" the level."
4501            )
4502        try:
4503            level = int(levelStr)
4504        except ValueError:
4505            raise ParseError(
4506                f"Challenge level must be an integer (got"
4507                f" {repr(tokens[start + 1])})."
4508            )
4509        try:
4510            successStart = tokens.index(Lexeme.openCurly, start)
4511            skillsEnd = successStart - 1
4512        except ValueError:
4513            raise ParseError("A challenge must include a consequence block.")
4514
4515        outcome: Optional[bool] = None
4516        if tokens[skillsEnd] == Lexeme.angleRight:
4517            skillsEnd -= 1
4518            outcome = True
4519        successEnd = self.matchingBrace(tokens, successStart)
4520        failStart = successEnd + 1
4521        if tokens[failStart] == Lexeme.angleRight:
4522            failStart += 1
4523            if outcome is not None:
4524                raise ParseError(
4525                    "Cannot indicate both success and failure as"
4526                    " outcomes in a challenge."
4527                )
4528            outcome = False
4529        failEnd = self.matchingBrace(tokens, failStart)
4530
4531        if failEnd != end:
4532            raise ParseError(
4533                f"Junk after challenge:\n{tokens[failEnd + 1:end + 1]}"
4534            )
4535
4536        skills = self.parseSkillCombinationFromTokens(
4537            tokens,
4538            start + 3,
4539            skillsEnd
4540        )
4541        if isinstance(skills, base.Skill):
4542            skills = base.BestSkill(skills)
4543
4544        return base.challenge(
4545            level=level,
4546            outcome=outcome,
4547            skills=skills,
4548            success=self.parseConsequenceFromTokens(
4549                tokens[successStart:successEnd + 1]
4550            ),
4551            failure=self.parseConsequenceFromTokens(
4552                tokens[failStart:failEnd + 1]
4553            )
4554        )
4555
4556    def parseChallenge(
4557        self,
4558        encoded: str
4559    ) -> base.Challenge:
4560        """
4561        Lexes the given string and then calls `parseChallengeFromTokens`
4562        to return a `base.Challenge`.
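
            For example (the string form of the token list from the
            `parseChallengeFromTokens` doctest):

            >>> pf = ParseFormat()
            >>> c = pf.parseChallenge(
            ...     '<2>best(chess, checkers){gain coin*5}>{lose coin*5}'
            ... )
            >>> c == base.challenge(
            ...     skills=base.BestSkill('chess', 'checkers'),
            ...     level=2,
            ...     success=[base.effect(gain=('coin', 5))],
            ...     failure=[base.effect(lose=('coin', 5))],
            ...     outcome=False
            ... )
            True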
4563        """
4564        return self.parseChallengeFromTokens(
4565            lex(encoded, self.reverseFormat)
4566        )
4567
4568    def parseConsequenceFromTokens(
4569        self,
4570        tokens: LexedTokens,
4571        start: int = 0,
4572        end: int = -1
4573    ) -> base.Consequence:
4574        """
4575        Parses a consequence from a lexed token list. If start and/or end
4576        are specified, only processes the part of the list between those
4577        two indices (inclusive). Use `lex` to turn a string into a
4578        `LexedTokens` list (or use `ParseFormat.parseConsequence` which
4579        does that for you).
4580
4581        An example:
4582
4583        >>> pf = ParseFormat()
4584        >>> tokens = [
4585        ...     Lexeme.openCurly,
4586        ...     'gain',
4587        ...     'power',
4588        ...     Lexeme.closeCurly
4589        ... ]
4590        >>> c = pf.parseConsequenceFromTokens(tokens)
4591        >>> c == [base.effect(gain='power')]
4592        True
4593        >>> tokens.append('hi')
4594        >>> c == pf.parseConsequenceFromTokens(tokens, end=-2)
4595        True
4596        >>> c == pf.parseConsequenceFromTokens(tokens, end=3)
4597        True
4598        """
4599        start, end, nTokens = normalizeEnds(tokens, start, end)
4600
4601        if nTokens < 2:
4602            raise ParseError("Consequence must have at least two tokens.")
4603
4604        if tokens[start] != Lexeme.openCurly:
4605            raise ParseError(
4606                f"Consequence must start with an open curly brace:"
4607                f" {repr(self.formatDict[Lexeme.openCurly])}."
4608            )
4609
4610        if tokens[end] != Lexeme.closeCurly:
4611            raise ParseError(
4612                f"Consequence must end with a closing curly brace:"
4613                f" {repr(self.formatDict[Lexeme.closeCurly])}."
4614            )
4615
4616        if nTokens == 2:
4617            return []
4618
4619        result: base.Consequence = []
4620        for partStart, partEnd in findSeparatedParts(
4621            tokens,
4622            Lexeme.consequenceSeparator,
4623            start + 1,
4624            end - 1,
4625            Lexeme.openCurly,
4626            Lexeme.closeCurly
4627        ):
4628            if partEnd - partStart < 0:
4629                raise ParseError("Empty consequence part.")
4630            if tokens[partStart] == Lexeme.angleLeft:  # a challenge
4631                result.append(
4632                    self.parseChallengeFromTokens(
4633                        tokens,
4634                        partStart,
4635                        partEnd
4636                    )
4637                )
4638            elif tokens[partStart] == Lexeme.doubleQuestionmark:  # condition
4639                result.append(
4640                    self.parseConditionFromTokens(
4641                        tokens,
4642                        partStart,
4643                        partEnd
4644                    )
4645                )
4646            else:  # Must be an effect
4647                result.append(
4648                    self.parseEffectFromTokens(
4649                        tokens,
4650                        partStart,
4651                        partEnd
4652                    )
4653                )
4654
4655        return result
4656
4657    def parseConsequence(self, encoded: str) -> base.Consequence:
4658        """
4659        Parses a consequence from a string. Uses `lex` and
4660        `ParseFormat.parseConsequenceFromTokens`. For example:
4661
4662        >>> pf = ParseFormat()
4663        >>> c = pf.parseConsequence(
4664        ...   '{gain power}'
4665        ... )
4666        >>> c == [base.effect(gain='power')]
4667        True
4668        >>> pf.unparseConsequence(c)
4669        '{gain power}'
4670        >>> c = pf.parseConsequence(
4671        ...     '{\\n'
4672        ...     '    ??(brawny|!weights*3){\\n'
4673        ...     '        <3>sum(brains, brawn){goto home}>{bounce}\\n'
4674        ...     '    }{};\\n'
4675        ...     '    lose coin*1\\n'
4676        ...     '}'
4677        ... )
4678        >>> len(c)
4679        2
4680        >>> c[0]['condition'] == base.ReqAny([
4681        ...     base.ReqCapability('brawny'),
4682        ...     base.ReqNot(base.ReqTokens('weights', 3))
4683        ... ])
4684        True
4685        >>> len(c[0]['consequence'])
4686        1
4687        >>> len(c[0]['alternative'])
4688        0
4689        >>> cons = c[0]['consequence'][0]
4690        >>> cons['skills'] == base.CombinedSkill('brains', 'brawn')
4691        True
4692        >>> cons['level']
4693        3
4694        >>> len(cons['success'])
4695        1
4696        >>> len(cons['failure'])
4697        1
4698        >>> cons['success'][0] == base.effect(goto='home')
4699        True
4700        >>> cons['failure'][0] == base.effect(bounce=True)
4701        True
4702        >>> cons['outcome']
            False
4703        >>> c[0] == base.condition(
4704        ...     condition=base.ReqAny([
4705        ...         base.ReqCapability('brawny'),
4706        ...         base.ReqNot(base.ReqTokens('weights', 3))
4707        ...     ]),
4708        ...     consequence=[
4709        ...         base.challenge(
4710        ...             skills=base.CombinedSkill('brains', 'brawn'),
4711        ...             level=3,
4712        ...             success=[base.effect(goto='home')],
4713        ...             failure=[base.effect(bounce=True)],
4714        ...             outcome=False
4715        ...         )
4716        ...     ]
4717        ... )
4718        True
4719        >>> c[1] == base.effect(lose=('coin', 1))
4720        True
4721        """
4722        return self.parseConsequenceFromTokens(
4723            lex(encoded, self.reverseFormat)
4724        )
4725
4726
4727#---------------------#
4728# Graphviz dot format #
4729#---------------------#
4730
4731class ParsedDotGraph(TypedDict):
4732    """
4733    Represents a parsed `graphviz` dot-format graph consisting of nodes,
4734    edges, and subgraphs, with attributes attached to nodes and/or
4735    edges. An intermediate format during conversion to a full
4736    `DecisionGraph`. Includes the following slots:
4737
4738    - `'nodes'`: A list of tuples each holding a node ID followed by a
4739        list of name/value attribute pairs.
4740    - `'edges'`: A list of tuples each holding a from-ID, a to-ID,
4741        and then a list of name/value attribute pairs.
4742    - `'attrs'`: A list of tuples each holding a name/value attribute
4743        pair for graph-level attributes.
4744    - `'subgraphs'`: A list of subgraphs (each a tuple with a subgraph
4745        name and then another dictionary in the same format as this
4746        one).
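
        For example, a minimal graph with one attributed node, one
        edge, and no subgraphs (the IDs and attribute names here are
        illustrative, not special values):

        >>> g: ParsedDotGraph = {
        ...     'nodes': [(1, [('name', 'A')])],
        ...     'edges': [(1, 2, [])],
        ...     'attrs': [],
        ...     'subgraphs': [],
        ... }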
4747    """
4748    nodes: List[Tuple[int, List[Tuple[str, str]]]]
4749    edges: List[Tuple[int, int, List[Tuple[str, str]]]]
4750    attrs: List[Tuple[str, str]]
4751    subgraphs: List[Tuple[str, 'ParsedDotGraph']]
4752
4753
4754def parseSimpleDotAttrs(fragment: str) -> List[Tuple[str, str]]:
4755    """
4756    Given a string fragment that starts with '[' and ends with ']',
4757    parses a simple attribute list in `graphviz` dot format from that
4758    fragment, returning a list of name/value attribute tuples. Raises a
4759    `DotParseError` if the fragment doesn't have the right format.
4760
4761    Examples:
4762
4763    >>> parseSimpleDotAttrs('[ name=value ]')
4764    [('name', 'value')]
4765    >>> parseSimpleDotAttrs('[ a=b c=d e=f ]')
4766    [('a', 'b'), ('c', 'd'), ('e', 'f')]
4767    >>> parseSimpleDotAttrs('[ a=b "c d"="e f" ]')
4768    [('a', 'b'), ('c d', 'e f')]
4769    >>> parseSimpleDotAttrs('[a=b "c d"="e f"]')
4770    [('a', 'b'), ('c d', 'e f')]
4771    >>> parseSimpleDotAttrs('[ a=b "c d"="e f"')
4772    Traceback (most recent call last):
4773    ...
4774    exploration.parsing.DotParseError...
4775    >>> parseSimpleDotAttrs('a=b "c d"="e f" ]')
4776    Traceback (most recent call last):
4777    ...
4778    exploration.parsing.DotParseError...
4779    >>> parseSimpleDotAttrs('[ a b=c ]')
4780    Traceback (most recent call last):
4781    ...
4782    exploration.parsing.DotParseError...
4783    >>> parseSimpleDotAttrs('[ a=b c ]')
4784    Traceback (most recent call last):
4785    ...
4786    exploration.parsing.DotParseError...
4787    >>> parseSimpleDotAttrs('[ name="value" ]')
4788    [('name', 'value')]
4789    >>> parseSimpleDotAttrs('[ name="\\\\"value\\\\"" ]')
4790    [('name', '"value"')]
4791    """
4792    if not fragment.startswith('[') or not fragment.endswith(']'):
4793        raise DotParseError(
4794            f"Simple attrs fragment missing delimiters:"
4795            f"\n  {repr(fragment)}"
4796        )
4797    result = []
4798    rest = fragment[1:-1].strip()
4799    while rest:
4800        # Get possibly-quoted attribute name:
4801        if rest.startswith('"'):
4802            try:
4803                aName, rest = utils.unquoted(rest)
4804            except ValueError:
4805                raise DotParseError(
4806                    f"Malformed quoted attribute name in"
4807                    f" fragment:\n  {repr(fragment)}"
4808                )
4809            rest = rest.lstrip()
4810            if not rest.startswith('='):
4811                raise DotParseError(
4812                    f"Missing '=' in attribute block in"
4813                    f" fragment:\n  {repr(fragment)}"
4814                )
4815            rest = rest[1:].lstrip()
4816        else:
4817            try:
4818                eqInd = rest.index('=')
4819            except ValueError:
4820                raise DotParseError(
4821                    f"Missing '=' in attribute block in"
4822                    f" fragment:\n  {repr(fragment)}"
4823                )
4824            aName = rest[:eqInd]
4825            if ' ' in aName:
4826                raise DotParseError(
4827                    f"Malformed unquoted attribute name"
4828                    f" {repr(aName)} in fragment:"
4829                    f"\n  {repr(fragment)}"
4830                )
4831            rest = rest[eqInd + 1:].lstrip()
4832
4833        # Get possibly-quoted attribute value:
4834        if rest.startswith('"'):
4835            try:
4836                aVal, rest = utils.unquoted(rest)
4837            except ValueError:
4838                raise DotParseError(
4839                    f"Malformed quoted attribute value in"
4840                    f" fragment:\n  {repr(fragment)}"
4841                )
4842            rest = rest.lstrip()
4843        else:
4844            try:
4845                spInd = rest.index(' ')
4846            except ValueError:
4847                spInd = len(rest)
4848            aVal = rest[:spInd]
4849            rest = rest[spInd:].lstrip()
4850
4851        # Append this attribute pair and continue parsing
4852        result.append((aName, aVal))
4853
4854    return result
4855
4856
4857def parseDotNode(
4858    nodeLine: str
4859) -> Tuple[int, Union[bool, List[Tuple[str, str]]]]:
4860    """
4861    Given a line of text from a `graphviz` dot-format graph
4862    (possibly ending in an '[' to indicate attributes to follow, or
4863    possibly including a '[ ... ]' block with attributes in-line),
4864    parses it as a node declaration, returning the ID of the node,
4865    along with a boolean indicating whether attributes follow or
4866    not. If an inline attribute block is present, the second member
4867    of the tuple will be a list of attribute name/value pairs. In
4868    that case, all attribute names and values must either be quoted
4869    or not include spaces.
4870    Examples:
4871
4872    >>> parseDotNode('1')
4873    (1, False)
4874    >>> parseDotNode(' 1 [ ')
4875    (1, True)
4876    >>> parseDotNode(' 1 [ a=b "c d"="e f" ] ')
4877    (1, [('a', 'b'), ('c d', 'e f')])
4878    >>> parseDotNode(' 3 [ name="A = \\\\"grate:open\\\\"" ]')
4879    (3, [('name', 'A = "grate:open"')])
4880    >>> parseDotNode('  "1"[')
4881    (1, True)
4882    >>> parseDotNode('  100[')
4883    (100, True)
4884    >>> parseDotNode('  1 2')
4885    Traceback (most recent call last):
4886    ...
4887    exploration.parsing.DotParseError...
4888    >>> parseDotNode('  1 [ 2')
4889    Traceback (most recent call last):
4890    ...
4891    exploration.parsing.DotParseError...
4896    >>> parseDotNode('  1 [ junk not=attrs ]')
4897    Traceback (most recent call last):
4898    ...
4899    exploration.parsing.DotParseError...
4900    >>> parseDotNode('  \\n')
4901    Traceback (most recent call last):
4902    ...
4903    exploration.parsing.DotParseError...
4904    """
4905    stripped = nodeLine.strip()
4906    if len(stripped) == 0:
4907        raise DotParseError(
4908            f"Empty node in dot graph on line:\n  {repr(nodeLine)}"
4909        )
4910    hasAttrs: Union[bool, List[Tuple[str, str]]] = False
4911    if stripped.startswith('"'):
4912        nodeName, rest = utils.unquoted(stripped)
4913        rest = rest.strip()
4914        if rest == '[':
4915            hasAttrs = True
4916        elif rest.startswith('[') and rest.endswith(']'):
4917            hasAttrs = parseSimpleDotAttrs(rest)
4918        elif rest:
4919            raise DotParseError(
4920                f"Extra junk {repr(rest)} after node on line:"
4921                f"\n {repr(nodeLine)}"
4922            )
4924    else:
4925        if stripped.endswith('['):
4926            hasAttrs = True
4927            stripped = stripped[:-1].rstrip()
4928        elif stripped.endswith(']'):
4929            try:
4930                # TODO: Why did this used to be rindex? Was that
4931                # important in some case? (That doesn't work since the
4932                # value may contain a quoted open bracket).
4933                attrStart = stripped.index('[')
4934            except ValueError:
4935                raise DotParseError(
4936                    f"Unmatched ']' on line:\n  {repr(nodeLine)}"
4937                )
4938            hasAttrs = parseSimpleDotAttrs(
4939                stripped[attrStart:]
4940            )
4941            stripped = stripped[:attrStart].rstrip()
4942
4943        if ' ' in stripped:
4944            raise DotParseError(
4945                f"Unquoted multi-word node on line:\n  {repr(nodeLine)}"
4946            )
4947        else:
4948            nodeName = stripped
4949
4950    try:
4951        nodeID = int(nodeName)
4952    except ValueError:
4953        raise DotParseError(
4954            f"Node name {repr(nodeName)} is not an integer on"
4955            f" line:\n {repr(nodeLine)}"
4956        )
4957
4958    return (nodeID, hasAttrs)
4959
4960
4961def parseDotAttr(attrLine: str) -> Tuple[str, str]:
4962    """
4963    Given a line of text from a `graphviz` dot-format graph, parses
4964    it as an attribute (maybe-quoted-attr-name =
4965    maybe-quoted-attr-value). Returns the (maybe-unquoted) attr-name
4966    and the (maybe-unquoted) attr-value as a pair of strings. Raises
4967    a `DotParseError` if the line cannot be parsed as an attribute.
4968    Examples:
4969
4970    >>> parseDotAttr("a=b")
4971    ('a', 'b')
4972    >>> parseDotAttr("  a = b ")
4973    ('a', 'b')
4974    >>> parseDotAttr('"a" = "b"')
4975    ('a', 'b')
4976    >>> parseDotAttr('"a" -> "b"')
4977    Traceback (most recent call last):
4978    ...
4979    exploration.parsing.DotParseError...
4980    >>> parseDotAttr('"a" = "b" c')
4981    Traceback (most recent call last):
4982    ...
4983    exploration.parsing.DotParseError...
4984    >>> parseDotAttr('a')
4985    Traceback (most recent call last):
4986    ...
4987    exploration.parsing.DotParseError...
4988    >>> parseDotAttr('')
4989    Traceback (most recent call last):
4990    ...
4991    exploration.parsing.DotParseError...
4992    >>> parseDotAttr('0 [ name="A" ]')
4993    Traceback (most recent call last):
4994    ...
4995    exploration.parsing.DotParseError...
4996    """
4997    stripped = attrLine.lstrip()
4998    if len(stripped) == 0:
4999        raise DotParseError(
5000            f"Empty attribute in dot graph on line:\n  {repr(attrLine)}"
5001        )
5002    if stripped.endswith(']') or stripped.endswith('['):
5003        raise DotParseError(
5004            f"Node attribute ends in '[' or ']' on line:"
5005            f"\n  {repr(attrLine)}"
5006        )
5007    if stripped.startswith('"'):
5008        try:
5009            attrName, rest = utils.unquoted(stripped)
5010        except ValueError:
5011            raise DotParseError(
5012                f"Unmatched quotes in line:\n  {repr(attrLine)}"
5013            )
5014        rest = rest.lstrip()
5015        if len(rest) == 0 or rest[0] != '=':
5016            raise DotParseError(
5017                f"No equals sign following attribute name on"
5018                f" line:\n  {repr(attrLine)}"
5019            )
5020        rest = rest[1:].lstrip()
5021    else:
5022        try:
5023            eqInd = stripped.index('=')
5024        except ValueError:
5025            raise DotParseError(
5026                f"No equals sign in attribute line:"
5027                f"\n  {repr(attrLine)}"
5028            )
5029        attrName = stripped[:eqInd].rstrip()
5030        rest = stripped[eqInd + 1:].lstrip()
5031
5032    if rest.startswith('"'):
5033        try:
5034            attrVal, rest = utils.unquoted(rest)
5035        except ValueError:
5036            raise DotParseError(
5037                f"Unmatched quotes in line:\n  {repr(attrLine)}"
5038            )
5039        if rest.strip():
5040            raise DotParseError(
5041                f"Junk after attribute on line:"
5042                f"\n  {repr(attrLine)}"
5043            )
5044    else:
5045        attrVal = rest.rstrip()
5046
5047    return attrName, attrVal
5048
5049
5050def parseDotEdge(edgeLine: str) -> Tuple[int, int, bool]:
5051    """
5052    Given a line of text from a `graphviz` dot-format graph, parses
5053    it as an edge (fromID -> toID). Returns a tuple containing the
5054    from ID, the to ID, and a boolean indicating whether attributes
5055    follow the edge on subsequent lines (true if the line ends with
5056    '['). Raises a `DotParseError` if the line cannot be parsed as
5057    an edge pair. Examples:
5058
5059    >>> parseDotEdge("1 -> 2")
5060    (1, 2, False)
5061    >>> parseDotEdge("  1 -> 2 ")
5062    (1, 2, False)
5063    >>> parseDotEdge('"1" -> "2"')
5064    (1, 2, False)
5065    >>> parseDotEdge('"1" -> "2" [')
5066    (1, 2, True)
5067    >>> parseDotEdge("a -> b")
5068    Traceback (most recent call last):
5069    ...
5070    exploration.parsing.DotParseError...
5071    >>> parseDotEdge('"1" = "1"')
5072    Traceback (most recent call last):
5073    ...
5074    exploration.parsing.DotParseError...
5075    >>> parseDotEdge('"1" -> "2" c')
5076    Traceback (most recent call last):
5077    ...
5078    exploration.parsing.DotParseError...
5079    >>> parseDotEdge('1')
5080    Traceback (most recent call last):
5081    ...
5082    exploration.parsing.DotParseError...
5083    >>> parseDotEdge('')
5084    Traceback (most recent call last):
5085    ...
5086    exploration.parsing.DotParseError...
5087    """
5088    stripped = edgeLine.lstrip()
5089    if len(stripped) == 0:
5090        raise DotParseError(
5091            f"Empty edge in dot graph on line:\n  {repr(edgeLine)}"
5092        )
5093    if stripped.startswith('"'):
5094        try:
5095            fromStr, rest = utils.unquoted(stripped)
5096        except ValueError:
5097            raise DotParseError(
5098                f"Unmatched quotes in line:\n  {repr(edgeLine)}"
5099            )
5100        rest = rest.lstrip()
5101        if rest[:2] != '->':
5102            raise DotParseError(
5103                f"No arrow sign following source name on"
5104                f" line:\n  {repr(edgeLine)}"
5105            )
5106        rest = rest[2:].lstrip()
5107    else:
5108        try:
5109            arrowInd = stripped.index('->')
5110        except ValueError:
5111            raise DotParseError(
5112                f"No arrow in edge line:"
5113                f"\n  {repr(edgeLine)}"
5114            )
5115        fromStr = stripped[:arrowInd].rstrip()
5116        rest = stripped[arrowInd + 2:].lstrip()
5117        if ' ' in fromStr:
5118            raise DotParseError(
5119                f"Unquoted multi-word edge source on line:"
5120                f"\n  {repr(edgeLine)}"
5121            )
5122
5123    hasAttrs = False
5124    if rest.startswith('"'):
5125        try:
5126            toStr, rest = utils.unquoted(rest)
5127        except ValueError:
5128            raise DotParseError(
5129                f"Unmatched quotes in line:\n  {repr(edgeLine)}"
5130            )
5131        stripped = rest.strip()
5132        if stripped == '[':
5133            hasAttrs = True
5134        elif stripped:
5135            raise DotParseError(
5136                f"Junk after edge on line:"
5137                f"\n  {repr(edgeLine)}"
5138            )
5139    else:
5140        toStr = rest.rstrip()
5141        if toStr.endswith('['):
5142            toStr = toStr[:-1].rstrip()
5143            hasAttrs = True
5144        if ' ' in toStr:
5145            raise DotParseError(
5146                f"Unquoted multi-word edge destination on line:"
5147                f"\n  {repr(edgeLine)}"
5148            )
5149
5150    try:
5151        fromID = int(fromStr)
5152    except ValueError:
5153        raise DotParseError(
5154            f"Invalid 'from' ID: {repr(fromStr)} on line:"
5155            f"\n  {repr(edgeLine)}"
5156        )
5157
5158    try:
5159        toID = int(toStr)
5160    except ValueError:
5161        raise DotParseError(
5162            f"Invalid 'to' ID: {repr(toStr)} on line:"
5163            f"\n  {repr(edgeLine)}"
5164        )
5165
5166    return (fromID, toID, hasAttrs)
5167
5168
5169def parseDotAttrList(
5170    lines: List[str]
5171) -> Tuple[List[Tuple[str, str]], List[str]]:
5172    """
5173    Given a list of lines of text from a `graphviz` dot-format
5174    graph which starts with an attribute line, parses multiple
5175    attribute lines until a line containing just ']' is found.
5176    Returns a list of the parsed name/value attribute pair tuples,
5177    along with a list of remaining unparsed strings (not counting
5178    the closing ']' line). Raises a `DotParseError` if it finds a
5179    non-attribute line or if it fails to find a closing ']' line.
5180    Examples:
5181
5182    >>> parseDotAttrList([
5183    ...     'a=b\\n',
5184    ...     'c=d\\n',
5185    ...     ']\\n',
5186    ... ])
5187    ([('a', 'b'), ('c', 'd')], [])
5188    >>> parseDotAttrList([
5189    ...     'a=b',
5190    ...     'c=d',
5191    ...     '  ]',
5192    ...     'more',
5193    ...     'lines',
5194    ... ])
5195    ([('a', 'b'), ('c', 'd')], ['more', 'lines'])
5196    >>> parseDotAttrList([
5197    ...     'a=b',
5198    ...     'c=d',
5199    ... ])
5200    Traceback (most recent call last):
5201    ...
5202    exploration.parsing.DotParseError...
5203    """
5204    index = 0
5205    found = []
5206    while index < len(lines):
5207        thisLine = lines[index]
5208        try:
5209            found.append(parseDotAttr(thisLine))
5210        except DotParseError:
5211            if thisLine.strip() == ']':
5212                return (found, lines[index + 1:])
5213            else:
5214                raise DotParseError(
5215                    f"Could not parse attribute from line:"
5216                    f"\n  {repr(thisLine)}"
5217                    f"\nAttributes block starts on line:"
5218                    f"\n  {repr(lines[0])}"
5219                )
5220        index += 1
5221
5222    raise DotParseError(
5223        f"No list terminator (']') for attributes starting on line:"
5224        f"\n  {repr(lines[0])}"
5225    )
5226
5227
5228def parseDotSubgraphStart(line: str) -> str:
5229    """
5230    Parses the start of a subgraph from a line of a graph file. The
5231    line must start with the word 'subgraph' and then have a name,
5232    followed by a '{' at the end of the line. Raises a
5233    `DotParseError` if this format doesn't match. Examples:
5234
5235    >>> parseDotSubgraphStart('subgraph A {')
5236    'A'
5237    >>> parseDotSubgraphStart('subgraph A B {')
5238    Traceback (most recent call last):
5239    ...
5240    exploration.parsing.DotParseError...
5241    >>> parseDotSubgraphStart('subgraph "A B" {')
5242    'A B'
5243    >>> parseDotSubgraphStart('subgraph A')
5244    Traceback (most recent call last):
5245    ...
5246    exploration.parsing.DotParseError...
5247    """
5248    stripped = line.strip()
5249    if len(stripped) == 0:
5250        raise DotParseError(
5251            f"Empty line where subgraph was expected:"
5252            f"\n  {repr(line)}"
5253        )
5254
5255    if not stripped.startswith('subgraph '):
5256        raise DotParseError(
5257            f"Subgraph doesn't start with 'subgraph' on line:"
5258            f"\n  {repr(line)}"
5259        )
5260
5261    stripped = stripped[9:]
5262    if stripped.startswith('"'):
5263        try:
5264            name, rest = utils.unquoted(stripped)
5265        except ValueError:
5266            raise DotParseError(
5267                f"Malformed quotes on subgraph line:\n {repr(line)}"
5268            )
5269        if rest.strip() != '{':
5270            raise DotParseError(
5271                f"Junk or missing '{{' on subgraph line:\n {repr(line)}"
5272            )
5273    else:
5274        parts = stripped.split()
5275        if len(parts) != 2 or parts[1] != '{':
5276            raise DotParseError(
5277                f"Junk or missing '{{' on subgraph line:\n {repr(line)}"
5278            )
5279        name, _ = parts
5280
5281    return name
5282
5283
5284def parseDotGraphContents(
5285    lines: List[str]
5286) -> Tuple[ParsedDotGraph, List[str]]:
5287    """
5288    Given a list of lines from a `graphviz` dot-format string,
5289    parses the list as the contents of a graph (or subgraph),
5290    stopping when it reaches a line that just contains '}'. Raises a
5291    `DotParseError` if it cannot do so or if the terminator is
5292    missing. Returns a tuple containing the parsed graph data (see
5293    `ParsedDotGraph`) and the list of remaining lines after the
5294    terminator. Recursively parses subgraphs. Example:
5295
5296    >>> bits = parseDotGraphContents([
5297    ...     '"graph attr"=1',
5298    ...     '1 [',
5299    ...     '  attr=value',
5300    ...     ']',
5301    ...     '1 -> 2 [',
5302    ...     '  fullLabel="to_B"',
5303    ...     '  quality=number',
5304    ...     ']',
5305    ...     'subgraph name {',
5306    ...     '  300',
5307    ...     '  400',
5308    ...     '  300 -> 400 [',
5309    ...     '    fullLabel=forward',
5310    ...     '  ]',
5311    ...     '}',
5312    ...     '}',
5313    ... ])
5314    >>> len(bits)
5315    2
5316    >>> g = bits[0]
5317    >>> bits[1]
5318    []
5319    >>> sorted(g.keys())
5320    ['attrs', 'edges', 'nodes', 'subgraphs']
5321    >>> g['nodes']
5322    [(1, [('attr', 'value')])]
5323    >>> g['edges']
5324    [(1, 2, [('fullLabel', 'to_B'), ('quality', 'number')])]
5325    >>> g['attrs']
5326    [('graph attr', '1')]
5327    >>> sgs = g['subgraphs']
5328    >>> len(sgs)
5329    1
5330    >>> len(sgs[0])
5331    2
5332    >>> sgs[0][0]
5333    'name'
5334    >>> sg = sgs[0][1]
5335    >>> sorted(sg.keys())
5336    ['attrs', 'edges', 'nodes', 'subgraphs']
5337    >>> sg["nodes"]
5338    [(300, []), (400, [])]
5339    >>> sg["edges"]
5340    [(300, 400, [('fullLabel', 'forward')])]
5341    >>> sg["attrs"]
5342    []
5343    >>> sg["subgraphs"]
5344    []
5345    """
5346    result: ParsedDotGraph = {
5347        'nodes': [],
5348        'edges': [],
5349        'attrs': [],
5350        'subgraphs': [],
5351    }
5352    index = 0
5353    remainder = None
5354    # Consider each line:
5355    while index < len(lines):
5356        # Grab line and pre-increment index
5357        thisLine = lines[index]
5358        index += 1
5359
5360        # Check for } first because it could be parsed as a node
5361        stripped = thisLine.strip()
5362        if stripped == '}':
5363            remainder = lines[index:]
5364            break
5365        elif stripped == '':  # ignore blank lines
5366            continue
5367
5368        # Cascading parsing attempts, since the possibilities are
5369        # mostly mutually exclusive.
5370        # TODO: Node/attr confusion with = in a node name?
5371        try:
5372            attrName, attrVal = parseDotAttr(thisLine)
5373            result['attrs'].append((attrName, attrVal))
5374        except DotParseError:
5375            try:
5376                fromNode, toNode, hasEAttrs = parseDotEdge(
5377                    thisLine
5378                )
5379                if hasEAttrs:
5380                    attrs, rest = parseDotAttrList(
5381                        lines[index:]
5382                    )
5383                    # Restart to process rest
5384                    lines = rest
5385                    index = 0
5386                else:
5387                    attrs = []
5388                result['edges'].append((fromNode, toNode, attrs))
5389            except DotParseError:
5390                try:
5391                    nodeName, hasNAttrs = parseDotNode(
5392                        thisLine
5393                    )
5394                    if hasNAttrs is True:
5395                        attrs, rest = parseDotAttrList(
5396                            lines[index:]
5397                        )
5398                        # Restart to process rest
5399                        lines = rest
5400                        index = 0
5401                    elif hasNAttrs:
5402                        attrs = hasNAttrs
5403                    else:
5404                        attrs = []
5405                    result['nodes'].append((nodeName, attrs))
5406                except DotParseError:
5407                    try:
5408                        subName = parseDotSubgraphStart(
5409                            thisLine
5410                        )
5411                        subStuff, rest = \
5412                            parseDotGraphContents(
5413                                lines[index:]
5414                            )
5415                        result['subgraphs'].append((subName, subStuff))
5416                        # Restart to process rest
5417                        lines = rest
5418                        index = 0
5419                    except DotParseError:
5420                        raise DotParseError(
5421                            f"Unrecognizable graph line (possibly"
5422                            f" beginning of unfinished structure):"
5423                            f"\n  {repr(thisLine)}"
5424                        )
5425    if remainder is None:
5426        raise DotParseError(
5427            f"Graph (or subgraph) is missing closing '}}'. Starts"
5428            f" on line:\n  {repr(lines[0])}"
5429        )
5430    else:
5431        return (result, remainder)
5432
5433
5434def parseDot(
5435    dotStr: str,
5436    parseFormat: ParseFormat = ParseFormat()
5437) -> core.DecisionGraph:
5438    """
5439    Converts a `graphviz` dot-format string into a `core.DecisionGraph`.
5440    A custom `ParseFormat` may be specified if desired; the default
5441    `ParseFormat` is used if not. Note that this relies on specific
5442    indentation schemes used by `toDot`, so a hand-edited dot-format
5443    graph will probably not work. A `DotParseError` is raised if the
5444    provided string can't be parsed. Examples:
5445
5446    >>> parseDotNode(' 3 [ label="A = \\\\"grate:open\\\\"" ]')
5447    (3, [('label', 'A = "grate:open"')])
5448    >>> sg = '''\
5449    ... subgraph __requirements__ {
5450    ...   3 [ label="A = \\\\"grate:open\\\\"" ]
5451    ...   4 [ label="B = \\\\"!(helmet)\\\\"" ]
5452    ...   5 [ label="C = \\\\"helmet\\\\"" ]
5453    ... }'''
5454    >>> parseDotGraphContents(sg.splitlines()[1:])
5455    ({'nodes': [(3, [('label', 'A = "grate:open"')]),\
5456 (4, [('label', 'B = "!(helmet)"')]), (5, [('label', 'C = "helmet"')])],\
5457 'edges': [], 'attrs': [], 'subgraphs': []}, [])
5458    >>> from . import core
5459    >>> dg = core.DecisionGraph.example('simple')
5460    >>> encoded = toDot(dg)
5461    >>> reconstructed = parseDot(encoded)
5462    >>> for diff in dg.listDifferences(reconstructed):
5463    ...     print(diff)
5464    >>> reconstructed == dg
5465    True
5466    >>> dg = core.DecisionGraph.example('abc')
5467    >>> encoded = toDot(dg)
5468    >>> reconstructed = parseDot(encoded)
5469    >>> for diff in dg.listDifferences(reconstructed):
5470    ...     print(diff)
5471    >>> reconstructed == dg
5472    True
5473    >>> tg = core.DecisionGraph()
5474    >>> tg.addDecision('A')
5475    0
5476    >>> tg.addDecision('B')
5477    1
5478    >>> tg.addTransition('A', 'up', 'B', 'down')
5479    >>> same = parseDot('''
5480    ... digraph {
5481    ...     0 [ name=A label=A ]
5482    ...       0 -> 1 [
5483    ...         label=up
5484    ...         fullLabel=up
5485    ...         reciprocal=down
5486    ...       ]
5487    ...     1 [ name=B label=B ]
5488    ...       1 -> 0 [
5489    ...         label=down
5490    ...         fullLabel=down
5491    ...         reciprocal=up
5492    ...       ]
5493    ... }''')
5494    >>> for diff in tg.listDifferences(same):
5495    ...     print(diff)
5496    >>> same == tg
5497    True
5498    >>> pf = ParseFormat()
5499    >>> tg.setTransitionRequirement('A', 'up', pf.parseRequirement('one|two'))
5500    >>> tg.setConsequence(
5501    ...     'B',
5502    ...     'down',
5503    ...     [base.effect(gain="one")]
5504    ... )
5505    >>> test = parseDot('''
5506    ...   digraph {
5507    ...     0 [ name="A = \\\\"one|two\\\\"" label="A = \\\\"one|two\\\\"" ]
5508    ...   }
5509    ... ''')
5510    >>> list(test.nodes)
5511    [0]
5512    >>> test.nodes[0]['name']
5513    'A = "one|two"'
5514    >>> eff = (
5515    ...   r'"A = \\"[{\\\\\\"type\\\\\\": \\\\\\"gain\\\\\\",'
5516    ...   r' \\\\\\"applyTo\\\\\\": \\\\\\"active\\\\\\",'
5517    ...   r' \\\\\\"value\\\\\\": \\\\\\"one\\\\\\",'
5518    ...   r' \\\\\\"charges\\\\\\": null, \\\\\\"hidden\\\\\\": false,'
5519    ...   r' \\\\\\"delay\\\\\\": null}]\\""'
5520    ... )
5521    >>> utils.unquoted(eff)[1]
5522    ''
5523    >>> test2 = parseDot(
5524    ...     'digraph {\\n 0 [ name=' + eff + ' label=' + eff + ' ]\\n}'
5525    ... )
5526    >>> s = test2.nodes[0]['name']
5527    >>> s[:25]
5528    'A = "[{\\\\"type\\\\": \\\\"gain\\\\"'
5529    >>> s[25:50]
5530    ', \\\\"applyTo\\\\": \\\\"active\\\\"'
5531    >>> s[50:70]
5532    ', \\\\"value\\\\": \\\\"one\\\\"'
5533    >>> s[70:89]
5534    ', \\\\"charges\\\\": null'
5535    >>> s[89:108]
5536    ', \\\\"hidden\\\\": false'
5537    >>> s[108:]
5538    ', \\\\"delay\\\\": null}]"'
5539    >>> ae = s[s.index('=') + 1:].strip()
5540    >>> uq, after = utils.unquoted(ae)
5541    >>> after
5542    ''
5543    >>> fromJSON(uq) == [base.effect(gain="one")]
5544    True
5545    >>> same = parseDot('''
5546    ... digraph {
5547    ...   0 [ name=A label=A ]
5548    ...     0 -> 1 [
5549    ...       label=up
5550    ...       fullLabel=up
5551    ...       reciprocal=down
5552    ...       req=A
5553    ...     ]
5554    ...   1 [ name=B label=B ]
5555    ...     1 -> 0 [
5556    ...       label=down
5557    ...       fullLabel=down
5558    ...       reciprocal=up
5559    ...       consequence=A
5560    ...     ]
5561    ...   subgraph __requirements__ {
5562    ...     2 [ label="A = \\\\"one|two\\\\"" ]
5563    ...   }
5564    ...   subgraph __consequences__ {
5565    ...     3 [ label=''' + eff + ''' ]
5566    ...   }
5567    ... }''')
5569
5570    >>> for diff in tg.listDifferences(same):
5571    ...     print(diff)
5572    >>> same == tg
5573    True
5574    """
5575    lines = dotStr.splitlines()
5576    while lines[0].strip() == '':
5577        lines.pop(0)
5578    if lines.pop(0).strip() != "digraph {":
5579        raise DotParseError("Input doesn't begin with 'digraph {'.")
5580
5581    # Create our result
5582    result = core.DecisionGraph()
5583
5584    # Parse to intermediate graph data structure
5585    graphStuff, remaining = parseDotGraphContents(lines)
5586    if remaining:
5587        if len(remaining) <= 4:
5588            junk = '\n  '.join(repr(line) for line in remaining)
5589        else:
5590            junk = '\n  '.join(repr(line) for line in remaining[:4])
5591            junk += '\n  ...'
5592        raise DotParseError(f"Extra junk after graph:\n  {junk}")
5593
5594    # Sort out subgraphs to find legends
5595    zoneSubs = []
5596    reqLegend = None
5597    consequenceLegend = None
5598    mechanismLegend = None
5599    for sub in graphStuff['subgraphs']:
5600        if sub[0] == '__requirements__':
5601            reqLegend = sub[1]
5602        elif sub[0] == '__consequences__':
5603            consequenceLegend = sub[1]
5604        elif sub[0] == '__mechanisms__':
5605            mechanismLegend = sub[1]
5606        else:
5607            zoneSubs.append(sub)
5608
5609    # Build out our mapping from requirement abbreviations to actual
5610    # requirement objects
5611    reqMap: Dict[str, base.Requirement] = {}
5612    if reqLegend is not None:
5613        if reqLegend['edges']:
5614            raise DotParseError(
5615                f"Requirements legend subgraph has edges:"
5616                f"\n  {repr(reqLegend['edges'])}"
5617                f"\n(It should only have nodes.)"
5618            )
5619        if reqLegend['attrs']:
5620            raise DotParseError(
5621                f"Requirements legend subgraph has attributes:"
5622                f"\n  {repr(reqLegend['attrs'])}"
5623                f"\n(It should only have nodes.)"
5624            )
5625        if reqLegend['subgraphs']:
5626            raise DotParseError(
5627                f"Requirements legend subgraph has subgraphs:"
5628                f"\n  {repr(reqLegend['subgraphs'])}"
5629                f"\n(It should only have nodes.)"
5630            )
5631        for node, attrs in reqLegend['nodes']:
5632            if not attrs:
5633                raise DotParseError(
5634                    f"Node in requirements legend missing attributes:"
5635                    f"\n  {repr(attrs)}"
5636                )
5637            if len(attrs) != 1:
5638                raise DotParseError(
5639                    f"Node in requirements legend has multiple"
5640                    f" attributes:\n  {repr(attrs)}"
5641                )
5642            reqStr = attrs[0][1]
5643            try:
5644                eqInd = reqStr.index('=')
5645            except ValueError:
5646                raise DotParseError(
5647                    f"Missing '=' in requirement specifier:"
5648                    f"\n  {repr(reqStr)}"
5649                )
5650            ab = reqStr[:eqInd].rstrip()
5651            encoded = reqStr[eqInd + 1:].lstrip()
5652            try:
5653                encVal, empty = utils.unquoted(encoded)
5654            except ValueError:
5655                raise DotParseError(
5656                    f"Invalid quoted requirement value:"
5657                    f"\n  {repr(encoded)}"
5658                )
5659            if empty.strip():
5660                raise DotParseError(
5661                    f"Extra junk after requirement value:"
5662                    f"\n  {repr(empty)}"
5663                )
5664            try:
5665                req = parseFormat.parseRequirement(encVal)
5666            except ValueError:
5667                raise DotParseError(
5668                    f"Invalid encoded requirement in requirements"
5669                    f" legend:\n  {repr(encVal)}"
5670                )
5671            if ab in reqMap:
5672                raise DotParseError(
5673                    f"Abbreviation '{ab}' was defined multiple"
5674                    f" times in requirements legend."
5675                )
5676            reqMap[ab] = req
5677
5678    # Build out our mapping from consequence abbreviations to actual
5679    # consequence lists
5680    consequenceMap: Dict[str, base.Consequence] = {}
5681    if consequenceLegend is not None:
5682        if consequenceLegend['edges']:
5683            raise DotParseError(
5684                f"Consequences legend subgraph has edges:"
5685                f"\n  {repr(consequenceLegend['edges'])}"
5686                f"\n(It should only have nodes.)"
5687            )
5688        if consequenceLegend['attrs']:
5689            raise DotParseError(
5690                f"Consequences legend subgraph has attributes:"
5691                f"\n  {repr(consequenceLegend['attrs'])}"
5692                f"\n(It should only have nodes.)"
5693            )
5694        if consequenceLegend['subgraphs']:
5695            raise DotParseError(
5696                f"Consequences legend subgraph has subgraphs:"
5697                f"\n  {repr(consequenceLegend['subgraphs'])}"
5698                f"\n(It should only have nodes.)"
5699            )
5700        for node, attrs in consequenceLegend['nodes']:
5701            if not attrs:
5702                raise DotParseError(
5703                    f"Node in consequence legend missing attributes:"
5704                    f"\n  {repr(attrs)}"
5705                )
5706            if len(attrs) != 1:
5707                raise DotParseError(
5708                    f"Node in consequences legend has multiple"
5709                    f" attributes:\n  {repr(attrs)}"
5710                )
5711            consStr = attrs[0][1]
5712            try:
5713                eqInd = consStr.index('=')
5714            except ValueError:
5715                raise DotParseError(
5716                    f"Missing '=' in consequence string:"
5717                    f"\n  {repr(consStr)}"
5718                )
5719            ab = consStr[:eqInd].rstrip()
5720            encoded = consStr[eqInd + 1:].lstrip()
5721            try:
5722                encVal, empty = utils.unquoted(encoded)
5723            except ValueError:
5724                raise DotParseError(
5725                    f"Invalid quoted consequence value:"
5726                    f"\n  {repr(encoded)}"
5727                )
5728            if empty.strip():
5729                raise DotParseError(
5730                    f"Extra junk after consequence value:"
5731                    f"\n  {repr(empty)}"
5732                )
5733            try:
5734                consequences = fromJSON(encVal)
5735            except json.decoder.JSONDecodeError:
5736                raise DotParseError(
5737                    f"Invalid encoded consequence in consequences"
5738                    f" legend:\n  {repr(encVal)}"
5739                )
5740            if ab in consequenceMap:
5741                raise DotParseError(
5742                    f"Abbreviation '{ab}' was defined multiple"
5743                    f" times in consequences legend."
5744                )
5745            consequenceMap[ab] = consequences
5746
5747    # Reconstruct mechanisms
5748    if mechanismLegend is not None:
5749        if mechanismLegend['edges']:
5750            raise DotParseError(
5751                f"Mechanisms legend subgraph has edges:"
5752                f"\n  {repr(mechanismLegend['edges'])}"
5753                f"\n(It should only have nodes.)"
5754            )
5755        if mechanismLegend['attrs']:
5756            raise DotParseError(
5757                f"Mechanisms legend subgraph has attributes:"
5758                f"\n  {repr(mechanismLegend['attrs'])}"
5759                f"\n(It should only have nodes.)"
5760            )
5761        if mechanismLegend['subgraphs']:
5762            raise DotParseError(
5763                f"Mechanisms legend subgraph has subgraphs:"
5764                f"\n  {repr(mechanismLegend['subgraphs'])}"
5765                f"\n(It should only have nodes.)"
5766            )
5767        for node, attrs in mechanismLegend['nodes']:
5768            if not attrs:
5769                raise DotParseError(
5770                    f"Node in mechanisms legend missing attributes:"
5771                    f"\n  {repr(attrs)}"
5772                )
5773            if len(attrs) != 1:
5774                raise DotParseError(
5775                    f"Node in mechanisms legend has multiple"
5776                    f" attributes:\n  {repr(attrs)}"
5777                )
5778            mechStr = attrs[0][1]
5779            try:
5780                atInd = mechStr.index('@')
5781                colonInd = mechStr.index(':')
5782            except ValueError:
5783                raise DotParseError(
5784                    f"Missing '@' or ':' in mechanism string:"
5785                    f"\n  {repr(mechStr)}"
5786                )
5787            if atInd > colonInd:
5788                raise DotParseError(
5789                    f"':' after '@' in mechanism string:"
5790                    f"\n  {repr(mechStr)}"
5791                )
5792            mID: base.MechanismID
5793            where: Optional[base.DecisionID]
5794            mName: base.MechanismName
5795            try:
5796                mID = int(mechStr[:atInd].rstrip())
5797            except ValueError:
5798                raise DotParseError(
5799                    f"Invalid mechanism ID in mechanism string:"
5800                    f"\n  {repr(mechStr)}"
5801                )
5802            try:
5803                whereStr = mechStr[atInd + 1:colonInd].strip()
5804                if whereStr == "None":
5805                    where = None
5806                else:
5807                    where = int(whereStr)
5808            except ValueError:
5809                raise DotParseError(
5810                    f"Invalid mechanism location in mechanism string:"
5811                    f"\n  {repr(mechStr)}"
5812                )
5813            mName, rest = utils.unquoted(mechStr[colonInd + 1:].lstrip())
5814            if rest.strip():
5815                raise DotParseError(
5816                    f"Junk after mechanism name in mechanism string:"
5817                    f"\n  {repr(mechStr)}"
5818                )
5819            result.mechanisms[mID] = (where, mName)
5820            if where is None:
5821                result.globalMechanisms[mName] = mID
5822
5823    # Add zones to the graph based on parent info
5824    # Map from zones to children we should add to them once all
5825    # zones are created:
5826    zoneChildMap: Dict[str, List[str]] = {}
5827    for prefixedName, graphData in zoneSubs:
5828        # Chop off cluster_ or _ prefix:
5829        zoneName = prefixedName[prefixedName.index('_') + 1:]
5830        if graphData['edges']:
5831            raise DotParseError(
5832                f"Zone subgraph for zone {repr(zoneName)} has edges:"
5833                f"\n  {repr(graphData['edges'])}"
5834                f"\n(It should only have nodes and attributes.)"
5835            )
5836        if graphData['subgraphs']:
5837            raise DotParseError(
5838                f"Zone subgraph for zone {repr(zoneName)} has"
5839                f" subgraphs:"
5840                f"\n  {repr(graphData['subgraphs'])}"
5841                f"\n(It should only have nodes and attributes.)"
5842            )
5843        # Note: we ignore nodes as that info is used for
5844        # visualization but is redundant with the zone parent info
5845        # stored in nodes, and it would be tricky to tease apart
5846        # direct vs. indirect relationships from merged info.
5847        parents = None
5848        level = None
5849        for attr, aVal in graphData['attrs']:
5850            if attr == 'parents':
5851                try:
5852                    parents = set(fromJSON(aVal))
5853                except json.decoder.JSONDecodeError:
5854                    raise DotParseError(
5855                        f"Invalid parents JSON in zone subgraph for"
5856                        f" zone '{zoneName}':\n  {repr(aVal)}"
5857                    )
5858            elif attr == 'level':
5859                try:
5860                    level = int(aVal)
5861                except ValueError:
5862                    raise DotParseError(
5863                        f"Invalid level in zone subgraph for"
5864                        f" zone '{zoneName}':\n  {repr(aVal)}"
5865                    )
5866            elif attr == 'label':
5867                pass  # name already extracted from the subgraph name
5868
5869            else:
5870                raise DotParseError(
5871                    f"Unexpected attribute '{attr}' in zone"
5872                    f" subgraph for zone '{zoneName}'"
5873                )
5874        if parents is None:
5875            raise DotParseError(
5876                f"No parents attribute for zone '{zoneName}'."
5877                f" Graph is:\n  {repr(graphData)}"
5878            )
5879        if level is None:
5880            raise DotParseError(
5881                f"No level attribute for zone '{zoneName}'."
5882                f" Graph is:\n  {repr(graphData)}"
5883            )
5884
5885        # Add ourself to our parents in the child map
5886        for parent in parents:
5887            zoneChildMap.setdefault(parent, []).append(zoneName)
5888
5889        # Create this zone
5890        result.createZone(zoneName, level)
5891
5892    # Add zone parent/child relationships
5893    for parent, children in zoneChildMap.items():
5894        for child in children:
5895            result.addZoneToZone(child, parent)
5896
5897    # Add nodes to the graph
5898    for (node, attrs) in graphStuff['nodes']:
5899        name: Optional[str] = None
5900        annotations = []
5901        tags: Dict[base.Tag, base.TagValue] = {}
5902        zones = []
5903        for attr, aVal in attrs:
5904            if attr == 'name':  # it's the name
5905                name = aVal
5906            elif attr == 'label':  # zone + name; redundant
5907                pass
5908            elif attr.startswith('t_'):  # it's a tag
5909                tagName = attr[2:]
5910                try:
5911                    tagAny = fromJSON(aVal)
5912                except json.decoder.JSONDecodeError:
5913                    raise DotParseError(
5914                        f"Error in JSON for tag attr '{attr}' of node"
5915                        f" '{node}'"
5916                    )
5917                if isinstance(tagAny, base.TagValueTypes):
5918                    tagVal: base.TagValue = cast(base.TagValue, tagAny)
5919                else:
5920                    raise DotParseError(
5921                        f"JSON for tag value encodes disallowed tag"
5922                        f" value of type {type(tagAny)}. Value is:"
5923                        f"\n  {repr(tagAny)}"
5924                    )
5925                tags[tagName] = tagVal
5926            elif attr.startswith('z_'):  # it's a zone
5927                zones.append(attr[2:])
5928            elif attr == 'annotations':  # It's the annotations
5929                try:
5930                    annotations = fromJSON(aVal)
5931                except json.decoder.JSONDecodeError:
5932                    raise DotParseError(
5933                        f"Bad JSON in attribute '{attr}' of node"
5934                        f" '{node}'"
5935                    )
5936            else:
5937                raise DotParseError(
5938                    f"Unrecognized node attribute '{attr}' for node"
5939                    f" '{node}'"
5940                )
5941
5942        # TODO: Domains here?
5943        if name is None:
5944            raise DotParseError(f"Node '{node}' does not have a name.")
5945
5946        result.addIdentifiedDecision(
5947            node,
5948            name,
5949            tags=tags,
5950            annotations=annotations
5951        )
5952        for zone in zones:
5953            try:
5954                result.addDecisionToZone(node, zone)
5955            except core.MissingZoneError:
5956                raise DotParseError(
5957                    f"Zone '{zone}' for node {node} does not"
5958                    f" exist."
5959                )
5960
5961    # Add mechanisms to each node:
5962    for (mID, (where, mName)) in result.mechanisms.items():
            if where is None:  # global mechanisms aren't tied to a node
                continue
5963        mPool = result.nodes[where].setdefault('mechanisms', {})
5964        if mName in mPool:
5965            raise DotParseError(
5966                f"Multiple mechanisms named {mName!r} at"
5967                f" decision {where}."
5968            )
5969        mPool[mName] = mID
5970
5971    # Reciprocals to double-check once all edges are added
5972    recipChecks: Dict[
5973        Tuple[base.DecisionID, base.Transition],
5974        base.Transition
5975    ] = {}
5976
5977    # Add each edge
5978    for (source, dest, attrs) in graphStuff['edges']:
5979        annotations = []
5980        tags = {}
5981        label = None
5982        requirements = None
5983        consequence = None
5984        reciprocal = None
5985        for attr, aVal in attrs:
5986            if attr.startswith('t_'):
5987                try:
5988                    tags[attr[2:]] = fromJSON(aVal)
5989                except json.decoder.JSONDecodeError:
5990                    raise DotParseError(
5991                        f"Invalid JSON in edge tag '{attr}' for edge"
5992                        f" from '{source}' to '{dest}':"
5993                        f"\n  {repr(aVal)}"
5994                    )
5995            elif attr == "label":  # We ignore the short-label
5996                pass
5997            elif attr == "fullLabel":  # This is our transition name
5998                label = aVal
5999            elif attr == "reciprocal":
6000                reciprocal = aVal
6001            elif attr == "req":
6002                reqAbbr = aVal
6003                if reqAbbr not in reqMap:
6004                    raise DotParseError(
6005                        f"Edge from '{source}' to '{dest}' has"
6006                        f" requirement abbreviation '{reqAbbr}'"
6007                        f" but that abbreviation was not listed"
6008                        f" in the '__requirements__' subgraph."
6009                    )
6010                requirements = reqMap[reqAbbr]
6011            elif attr == "consequence":
6012                consequenceAbbr = aVal
6013                if consequenceAbbr not in consequenceMap:
6014                    raise DotParseError(
6015                        f"Edge from '{source}' to '{dest}' has"
6016                        f" consequence abbreviation"
6017                        f" '{consequenceAbbr}' but that"
6018                        f" abbreviation was not listed in the"
6019                        f" '__consequences__' subgraph."
6020                    )
6021                consequence = consequenceMap[consequenceAbbr]
6022            elif attr == "annotations":
6023                try:
6024                    annotations = fromJSON(aVal)
6025                except json.decoder.JSONDecodeError:
6026                    raise DotParseError(
6027                        f"Invalid JSON in edge annotations for"
6028                        f" edge from '{source}' to '{dest}':"
6029                        f"\n  {repr(aVal)}"
6030                    )
6031            else:
6032                raise DotParseError(
6033                    f"Unrecognized edge attribute '{attr}' for edge"
6034                    f" from '{source}' to '{dest}'"
6035                )
6036
6037        if label is None:
6038            raise DotParseError(
6039                f"Edge from '{source}' to '{dest}' is missing"
6040                f" a 'fullLabel' attribute."
6041            )
6042
6043        # Add the requested transition
6044        result.addTransition(
6045            source,
6046            label,
6047            dest,
6048            tags=tags,
6049            annotations=annotations,
6050            requires=requirements,  # None works here
6051            consequence=consequence  # None works here
6052        )
6053        # Either we're first or our reciprocal is, so this will only
6054        # trigger for one of the pair
6055        if reciprocal is not None:
6056            recipDest = result.getDestination(dest, reciprocal)
6057            if recipDest is None:
6058                recipChecks[(source, label)] = reciprocal
6059                # we'll get set as a reciprocal when that edge is
6060                # instantiated, we hope, but let's check that later
6061            elif recipDest != source:
6062                raise DotParseError(
6063                    f"Transition '{label}' from '{source}' to"
6064                    f" '{dest}' lists reciprocal '{reciprocal}'"
6065                    f" but that transition from '{dest}' goes to"
6066                    f" '{recipDest}', not '{source}'."
6067                )
6068            else:
6069                # At this point we know the reciprocal edge exists
6070                # and has the appropriate destination (our source).
6071                # No need to check for a pre-existing reciprocal as
6072                # this edge is newly created and cannot already have
6073                # a reciprocal assigned.
6074                result.setReciprocal(source, label, reciprocal)
6075
6076    # Double-check skipped reciprocals
6077    for ((source, transition), reciprocal) in recipChecks.items():
6078        actual = result.getReciprocal(source, transition)
6079        if actual != reciprocal:
6080            raise DotParseError(
6081                f"Transition '{transition}' from '{source}' was"
6082                f" expecting to have reciprocal '{reciprocal}' but"
6083                f" all edges have been processed and its reciprocal"
6084                f" is {repr(actual)}."
6085            )
6086
6087    # Finally get graph-level attribute values
6088    for (name, value) in graphStuff['attrs']:
6089        if name == "unknownCount":
6090            try:
6091                result.unknownCount = int(value)
6092            except ValueError:
6093                raise DotParseError(
6094                    f"Invalid 'unknownCount' value {repr(value)}."
6095                )
6096        elif name == "nextID":
6097            try:
6098                result.nextID = int(value)
6099            except ValueError:
6100                raise DotParseError(
6101                    f"Invalid 'nextID' value:"
6102                    f"\n  {repr(value)}"
6103                )
6104            collisionCourse = [x for x in result if x >= result.nextID]
6105            if len(collisionCourse) > 0:
6106                raise DotParseError(
6107                    f"Next ID {value} is wrong because the graph"
6108                    f" already contains one or more node(s) with"
6109                    f" ID(s) that is/are at least that large:"
6110                    f" {collisionCourse}"
6111                )
6112        elif name == "nextMechanismID":
6113            try:
6114                result.nextMechanismID = int(value)
6115            except ValueError:
6116                raise DotParseError(
6117                    f"Invalid 'nextMechanismID' value:"
6118                    f"\n  {repr(value)}"
6119                )
6120        elif name in (
6121            "equivalences",
6122            "reversionTypes",
6123            "mechanisms",
6124            "globalMechanisms",
6125            "nameLookup"
6126        ):
6127            try:
6128                setattr(result, name, fromJSON(value))
6129            except json.decoder.JSONDecodeError:
6130                raise DotParseError(
6131                    f"Invalid JSON in '{name}' attribute:"
6132                    f"\n  {repr(value)}"
6133                )
6134        else:
6135            raise DotParseError(
6136                f"Graph has unexpected attribute '{name}'."
6137            )
6138
6139    # Final check for mechanism ID value after both mechanism ID and
6140    # mechanisms dictionary have been parsed:
6141    leftBehind = [
6142        x
6143        for x in result.mechanisms
6144        if x >= result.nextMechanismID
6145    ]
6146    if len(leftBehind) > 0:
6147        raise DotParseError(
6148            f"Next mechanism ID {result.nextMechanismID} is wrong because"
6149            f" the graph already contains one or more"
6150            f" node(s) with ID(s) that is/are at least that"
6151            f" large: {leftBehind}"
6152        )
6153
6154    # And we're done!
6155    return result
6156
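# Illustrative round trip (a sketch, not part of the original source): a
# graph serialized with `toDot` below should parse back via the dot parser
# above:
#
#     dotStr = toDot(g)  # g: a core.DecisionGraph
#     sameGraph = parseDot(dotStr)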
6157
6158def toDot(
6159    graph: core.DecisionGraph,
6160    clusterLevels: Union[str, List[int]] = [0]
6161) -> str:
6162    """
6163    Converts the decision graph into a "dot"-format string suitable
6164    for processing by `graphviz`.
6165
6166    See [the dot language
6167    specification](https://graphviz.org/doc/info/lang.html) for more
6168    detail on the syntax we convert to.
6169
6170    If `clusterLevels` is given, it should be either the string '*',
6171    or a list of integers. '*' means that all zone levels should be
6172    cluster-style subgraphs, while a list of integers specifies that
6173    zones at those levels should be cluster-style subgraphs. This
6174    will prefix the subgraph names with 'cluster_' instead of just
6175    '_'.
6176
6177    TODO: Check edge cases for quotes in capability names, tag names,
6178    transition names, annotations, etc.
6179
6180    TODO: At a minimum, colons must not be allowed in tag names!
6181
6182    TODO: Spaces in decision/transition names? Other special
6183    characters in those names?
6184    """
6185    # Set up result including unknownCount and nextID
6186    result = (
6187        f"digraph {{"
6188        f"\n  unknownCount={graph.unknownCount}"
6189        f"\n  nextID={graph.nextID}"
6190        f"\n  nextMechanismID={graph.nextMechanismID}"
6191        f"\n"
6192    )
6193
6194    # Dictionaries for using letters to substitute for unique
6195    # requirements/consequences found throughout the graph. Keys are
6196    # quoted requirement or consequence reprs, and values are
6197    # abbreviation strings for them.
6198    currentReqKey = utils.nextAbbrKey(None)
6199    currentEffectKey = utils.nextAbbrKey(None)
6200    reqKeys: Dict[str, str] = {}
6201    consequenceKeys: Dict[str, str] = {}
6202
6203    # Add all decision and transition info
6204    decision: base.DecisionID  # TODO: Fix Multidigraph type stubs
6205    for decision in graph.nodes:
6206        nodeInfo = graph.nodes[decision]
6207        tags = nodeInfo.get('tags', {})
6208        annotations = toJSON(nodeInfo.get('annotations', []))
6209        zones = nodeInfo.get('zones', set())
6210        nodeAttrs = f"\n    name={utils.quoted(nodeInfo['name'])}"
6211        immediateZones = [z for z in zones if graph.zoneHierarchyLevel(z) == 0]
6212        if len(immediateZones) > 0:
6213            useZone = sorted(immediateZones)[0]
6214            # TODO: Don't hardcode :: here?
6215            withZone = useZone + "::" + nodeInfo['name']
6216            nodeAttrs += f"\n    label={utils.quoted(withZone)}"
6217        else:
6218            nodeAttrs += f"\n    label={utils.quoted(nodeInfo['name'])}"
6219        for tag, value in tags.items():
6220            rep = utils.quoted(toJSON(value))
6221            nodeAttrs += f"\n    t_{tag}={rep}"
6222        for z in sorted(zones):
6223            nodeAttrs += f"\n    z_{z}=1"
6224        if annotations:
6225            nodeAttrs += '\n    annotations=' + utils.quoted(annotations)
6226
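        # (An illustrative rendering of the node entry emitted below:
        #    0 [
        #      name="A"
        #      label="zoneA::A"
        #    ]
        # )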
6227        result += f'\n  {decision} [{nodeAttrs}\n  ]'
6228
6229        for (transition, destination) in graph._byEdge[decision].items():
6230            edgeAttrs = (
6231                '\n      label='
6232              + utils.quoted(utils.abbr(transition))
6233            )
6234            edgeAttrs += (
6235                '\n      fullLabel='
6236              + utils.quoted(transition)
6237            )
6238            reciprocal = graph.getReciprocal(decision, transition)
6239            if reciprocal is not None:
6240                edgeAttrs += (
6241                    '\n      reciprocal='
6242                  + utils.quoted(reciprocal)
6243                )
6244            info = graph.edges[
6245                decision,  # type:ignore
6246                destination,
6247                transition
6248            ]
6249            if 'requirement' in info:
6250                # Get string rep for requirement
6251                rep = utils.quoted(info['requirement'].unparse())
6252                # Get assigned abbreviation or assign one
6253                if rep in reqKeys:
6254                    ab = reqKeys[rep]
6255                else:
6256                    ab = currentReqKey
6257                    reqKeys[rep] = ab
6258                    currentReqKey = utils.nextAbbrKey(currentReqKey)
6259                # Add abbreviation as edge attribute
6260                edgeAttrs += f'\n      req={ab}'
6261            if 'consequence' in info:
6262                # Get string representation of consequences
6263                rep = utils.quoted(
6264                    toJSON(info['consequence'])
6265                )
6266                # Get abbreviation for that or assign one:
6267                if rep in consequenceKeys:
6268                    ab = consequenceKeys[rep]
6269                else:
6270                    ab = currentEffectKey
6271                    consequenceKeys[rep] = ab
6272                    currentEffectKey = utils.nextAbbrKey(
6273                        currentEffectKey
6274                    )
6275                # Add abbreviation as an edge attribute
6276                edgeAttrs += f'\n      consequence={ab}'
6277            for (tag, value) in info["tags"].items():
6278                # Get string representation of tag value
6279                rep = utils.quoted(toJSON(value))
6280                # Add edge attribute for tag
6281                edgeAttrs += f'\n      t_{tag}={rep}'
6282            if 'annotations' in info:
6283                edgeAttrs += (
6284                    '\n      annotations='
6285                  + utils.quoted(toJSON(info['annotations']))
6286                )
6287            result += f'\n    {decision} -> {destination}'
6288            result += f' [{edgeAttrs}\n    ]'
6289
6290    # Add zone info as subgraph structure
6291    for z, zinfo in graph.zones.items():
6292        parents = utils.quoted(toJSON(sorted(zinfo.parents)))
6293        if clusterLevels == '*' or zinfo.level in clusterLevels:
6294            zName = "cluster_" + z
6295        else:
6296            zName = '_' + z
6297        zoneSubgraph = f'\n  subgraph {utils.quoted(zName)} {{'
6298        zoneSubgraph += f'\n    label={z}'
6299        zoneSubgraph += f'\n    level={zinfo.level}'
6300        zoneSubgraph += f'\n    parents={parents}'
6301        for decision in sorted(graph.allDecisionsInZone(z)):
6302            zoneSubgraph += f'\n    {decision}'
6303        zoneSubgraph += '\n  }'
6304        result += zoneSubgraph
6305
6306    # Add equivalences, mechanisms, etc.
6307    for attr in [
6308        "equivalences",
6309        "reversionTypes",
6310        "mechanisms",
6311        "globalMechanisms",
6312        "nameLookup"
6313    ]:
6314        aRep = utils.quoted(toJSON(getattr(graph, attr)))
6315        result += f'\n  {attr}={aRep}'
6316
6317    # Add legend subgraphs to represent abbreviations
6318    useID = graph.nextID
6319    if reqKeys:
6320        result += '\n  subgraph __requirements__ {'
6321        for rrepr, ab in reqKeys.items():
6322            nStr = utils.quoted(ab + ' = ' + rrepr)
6323            result += (
6324                f"\n    {useID} [ label={nStr} ]"
6325            )
6326            useID += 1
6327        result += '\n  }'
6328
6329    if consequenceKeys:
6330        result += '\n  subgraph __consequences__ {'
6331        for erepr, ab in consequenceKeys.items():
6332            nStr = utils.quoted(ab + ' = ' + erepr)
6333            result += (
6334                f"\n    {useID} [ label={nStr} ]"
6335            )
6336            useID += 1
6337        result += '\n  }'
6338
6339    if graph.mechanisms:
6340        result += '\n  subgraph __mechanisms__ {'
6341        mID: base.MechanismID
6342        mWhere: Optional[base.DecisionID]
6343        mName: base.MechanismName
6344        for (mID, (mWhere, mName)) in graph.mechanisms.items():
6345            qName = utils.quoted(mName)
6346            nStr = utils.quoted(f"{mID}@{mWhere}:{qName}")
6347            result += (
6348                f"\n    {useID} [ label={nStr} ]"
6349            )
6350            useID += 1
6351        result += '\n  }'
6352
6353    result += "\n}\n"
6354    return result
6355
6356
6357#------#
6358# JSON #
6359#------#
6360
6361T = TypeVar("T")
6362"Type var for `loadCustom`."
6363
6364
6365def loadCustom(stream: TextIO, loadAs: Type[T]) -> T:
6366    """
6367    Loads a new JSON-encodable object from the JSON data in the
6368    given text stream (e.g., a file open in read mode). See
6369    `CustomJSONDecoder` for details on the format and which object types
6370    are supported.
6371
6372    This checks that the result actually matches the specified type,
6373    raising a `TypeError` if it doesn't.
6374    """
6375    result = json.load(stream, cls=CustomJSONDecoder)
6376    if isinstance(result, loadAs):
6377        return result
6378    else:
6379        raise TypeError(
6380            f"Expected to load a {loadAs} but got a {type(result)}."
6381        )
6382
6383
6384def saveCustom(
6385    toSave: Union[  # TODO: More in this union?
6386        base.MetricSpace,
6387        core.DecisionGraph,
6388        core.DiscreteExploration,
6389    ],
6390    stream: TextIO
6391) -> None:
6392    """
6393    Saves a JSON-encodable object as JSON into the given text stream
6394    (e.g., a file open in writing mode). See `CustomJSONEncoder` for
6395    details on the format and which types are supported.
6396    """
6397    json.dump(toSave, stream, cls=CustomJSONEncoder)
6398
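# Usage sketch for the pair of functions above (the file name here is
# hypothetical):
#
#     with open('exploration.json', 'w') as fout:
#         saveCustom(myGraph, fout)
#     with open('exploration.json', 'r') as fin:
#         myGraph = loadCustom(fin, core.DecisionGraph)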
6399
6400def toJSON(obj: Any) -> str:
6401    """
6402    Defines the standard object -> JSON operation using the
6403    `CustomJSONEncoder` as well as not using `sort_keys`.
6404    """
6405    return CustomJSONEncoder(sort_keys=False).encode(obj)
6406
6407
6408def fromJSON(encoded: str) -> Any:
6409    """
6410    Defines the standard JSON -> object operation using
6411    `CustomJSONDecoder`.
6412    """
6413    return json.loads(encoded, cls=CustomJSONDecoder)
6414
6415
6416class CustomJSONEncoder(json.JSONEncoder):
6417    """
6418    A custom JSON encoder that has special protocols for handling the
6419    same objects that `CustomJSONDecoder` decodes. It handles these
6420    objects specially so that they can be decoded back to their original
6421    form.
6422
6423    Examples:
6424
6425    >>> from . import core
6426    >>> tupList = [(1, 1), (2, 2)]
6427    >>> encTup = toJSON(tupList)
6428    >>> encTup
6429    '[{"^^d": "t", "values": [1, 1]}, {"^^d": "t", "values": [2, 2]}]'
6430    >>> fromJSON(encTup) == tupList
6431    True
6432    >>> dg = core.DecisionGraph.example('simple')
6433    >>> fromJSON(toJSON(dg)) == dg
6434    True
6435    >>> dg = core.DecisionGraph.example('abc')
6436    >>> zi = dg.getZoneInfo('upZone')
6437    >>> zi
6438    ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={},\
6439 annotations=[])
6440    >>> zj = toJSON(zi)
6441    >>> zj
6442    '{"^^d": "nt", "name": "ZoneInfo", "values":\
6443 {"level": 1, "parents": {"^^d": "s", "values": []},\
6444 "contents": {"^^d": "s", "values": ["zoneA"]}, "tags": {},\
6445 "annotations": []}}'
6446    >>> fromJSON(toJSON(zi))
6447    ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={},\
6448 annotations=[])
6449    >>> fromJSON(toJSON(zi)) == zi
6450    True
6451    >>> toJSON({'a': 'b', 1: 2})
6452    '{"^^d": "d", "items": [["a", "b"], [1, 2]]}'
6453    >>> toJSON(((1, 2), (3, 4)))
6454    '{"^^d": "t", "values": [{"^^d": "t", "values": [1, 2]},\
6455 {"^^d": "t", "values": [3, 4]}]}'
6456    >>> toJSON(base.effect(set=('grate', 'open')))
6457    '{"type": "set", "applyTo": "active",\
6458 "value": {"^^d": "t",\
6459 "values": [{"^^d": "nt", "name": "MechanismSpecifier",\
6460 "values": {"domain": null, "zone": null, "decision": null, "name": "grate"}},\
6461 "open"]}, "delay": null, "charges": null, "hidden": false}'
6462    >>> j = toJSON(dg)
6463    >>> expected = (
6464    ... '{"^^d": "DG",'
6465    ... ' "props": {},'
6466    ... ' "node_links": {"directed": true,'
6467    ... ' "multigraph": true,'
6468    ... ' "graph": {},'
6469    ... ' "nodes": ['
6470    ... '{"name": "A", "domain": "main", "tags": {},'
6471    ... ' "annotations": ["This is a multi-word \\\\"annotation.\\\\""],'
6472    ... ' "zones": {"^^d": "s", "values": ["zoneA"]},'
6473    ... ' "mechanisms": {"grate": 0},'
6474    ... ' "id": 0'
6475    ... '},'
6476    ... ' {'
6477    ... '"name": "B",'
6478    ... ' "domain": "main",'
6479    ... ' "tags": {"b": 1, "tag2": "\\\\"value\\\\""},'
6480    ... ' "annotations": [],'
6481    ... ' "zones": {"^^d": "s", "values": ["zoneB"]},'
6482    ... ' "id": 1'
6483    ... '},'
6484    ... ' {'
6485    ... '"name": "C",'
6486    ... ' "domain": "main",'
6487    ... ' "tags": {"aw\\\\"ful": "ha\\'ha"},'
6488    ... ' "annotations": [],'
6489    ... ' "zones": {"^^d": "s", "values": ["zoneA"]},'
6490    ... ' "id": 2'
6491    ... '}'
6492    ... '],'
6493    ... ' "links": ['
6494    ... '{'
6495    ... '"tags": {},'
6496    ... ' "annotations": [],'
6497    ... ' "reciprocal": "right",'
6498    ... ' "source": 0,'
6499    ... ' "target": 1,'
6500    ... ' "key": "left"'
6501    ... '},'
6502    ... ' {'
6503    ... '"tags": {},'
6504    ... ' "annotations": [],'
6505    ... ' "reciprocal": "up_right",'
6506    ... ' "requirement": {"^^d": "R", "value": "grate:open"},'
6507    ... ' "source": 0,'
6508    ... ' "target": 1,'
6509    ... ' "key": "up_left"'
6510    ... '},'
6511    ... ' {'
6512    ... '"tags": {},'
6513    ... ' "annotations": ["Transition \\'annotation.\\'"],'
6514    ... ' "reciprocal": "up",'
6515    ... ' "source": 0,'
6516    ... ' "target": 2,'
6517    ... ' "key": "down"'
6518    ... '},'
6519    ... ' {'
6520    ... '"tags": {},'
6521    ... ' "annotations": [],'
6522    ... ' "reciprocal": "left",'
6523    ... ' "source": 1,'
6524    ... ' "target": 0,'
6525    ... ' "key": "right"'
6526    ... '},'
6527    ... ' {'
6528    ... '"tags": {},'
6529    ... ' "annotations": [],'
6530    ... ' "reciprocal": "up_left",'
6531    ... ' "requirement": {"^^d": "R", "value": "grate:open"},'
6532    ... ' "source": 1,'
6533    ... ' "target": 0,'
6534    ... ' "key": "up_right"'
6535    ... '},'
6536    ... ' {'
6537    ... '"tags": {"fast": 1},'
6538    ... ' "annotations": [],'
6539    ... ' "reciprocal": "down",'
6540    ... ' "source": 2,'
6541    ... ' "target": 0,'
6542    ... ' "key": "up"'
6543    ... '},'
6544    ... ' {'
6545    ... '"tags": {},'
6546    ... ' "annotations": [],'
6547    ... ' "requirement": {"^^d": "R", "value": "!(helmet)"},'
6548    ... ' "consequence": ['
6549    ... '{'
6550    ... '"type": "gain", "applyTo": "active", "value": "helmet",'
6551    ... ' "delay": null, "charges": null, "hidden": false'
6552    ... '},'
6553    ... ' {'
6554    ... '"type": "deactivate",'
6555    ... ' "applyTo": "active", "value": null,'
6556    ... ' "delay": 3, "charges": null, "hidden": false'
6557    ... '}'
6558    ... '],'
6559    ... ' "source": 2,'
6560    ... ' "target": 2,'
6561    ... ' "key": "grab_helmet"'
6562    ... '},'
6563    ... ' {'
6564    ... '"tags": {},'
6565    ... ' "annotations": [],'
6566    ... ' "requirement": {"^^d": "R", "value": "helmet"},'
6567    ... ' "consequence": ['
6568    ... '{"type": "lose", "applyTo": "active", "value": "helmet",'
6569    ... ' "delay": null, "charges": null, "hidden": false},'
6570    ... ' {"type": "gain", "applyTo": "active",'
6571    ... ' "value": {"^^d": "t", "values": ["token", 1]},'
6572    ... ' "delay": null, "charges": null, "hidden": false'
6573    ... '},'
6574    ... ' {"condition":'
6575    ... ' {"^^d": "R", "value": "token*2"},'
6576    ... ' "consequence": ['
6577    ... '{"type": "set", "applyTo": "active",'
6578    ... ' "value": {"^^d": "t", "values": ['
6579    ... '{"^^d": "nt", "name": "MechanismSpecifier",'
6580    ... ' "values": {"domain": null, "zone": null, "decision": null,'
6581    ... ' "name": "grate"}}, "open"]},'
6582    ... ' "delay": null, "charges": null, "hidden": false'
6583    ... '},'
6584    ... ' {"type": "deactivate", "applyTo": "active", "value": null,'
6585    ... ' "delay": null, "charges": null, "hidden": false'
6586    ... '}'
6587    ... '],'
6588    ... ' "alternative": []'
6589    ... '}'
6590    ... '],'
6591    ... ' "source": 2,'
6592    ... ' "target": 2,'
6593    ... ' "key": "pull_lever"'
6594    ... '}'
6595    ... ']'
6596    ... '},'
6597    ... ' "_byEdge": {"^^d": "d", "items":'
6598    ... ' [[0, {"left": 1, "up_left": 1, "down": 2}],'
6599    ... ' [1, {"right": 0, "up_right": 0}],'
6600    ... ' [2, {"up": 0, "grab_helmet": 2, "pull_lever": 2}]]},'
6601    ... ' "zones": {"zoneA":'
6602    ... ' {"^^d": "nt", "name": "ZoneInfo",'
6603    ... ' "values": {'
6604    ... '"level": 0,'
6605    ... ' "parents": {"^^d": "s", "values": ["upZone"]},'
6606    ... ' "contents": {"^^d": "s", "values": [0, 2]},'
6607    ... ' "tags": {},'
6608    ... ' "annotations": []'
6609    ... '}'
6610    ... '},'
6611    ... ' "zoneB":'
6612    ... ' {"^^d": "nt", "name": "ZoneInfo",'
6613    ... ' "values": {'
6614    ... '"level": 0,'
6615    ... ' "parents": {"^^d": "s", "values": []},'
6616    ... ' "contents": {"^^d": "s", "values": [1]},'
6617    ... ' "tags": {},'
6618    ... ' "annotations": []'
6619    ... '}'
6620    ... '},'
6621    ... ' "upZone":'
6622    ... ' {"^^d": "nt", "name": "ZoneInfo",'
6623    ... ' "values": {'
6624    ... '"level": 1,'
6625    ... ' "parents": {"^^d": "s", "values": []},'
6626    ... ' "contents": {"^^d": "s", "values": ["zoneA"]},'
6627    ... ' "tags": {},'
6628    ... ' "annotations": []'
6629    ... '}'
6630    ... '}'
6631    ... '},'
6632    ... ' "unknownCount": 0,'
6633    ... ' "equivalences": {"^^d": "d", "items": ['
6634    ... '[{"^^d": "t", "values": [0, "open"]},'
6635    ... ' {"^^d": "s", "values": ['
6636    ... '{"^^d": "R", "value": "helmet"}]}]'
6637    ... ']},'
6638    ... ' "reversionTypes": {},'
6639    ... ' "nextMechanismID": 1,'
6640    ... ' "mechanisms": {"^^d": "d", "items": ['
6641    ... '[0, {"^^d": "t", "values": [0, "grate"]}]]},'
6642    ... ' "globalMechanisms": {},'
6643    ... ' "nameLookup": {"A": [0], "B": [1], "C": [2]}'
6644    ... '}'
6645    ... )
6646    >>> for i in range(len(j)):
6647    ...     if j[i] != expected[i:i+1]:
6648    ...         print(
6649    ...             'exp: ' + expected[i-10:i+50] + '\\ngot: ' + j[i-10:i+50]
6650    ...         )
6651    ...         break
6652    >>> j == expected
6653    True
6654    >>> rec = fromJSON(j)
6655    >>> rec.nodes == dg.nodes
6656    True
6657    >>> rec.edges == dg.edges
6658    True
6659    >>> rec.unknownCount == dg.unknownCount
6660    True
6661    >>> rec.equivalences == dg.equivalences
6662    True
6663    >>> rec.reversionTypes == dg.reversionTypes
6664    True
6665    >>> rec._byEdge == dg._byEdge
6666    True
6667    >>> rec.zones == dg.zones
6668    True
6669    >>> for diff in dg.listDifferences(rec):
6670    ...     print(diff)
6671    >>> rec == dg
6672    True
6673
6674    `base.MetricSpace` example:
6675
6676    >>> ms = base.MetricSpace("test")
6677    >>> ms.addPoint([2, 3])
6678    0
6679    >>> ms.addPoint([2, 7, 0])
6680    1
6681    >>> ms.addPoint([2, 7])
6682    2
6683    >>> toJSON(ms) # TODO: ^^d entries here
6684    '{"^^d": "MS", "name": "test",\
6685 "points": {"^^d": "d", "items": [[0, [2, 3]], [1, [2, 7,\
6686 0]], [2, [2, 7]]]}, "lastID": 2}'
6687    >>> ms.removePoint(0)
6688    >>> ms.removePoint(1)
6689    >>> ms.removePoint(2)
6690    >>> toJSON(ms)
6691    '{"^^d": "MS", "name": "test", "points": {}, "lastID": 2}'
6692    >>> ms.addPoint([5, 6])
6693    3
6694    >>> ms.addPoint([7, 8])
6695    4
6696    >>> toJSON(ms)
6697    '{"^^d": "MS", "name": "test",\
6698 "points": {"^^d": "d", "items": [[3, [5, 6]], [4, [7, 8]]]}, "lastID": 4}'
6699
6700    # TODO: more examples, including one for a DiscreteExploration
6701    """
6702
6703    def default(self, o: Any) -> Any:
6704        """
6705        Re-writes objects for encoding. We re-write the following
6706        objects:
6707
6708        - `set`
6709        - `dict` (if the keys aren't all strings)
6710        - `tuple`/`namedtuple`
6711        - `ZoneInfo`
6712        - `Requirement`
6713        - `SkillCombination`
6714        - `DecisionGraph`
6715        - `DiscreteExploration`
6716        - `MetricSpace`
6717
6718        TODO: FeatureGraph...
6719        """
6720        if isinstance(o, list):
6721            return [self.default(x) for x in o]
6722
6723        elif isinstance(o, set):
6724            return {
6725                '^^d': 's',
6726                'values': sorted(
6727                    [self.default(e) for e in o],
6728                    key=lambda x: str(x)
6729                )
6730            }
6731
6732        elif isinstance(o, dict):
6733            if all(isinstance(k, str) for k in o):
6734                return {
6735                    k: self.default(v)
6736                    for k, v in o.items()
6737                }
6738            else:
6739                return {
6740                    '^^d': 'd',
6741                    'items': [
6742                        [self.default(k), self.default(v)]
6743                        for (k, v) in o.items()
6744                    ]
6745                }
6746
6747        elif isinstance(o, tuple):
6748            if hasattr(o, '_fields') and hasattr(o, '_asdict'):
6749                # Named tuple
6750                return {
6751                    '^^d': 'nt',
6752                    'name': o.__class__.__name__,
6753                    'values': {
6754                        k: self.default(v)
6755                        for k, v in o._asdict().items()
6756                    }
6757                }
6758            else:
6759                # Normal tuple
6760                return {
6761                    '^^d': 't',
6762                    "values": [self.default(e) for e in o]
6763                }
6764
6765        elif isinstance(o, base.Requirement):
6766            return {
6767                '^^d': 'R',
6768                'value': o.unparse()
6769            }
6770
6771        elif isinstance(o, base.SkillCombination):
6772            return {
6773                '^^d': 'SC',
6774                'value': o.unparse()
6775            }
6776        # TODO: Consequence, Condition, Challenge, and Effect here?
6777
6778        elif isinstance(o, core.DecisionGraph):
6779            return {
6780                '^^d': 'DG',
6781                'props': self.default(o.graph),  # type:ignore [attr-defined]
6782                'node_links': self.default(
6783                    networkx.node_link_data(o, edges="links") # type: ignore
6784                    # TODO: Fix networkx stubs
6785                ),
6786                '_byEdge': self.default(o._byEdge),
6787                'zones': self.default(o.zones),
6788                'unknownCount': o.unknownCount,
6789                'equivalences': self.default(o.equivalences),
6790                'reversionTypes': self.default(o.reversionTypes),
6791                'nextMechanismID': o.nextMechanismID,
6792                'mechanisms': self.default(o.mechanisms),
6793                'globalMechanisms': self.default(o.globalMechanisms),
6794                'nameLookup': self.default(o.nameLookup)
6795            }
6796
6797        elif isinstance(o, core.DiscreteExploration):
6798            return {
6799                '^^d': 'DE',
6800                'situations': self.default(o.situations)
6801            }
6802
6803        elif isinstance(o, base.MetricSpace):
6804            return {
6805                '^^d': 'MS',
6806                'name': o.name,
6807                'points': self.default(o.points),
6808                'lastID': o.lastID()
6809            }
6810
6811        else:
6812            return o
6813
6814    def encode(self, o: Any) -> str:
6815        """
6816        Custom encode function since we need to override behavior for
6817        tuples and dicts.
6818        """
6819        if isinstance(o, (tuple, dict, set)):
6820            o = self.default(o)
6821        elif isinstance(o, list):
6822            o = [self.default(x) for x in o]
6823
6824        try:
6825            return super().encode(o)
6826        except TypeError:
6827            return super().encode(self.default(o))
6828
6829    def iterencode(
6830        self,
6831        o: Any,
6832        _one_shot: bool = False
6833    ) -> Generator[str, None, None]:
6834        """
6835        Custom iterencode function since we need to override behavior for
6836        tuples and dicts.
6837        """
6838        if isinstance(o, (tuple, dict)):
6839            o = self.default(o)
6840
6841        yield from super().iterencode(o, _one_shot=_one_shot)
6842
6843
6844class CustomJSONDecoder(json.JSONDecoder):
6845    """
6846    A custom JSON decoder that has special protocols for handling
6847    several types, including:
6848
6849    - `set`
6850    - `tuple` & `namedtuple`
6851    - `dict` (where keys aren't all strings)
6852    - `Requirement`
6853    - `SkillCombination`
6854    - `DecisionGraph`
6855    - `DiscreteExploration`
6856    - `MetricSpace`
6857
6858    Used by `fromJSON` and `loadCustom`.
6859
6860    When initializing it, you can set a custom parse format by supplying
6861    a 'parseFormat' keyword argument; by default a standard
6862    `ParseFormat` will be used.
6863
6864    Examples:
6865
6866    >>> r = base.ReqAny([
6867    ...     base.ReqCapability('power'),
6868    ...     base.ReqTokens('money', 5)
6869    ... ])
6870    >>> s = toJSON(r)
6871    >>> s
6872    '{"^^d": "R", "value": "(power|money*5)"}'
6873    >>> l = fromJSON(s)
6874    >>> r == l
6875    True
6876    >>> o = {1, 2, 'hi'}
6877    >>> s = toJSON(o)
6878    >>> s
6879    '{"^^d": "s", "values": [1, 2, "hi"]}'
6880    >>> l = fromJSON(s)
6881    >>> o == l
6882    True
6883    >>> zi = base.ZoneInfo(1, set(), set(), {}, [])
6884    >>> s = toJSON(zi)
6885    >>> c = (
6886    ... '{"^^d": "nt", "name": "ZoneInfo", "values": {'
6887    ... '"level": 1,'
6888    ... ' "parents": {"^^d": "s", "values": []},'
6889    ... ' "contents": {"^^d": "s", "values": []},'
6890    ... ' "tags": {},'
6891    ... ' "annotations": []'
6892    ... '}}'
6893    ... )
6894    >>> s == c
6895    True
6896    >>> setm = base.effect(set=("door", "open"))
6897    >>> s = toJSON(setm)
6898    >>> f = fromJSON(s)
6899    >>> f == setm
6900    True
6901    >>> pf = ParseFormat()
6902    >>> pf.unparseEffect(f)
6903    'set door:open'
6904    >>> pf.unparseEffect(f) == pf.unparseEffect(setm)
6905    True
6906
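    The decoder can also be constructed and used directly; a minimal
    sketch:

    >>> dec = CustomJSONDecoder()
    >>> dec.decode('{"^^d": "s", "values": [1, 2]}') == {1, 2}
    True
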
6907    TODO: SkillCombination example
6908    """
6909    def __init__(self, *args, **kwargs):
6910        if 'object_hook' in kwargs:
6911            outerHook = kwargs['object_hook']
6912            kwargs['object_hook'] = (
6913                lambda o: outerHook(self.unpack(o))
6914            )
6915            # TODO: What if it's a positional argument? :(
6916        else:
6917            kwargs['object_hook'] = lambda o: self.unpack(o)
6918
6919        if 'parseFormat' in kwargs:
6920            self.parseFormat = kwargs['parseFormat']
6921            del kwargs['parseFormat']
6922        else:
6923            self.parseFormat = ParseFormat()
6924
6925        super().__init__(*args, **kwargs)
6926
6927    def unpack(self, obj: Any) -> Any:
6928        """
6929        Unpacks an object; used as the `object_hook` for decoding.
6930        """
6931        if '^^d' in obj:
6932            asType = obj['^^d']
6933            if asType == 't':
6934                return tuple(obj['values'])
6935
6936            elif asType == 'nt':
6937                g = globals()
6938                name = obj['name']
6939                values = obj['values']
6940                # Use an existing global namedtuple class if there is
6941                # one that goes by the specified name, so that we don't
6942                # create too many spurious equivalent namedtuple
6943                # classes. But fall back on creating a new namedtuple
6944                # class if we need to:
6945                ntClass = g.get(name)
6946                if (
6947                    ntClass is None
6948                 or not issubclass(ntClass, tuple)
6949                 or not hasattr(ntClass, '_asdict')
6950                ):
6951                    # Now try again specifically in the base module where
6952                # most of our namedtuples are defined (TODO: NOT this
6953                    # hack..., but it does make isinstance work...)
6954                    ntClass = getattr(base, name, None)
6955                    if (
6956                        ntClass is None
6957                     or not issubclass(ntClass, tuple)
6958                     or not hasattr(ntClass, '_asdict')
6959                    ):
6960                        # TODO: cache these...
6961                        ntClass = collections.namedtuple(  # type: ignore
6962                            name,
6963                            values.keys()
6964                        )
6965                ntClass = cast(Callable, ntClass)
6966                return ntClass(**values)
6967
6968            elif asType == 's':
6969                return set(obj['values'])
6970
6971            elif asType == 'd':
6972                return dict(obj['items'])
6973
6974            elif asType == 'R':
6975                return self.parseFormat.parseRequirement(obj['value'])
6976
6977            elif asType == 'SC':
6978                return self.parseFormat.parseSkillCombination(obj['value'])
6979
6980            elif asType == 'E':
6981                return self.parseFormat.parseEffect(obj['value'])
6982
6983            elif asType == 'Ch':
6984                return self.parseFormat.parseChallenge(obj['value'])
6985
6986            elif asType == 'Cd':
6987                return self.parseFormat.parseCondition(obj['value'])
6988
6989            elif asType == 'Cq':
6990                return self.parseFormat.parseConsequence(obj['value'])
6991
6992            elif asType == 'DG':
6993                baseGraph: networkx.MultiDiGraph = networkx.node_link_graph(
6994                    obj['node_links'],
6995                    edges="links"
6996                )  # type: ignore
6997                # TODO: Fix networkx stubs
6998                graphResult = core.DecisionGraph()
6999                # Copy over non-internal attributes
7000                for attr in dir(baseGraph):
7001                    if attr == "name":
7002                        continue
7003                    if not attr.startswith('__') or not attr.endswith('__'):
7004                        val = getattr(baseGraph, attr)
7005                        setattr(
7006                            graphResult,
7007                            attr,
7008                            copy.deepcopy(val)
7009                            # TODO: Does this copying disentangle too
7010                            # much? Which values even get copied this
7011                            # way?
7012                        )
7013
7014                if baseGraph.name != '':
7015                    graphResult.name = baseGraph.name
7016                graphResult.graph.update(obj['props'])  # type:ignore [attr-defined]  # noqa
7017                storedByEdge = obj['_byEdge']
7018                graphResult._byEdge = {
7019                    int(k): storedByEdge[k]
7020                    for k in storedByEdge
7021                }
7022                graphResult.zones = obj['zones']
7023                graphResult.unknownCount = obj['unknownCount']
7024                graphResult.equivalences = obj['equivalences']
7025                graphResult.reversionTypes = obj['reversionTypes']
7026                graphResult.nextMechanismID = obj['nextMechanismID']
7027                graphResult.mechanisms = {
7028                    int(k): v
7029                    for k, v in
7030                    obj['mechanisms'].items()
7031                }
7032                graphResult.globalMechanisms = obj['globalMechanisms']
7033                graphResult.nameLookup = obj['nameLookup']
7034                return graphResult
7035
7036            elif asType == 'DE':
7037                exResult = core.DiscreteExploration()
7038                exResult.situations = obj['situations']
7039                return exResult
7040
7041            elif asType == 'MS':
7042                msResult = base.MetricSpace(obj['name'])
7043                msResult.points = obj['points']
7044                msResult.nextID = obj['lastID'] + 1
7045                return msResult
7046
7047            else:
7048                raise NotImplementedError(
7049                    f"No special handling has been defined for"
7050                    f" decoding type '{asType}'."
7051                )
7052
7053        else:
7054            return obj
class Lexeme(enum.IntEnum):

These are the different separators, grouping characters, and keywords used as part of parsing. The characters that are actually recognized are defined as part of a Format.

domainSeparator = <Lexeme.domainSeparator: 1>
zoneSeparator = <Lexeme.zoneSeparator: 2>
partSeparator = <Lexeme.partSeparator: 3>
stateOn = <Lexeme.stateOn: 4>
stateOff = <Lexeme.stateOff: 5>
tokenCount = <Lexeme.tokenCount: 6>
effectCharges = <Lexeme.effectCharges: 7>
sepOrDelay = <Lexeme.sepOrDelay: 8>
consequenceSeparator = <Lexeme.consequenceSeparator: 9>
inCommon = <Lexeme.inCommon: 10>
isHidden = <Lexeme.isHidden: 11>
skillLevel = <Lexeme.skillLevel: 12>
wigglyLine = <Lexeme.wigglyLine: 13>
withDetails = <Lexeme.withDetails: 14>
reciprocalSeparator = <Lexeme.reciprocalSeparator: 15>
mechanismSeparator = <Lexeme.mechanismSeparator: 16>
openCurly = <Lexeme.openCurly: 17>
closeCurly = <Lexeme.closeCurly: 18>
openParen = <Lexeme.openParen: 19>
closeParen = <Lexeme.closeParen: 20>
angleLeft = <Lexeme.angleLeft: 21>
angleRight = <Lexeme.angleRight: 22>
doubleQuestionmark = <Lexeme.doubleQuestionmark: 23>
ampersand = <Lexeme.ampersand: 24>
orBar = <Lexeme.orBar: 25>
notMarker = <Lexeme.notMarker: 26>
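
For example, looking up the character(s) assigned to a lexeme in the
default format (an illustrative usage, not from the module's own
doctests):

>>> DEFAULT_FORMAT[Lexeme.zoneSeparator]
'::'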
Format = typing.Dict[Lexeme, str]

A journal format is specified using a dictionary with keys that denote journal marker types and values which are one-to-several-character strings indicating the markup used for that entry/info type.

DEFAULT_FORMAT: Dict[Lexeme, str] = {<Lexeme.domainSeparator: 1>: '//', <Lexeme.zoneSeparator: 2>: '::', <Lexeme.partSeparator: 3>: '%%', <Lexeme.stateOn: 4>: '=on', <Lexeme.stateOff: 5>: '=off', <Lexeme.tokenCount: 6>: '*', <Lexeme.effectCharges: 7>: '=', <Lexeme.sepOrDelay: 8>: ',', <Lexeme.consequenceSeparator: 9>: ';', <Lexeme.inCommon: 10>: '+c', <Lexeme.isHidden: 11>: '+h', <Lexeme.skillLevel: 12>: '^', <Lexeme.wigglyLine: 13>: '~', <Lexeme.withDetails: 14>: '%', <Lexeme.reciprocalSeparator: 15>: '/', <Lexeme.mechanismSeparator: 16>: ':', <Lexeme.openCurly: 17>: '{', <Lexeme.closeCurly: 18>: '}', <Lexeme.openParen: 19>: '(', <Lexeme.closeParen: 20>: ')', <Lexeme.angleLeft: 21>: '<', <Lexeme.angleRight: 22>: '>', <Lexeme.doubleQuestionmark: 23>: '??', <Lexeme.ampersand: 24>: '&', <Lexeme.orBar: 25>: '|', <Lexeme.notMarker: 26>: '!'}

The default parsing format.
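
Since `lex` expects a map from strings to lexemes, reversing this
dictionary yields a usable token map (an illustrative sketch, not from
the module's own doctests):

>>> tokenMap = {v: k for (k, v) in DEFAULT_FORMAT.items()}
>>> lex('zoneA::decision', tokenMap)
['zoneA', <Lexeme.zoneSeparator: 2>, 'decision']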

DEFAULT_EFFECT_NAMES: Dict[str, Literal['gain', 'lose', 'set', 'toggle', 'deactivate', 'edit', 'goto', 'bounce', 'follow', 'save']] = {'gain': 'gain', 'lose': 'lose', 'set': 'set', 'toggle': 'toggle', 'deactivate': 'deactivate', 'edit': 'edit', 'goto': 'goto', 'bounce': 'bounce', 'follow': 'follow', 'save': 'save'}

Default names for each effect type. Maps names to canonical effect type strings. A different mapping could be used to allow for writing effect names in another language, for example.

DEFAULT_FOCALIZATION_NAMES: Dict[str, Literal['singular', 'plural', 'spreading']] = {'singular': 'singular', 'plural': 'plural', 'spreading': 'spreading'}

Default names for each domain focalization type. Maps each focalization type string to itself.

DEFAULT_SF_INDICATORS: Tuple[str, str] = ('s', 'f')

Default characters used to indicate success/failure when transcribing a TransitionWithOutcomes.

class ParseWarning(builtins.Warning):
139class ParseWarning(Warning):
140    """
141    Represents a warning encountered when parsing something.
142    """
143    pass

Represents a warning encountered when parsing something.

class ParseError(builtins.ValueError):
146class ParseError(ValueError):
147    """
148    Represents an error encountered when parsing.
149    """
150    pass

Represents an error encountered when parsing.

class DotParseError(ParseError):
153class DotParseError(ParseError):
154    """
155    An error raised during parsing when incorrectly-formatted graphviz
156    "dot" data is provided. See `parseDot`.
157    """
158    pass

An error raised during parsing when incorrectly-formatted graphviz "dot" data is provided. See parseDot.

class InvalidFeatureSpecifierError(ParseError):
161class InvalidFeatureSpecifierError(ParseError):
162    """
163    An error used when a feature specifier is in the wrong format.
164    Errors with part specifiers also use this.
165    """

An error used when a feature specifier is in the wrong format. Errors with part specifiers also use this.

LexedTokens: TypeAlias = List[Union[Lexeme, str]]

When lexing, we pull apart a string into pieces, but when we recognize lexemes, we use their integer IDs in the list instead of strings, so we get a list that's a mix of ints and strings.
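
For instance (an illustrative value, mirroring the doctests for `lex`
below):

>>> lex('(abc)', {'(': Lexeme.openParen, ')': Lexeme.closeParen})
[<Lexeme.openParen: 19>, 'abc', <Lexeme.closeParen: 20>]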

GroupedTokens: TypeAlias = List[Union[Lexeme, str, ForwardRef('GroupedTokens')]]

Some parsing processes group tokens into sub-lists. This type represents LexedTokens which might also contain sub-lists, to arbitrary depth.
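
A hypothetical `GroupedTokens` value, with the parenthesized span from
the example above gathered into a sub-list:

    [[<Lexeme.openParen: 19>, 'abc', <Lexeme.closeParen: 20>]]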

GroupedRequirementParts: TypeAlias = List[Union[Lexeme, exploration.base.Requirement, ForwardRef('GroupedRequirementParts')]]

Another intermediate parsing result during requirement parsing: a list of base.Requirements possibly with some sub-lists and/or Lexemes mixed in.

def lex( characters: str, tokenMap: Optional[Dict[str, Lexeme]] = None) -> List[Union[Lexeme, str]]:
195def lex(
196    characters: str,
197    tokenMap: Optional[Dict[str, Lexeme]] = None
198) -> LexedTokens:
199    """
200    Lexes a list of tokens from a characters string. Recognizes any
201    special characters you provide in the token map, as well as
202    collections of non-mapped characters. Recognizes double-quoted
203    strings which can contain any of those (and which use
204    backslash-escapes for internal double quotes) and includes quoted
205    versions of those strings as tokens (any token string starting with a
206    double quote will be such a string). Breaks tokens on whitespace
207    outside of quotation marks, and ignores that whitespace.
208
209    Examples:
210
211    >>> lex('abc')
212    ['abc']
213    >>> lex('(abc)', {'(': 0, ')': 1})
214    [0, 'abc', 1]
215    >>> lex('{(abc)}', {'(': 0, ')': 1, '{': 2, '}': 3})
216    [2, 0, 'abc', 1, 3]
217    >>> lex('abc def')
218    ['abc', 'def']
219    >>> lex('abc   def')
220    ['abc', 'def']
221    >>> lex('abc \\n def')
222    ['abc', 'def']
223    >>> lex ('"quoted"')
224    ['"quoted"']
225    >>> lex ('"quoted  pair"')
226    ['"quoted  pair"']
227    >>> lex ('  oneWord | "two words"|"three  words words" ', {'|': 0})
228    ['oneWord', 0, '"two words"', 0, '"three  words words"']
229    >>> tokenMap = { c: i for (i, c) in enumerate("(){}~:;>,") }
230    >>> tokenMap['::'] = 9
231    >>> tokenMap['~~'] = 10
232    >>> lex(
233    ...     '{~~2:best(brains, brawn)>{set switch on}'
234    ...     '{deactivate ,1; bounce}}',
235    ...     tokenMap
236    ... )
237    [2, 10, '2', 5, 'best', 0, 'brains', 8, 'brawn', 1, 7, 2, 'set',\
238 'switch', 'on', 3, 2, 'deactivate', 8, '1', 6, 'bounce', 3, 3]
239    >>> lex('set where::mechanism state', tokenMap)
240    ['set', 'where', 9, 'mechanism', 'state']
241    >>> # Note r' doesn't take full effect 'cause we're in triple quotes
242    >>> esc = r'"escape \\\\a"'
243    >>> result = [ r'"escape \\\\a"' ]  # 'quoted' doubles the backslash
244    >>> len(esc)
245    12
246    >>> len(result[0])
247    12
248    >>> lex(esc) == result
249    True
250    >>> quoteInQuote = r'before "hello \\\\ \\" goodbye"after'
251    >>> # Note r' doesn't take full effect 'cause we're in triple quotes
252    >>> expect = ['before', r'"hello \\\\ \\" goodbye"', 'after']
253    >>> lex(quoteInQuote) == expect
254    True
255    >>> lex('O\\'Neill')
256    ["O'Neill"]
257    >>> lex('one "quote ')
258    ['one', '"quote "']
259    >>> lex('geo*15', {'*': 0})
260    ['geo', 0, '15']
261    """
262    if tokenMap is None:
263        tokenMap = {}
264    tokenStarts: Dict[str, List[str]] = {}
265    for key in sorted(tokenMap.keys(), key=lambda x: -len(x)):
266        tokenStarts.setdefault(key[:1], []).append(key)
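       # Keys are sorted longest-first above so that multi-character
       # tokens (e.g., '::') are tried before their single-character
       # prefixes (e.g., ':').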
267    tokens: LexedTokens = []
268    sofar = ''
269    inQuote = False
270    escaped = False
271    skip = 0
272    for i in range(len(characters)):
273        if skip > 0:
274            skip -= 1
275            continue
276
277        char = characters[i]
278        if escaped:
279            # TODO: Escape sequences?
280            sofar += char
281            escaped = False
282
283        elif char == '\\':
284            if inQuote:
285                escaped = True
286            else:
287                sofar += char
288
289        elif char == '"':
290            if sofar != '':
291                if inQuote:
292                    tokens.append(utils.quoted(sofar))
293                else:
294                    tokens.append(sofar)
295            sofar = ''
296            inQuote = not inQuote
297
298        elif inQuote:
299            sofar += char
300
301        elif char in tokenStarts:
302            options = tokenStarts[char]
303            hit: Optional[str] = None
304            for possibility in options:
305                lp = len(possibility)
306                if (
307                    (lp == 1 and char == possibility)
308                or characters[i:i + lp] == possibility
309                ):
310                    hit = possibility
311                    break
312
313            if hit is not None:
314                if sofar != '':
315                    tokens.append(sofar)
316                tokens.append(tokenMap[hit])
317                sofar = ''
318                skip = len(hit) - 1
319            else:  # Not actually a recognized token
320                sofar += char
321
322        elif char.isspace():
323            if sofar != '':
324                tokens.append(sofar)
325            sofar = ''
326
327        else:
328            sofar += char
329
330    if sofar != '':
331        if inQuote:
332            tokens.append(utils.quoted(sofar))
333        else:
334            tokens.append(sofar)
335
336    return tokens

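One more sketch of `lex`'s quoting behavior (same doctest conventions as
the examples above): characters from the token map are inert inside a
quoted string, so they stay part of the quoted token.

>>> lex('say "a :: b"', {'::': 9})
['say', '"a :: b"']
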
def unLex(tokens: List[Union[Lexeme, str]], tokenMap: Optional[Dict[str, Lexeme]] = None) -> str:
339def unLex(
340    tokens: LexedTokens,
341    tokenMap: Optional[Dict[str, Lexeme]] = None
342) -> str:
343    """
344    Turns lexed stuff back into a string, substituting strings back into
345    token spots by reversing the given token map. Adds quotation marks to
346    complex tokens where necessary to prevent them from re-lexing into
347    multiple tokens (but `lex` doesn't remove those, so in some cases
348    there's not a perfect round-trip unLex -> lex).
349
350    For example:
351
352    >>> unLex(['a', 'b'])
353    'a b'
354    >>> tokens = {'(': 0, ')': 1, '{': 2, '}': 3, '::': 4}
355    >>> unLex([0, 'hi', 1], tokens)
356    '(hi)'
357    >>> unLex([0, 'visit', 'zone', 4, 'decision', 1], tokens)
358    '(visit zone::decision)'
359    >>> q = unLex(['a complex token', '\\'single\\' and "double" quotes'])
360    >>> q  # unLex adds quotes
361    '"a complex token" "\\'single\\' and \\\\"double\\\\" quotes"'
362    >>> lex(q)  # Not the same as the original list
363    ['"a complex token"', '"\\'single\\' and \\\\"double\\\\" quotes"']
364    >>> lex(unLex(lex(q)))  # But further round-trips work
365    ['"a complex token"', '"\\'single\\' and \\\\"double\\\\" quotes"']
366
367    TODO: Fix this:
368    For now, it generates incorrect results when token combinations can
369    be ambiguous. These ambiguous token combinations should not ever be
370    generated by `lex` at least. For example:
371
372    >>> ambiguous = {':': 0, '::': 1}
373    >>> u = unLex(['a', 0, 0, 'b'], ambiguous)
374    >>> u
375    'a::b'
376    >>> l = lex(u, ambiguous)
377    >>> l
378    ['a', 1, 'b']
379    >>> l == ['a', 0, 0, 'b']  # doesn't round-trip
380    False
381    """
382    if tokenMap is None:
383        nTokens = 0
384        revMap = {}
385    else:
386        nTokens = len(tokenMap)
387        revMap = {y: x for (x, y) in tokenMap.items()}
388
389    prevRaw = False
390    # TODO: add spaces where necessary to disambiguate token sequences...
391    if len(revMap) != nTokens:
392        warnings.warn(
393            (
394                "Irreversible token map! Two or more tokens have the same"
395                " integer value."
396            ),
397            ParseWarning
398        )
399
400    result = ""
401    for item in tokens:
402        if isinstance(item, int):
403            try:
404                result += revMap[item]
405            except KeyError:
406                raise ValueError(
407                    f"Tokens list contains {item} but the token map"
408                    f" does not have any entry which maps to {item}."
409                )
410            prevRaw = False
411        elif isinstance(item, str):
412            if prevRaw:
413                result += ' '
414            if len(lex(item)) > 1:
415                result += utils.quoted(item)
416            else:
417                result += item
418            prevRaw = True
419        else:
420            raise TypeError(
421                f"Token list contained non-int non-str item:"
422                f" {repr(item)}"
423            )
424
425    return result

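As a round-trip sketch combining `lex` and `unLex`:

>>> tm = {'(': 0, ')': 1}
>>> toks = lex('(hi there)', tm)
>>> toks
[0, 'hi', 'there', 1]
>>> unLex(toks, tm)
'(hi there)'
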
def normalizeEnds(tokens: List, start: int, end: int) -> Tuple[int, int, int]:
432def normalizeEnds(
433    tokens: List,
434    start: int,
435    end: int
436) -> Tuple[int, int, int]:
437    """
438    Given a tokens list and start & end integers, does some bounds
439    checking and normalization on the integers: converts negative
440    indices to positive indices, and raises an `IndexError` if they're
441    out-of-bounds after conversion. Returns a tuple containing the
442    normalized start & end indices, along with the number of tokens they
443    cover.
444    """
445    totalTokens = len(tokens)
446    if start < -len(tokens):
447        raise IndexError(
448            f"Negative start index out of bounds (got {start} for"
449            f" {totalTokens} tokens)."
450        )
451    elif start >= totalTokens:
452        raise IndexError(
453            f"Start index out of bounds (got {start} for"
454            f" {totalTokens} tokens)."
455        )
456    elif start < 0:
457        start = totalTokens + start
458
459    if end < -len(tokens):
460        raise IndexError(
461            f"Negative end index out of bounds (got {end} for"
462            f" {totalTokens} tokens)."
463        )
464    elif end >= totalTokens:
465        raise IndexError(
466            f"Start index out of bounds (got {end} for"
467            f" {totalTokens} tokens)."
468        )
469    elif end < 0:
470        end = totalTokens + end
471
472    if end >= len(tokens):
473        end = len(tokens) - 1
474
475    return (start, end, (end - start) + 1)
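
A few quick sketches of `normalizeEnds` (following the doctest
conventions used elsewhere in this module): negative indices count from
the end, and out-of-bounds values raise an `IndexError`.

>>> normalizeEnds(['a', 'b', 'c', 'd'], 1, -1)
(1, 3, 3)
>>> normalizeEnds(['a', 'b', 'c', 'd'], -4, -2)
(0, 2, 3)
>>> normalizeEnds(['a'], 0, 1)
Traceback (most recent call last):
  ...
IndexError: ...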

def findSeparatedParts(tokens: List[Union[Lexeme, str]], sep: Union[str, int], start: int = 0, end: int = -1, groupStart: Union[str, int, None] = None, groupEnd: Union[str, int, None] = None) -> Generator[Tuple[int, int], None, None]:
478def findSeparatedParts(
479    tokens: LexedTokens,
480    sep: Union[str, int],
481    start: int = 0,
482    end: int = -1,
483    groupStart: Union[str, int, None] = None,
484    groupEnd: Union[str, int, None] = None
485) -> Generator[Tuple[int, int], None, None]:
486    """
487    Finds parts separated by a separator lexeme, such as ';' or ',', but
488    ignoring separators nested within groupStart/groupEnd pairs (if
489    those arguments are supplied). For each token sequence found, yields
490    a tuple containing the start index and end index for that part, with
491    separators not included in the parts.
492
493    If two separators appear in a row, the start/end pair will have a
494    start index one after the end index.
495
496    If there are no separators, yields one pair containing the start and
497    end of the entire tokens sequence.
498
499    Raises a `ParseError` if there are unbalanced grouping elements.
500
501    For example:
502
503    >>> list(findSeparatedParts(
504    ...     [ 'one' ],
505    ...     Lexeme.sepOrDelay,
506    ...     0,
507    ...     0,
508    ...     Lexeme.openParen,
509    ...     Lexeme.closeParen
510    ... ))
511    [(0, 0)]
512    >>> list(findSeparatedParts(
513    ...     [
514    ...         'best',
515    ...         Lexeme.openParen,
516    ...         'chess',
517    ...         Lexeme.sepOrDelay,
518    ...         'checkers',
519    ...         Lexeme.closeParen
520    ...     ],
521    ...     Lexeme.sepOrDelay,
522    ...     2,
523    ...     4,
524    ...     Lexeme.openParen,
525    ...     Lexeme.closeParen
526    ... ))
527    [(2, 2), (4, 4)]
528    """
529    start, end, n = normalizeEnds(tokens, start, end)
530    level = 0
531    thisStart = start
532    for i in range(start, end + 1):
533        token = tokens[i]
534        if token == sep and level == 0:
535            yield (thisStart, i - 1)
536            thisStart = i + 1
537        elif token == groupStart:
538            level += 1
539        elif token == groupEnd:
540            level -= 1
541            if level < 0:
542                raise ParseError("Unbalanced grouping tokens.")
543    if level > 0:
544        raise ParseError("Unbalanced grouping tokens.")
545    yield (thisStart, end)

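A supplementary sketch: consecutive separators produce an empty part
whose start index is one more than its end index, as noted above.

>>> list(findSeparatedParts(['a', 0, 0, 'b'], 0))
[(0, 0), (2, 1), (3, 3)]
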
K = TypeVar('K')

Type variable for dictionary keys.

V = TypeVar('V')

Type variable for dictionary values.

def checkCompleteness(name, mapping: Dict[K, V], keysSet: Optional[Set[K]] = None, valuesSet: Optional[Set[V]] = None):
554def checkCompleteness(
555    name,
556    mapping: Dict[K, V],
557    keysSet: Optional[Set[K]] = None,
558    valuesSet: Optional[Set[V]] = None
559):
560    """
561    Checks that a dictionary has a certain exact set of keys (or
562    values). Raises a `ValueError` if it finds an extra or missing key
563    or value.
564    """
565    if keysSet is not None:
566        for key in mapping.keys():
567            if key not in keysSet:
568                raise ValueError("{name} has extra key {repr(key)}.")
569
570        for key in keysSet:
571            if key not in mapping:
572                raise ValueError("{name} is missing key {repr(key)}.")
573
574    if valuesSet is not None:
575        for value in mapping.values():
576            if value not in valuesSet:
577                raise ValueError("{name} has extra value {repr(value)}.")
578
579        checkVals = mapping.values()
580        for value in valuesSet:
581            if value not in checkVals:
582                raise ValueError("{name} is missing value {repr(value)}.")
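
For example (a sketch; a complete mapping passes silently, while a
missing key raises a `ValueError`):

>>> checkCompleteness('demo', {'a': 1}, keysSet={'a'}, valuesSet={1})
>>> checkCompleteness('demo', {'a': 1}, keysSet={'a', 'b'})
Traceback (most recent call last):
  ...
ValueError: ...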

class ParseFormat:
 585class ParseFormat:
 586    """
 587    A ParseFormat manages the mapping from markers to entry types and
 588    vice versa.
 589    """
 590    def __init__(
 591        self,
 592        formatDict: Format = DEFAULT_FORMAT,
 593        effectNames: Dict[str, base.EffectType] = DEFAULT_EFFECT_NAMES,
 594        focalizationNames: Dict[
 595            str,
 596            base.DomainFocalization
 597        ] = DEFAULT_FOCALIZATION_NAMES,
 598        successFailureIndicators: Tuple[str, str] = DEFAULT_SF_INDICATORS
 599    ):
 600        """
 601        Sets up the parsing format. Requires a `Format` dictionary to
 602        define the specifics. Raises a `ValueError` unless the keys of
 603        the `Format` dictionary exactly match the `Lexeme` values.
 604        """
 605        self.formatDict = formatDict
 606        self.effectNames = effectNames
 607        self.focalizationNames = focalizationNames
 608        if (
 609            len(successFailureIndicators) != 2
 610        or any(len(i) != 1 for i in successFailureIndicators)
 611        ):
 612            raise ValueError(
 613                f"Invalid success/failure indicators: must be a pair of"
 614                f" length-1 strings. Got: {successFailureIndicators!r}"
 615            )
 616        self.successIndicator, self.failureIndicator = (
 617            successFailureIndicators
 618        )
 619
 620        # Check completeness for each dictionary
 621        checkCompleteness('formatDict', self.formatDict, set(Lexeme))
 622        checkCompleteness(
 623            'effectNames',
 624            self.effectNames,
 625            valuesSet=set(get_args(base.EffectType))
 626        )
 627        checkCompleteness(
 628            'focalizationNames',
 629            self.focalizationNames,
 630            valuesSet=set(get_args(base.DomainFocalization))
 631        )
 632
 633        # Build some reverse lookup dictionaries for specific
 634        # circumstances:
 635        self.reverseFormat = {y: x for (x, y) in self.formatDict.items()}
 636
 637        self.effectModMap = {
 638            self.formatDict[x]: x
 639            for x in [
 640                Lexeme.effectCharges,
 641                Lexeme.sepOrDelay,
 642                Lexeme.inCommon,
 643                Lexeme.isHidden
 644            ]
 645        }
 646
 647    def lex(self, content: str) -> LexedTokens:
 648        """
 649        Applies `lex` using this format's lexeme mapping.
 650        """
 651        return lex(content, self.reverseFormat)
 652
 653    def onOff(self, word: str) -> Optional[bool]:
 654        """
 655        Parses an on/off indicator and returns a boolean (`True` for on
 656        and `False` for off). Returns `None` if the word isn't either
 657        the 'on' or the 'off' word. Generates a `ParseWarning`
 658        (and still returns `None`) if the word is a case-swapped version
 659        of the 'on' or 'off' word and is not equal to either of them.
 660        """
 661        onWord = self.formatDict[Lexeme.stateOn]
 662        offWord = self.formatDict[Lexeme.stateOff]
 663
 664        # Generate warning if we suspect a case error
 665        if (
 666            word.casefold() in (onWord.casefold(), offWord.casefold())
 667        and word not in (onWord, offWord)
 668        ):
 669            warnings.warn(
 670                (
 671                    f"Word '{word}' cannot be interpreted as an on/off"
 672                    f" value, although it is almost one (the correct"
 673                    f" values are '{onWord}' and '{offWord}'."
 674                ),
 675                ParseWarning
 676            )
 677
 678        # return the appropriate value
 679        if word == onWord:
 680            return True
 681        elif word == offWord:
 682            return False
 683        else:
 684            return None
 685
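    # A quick sketch of `onOff` (assuming the default format, where
    # stateOn is '=on' and stateOff is '=off'):
    #     pf = ParseFormat()
    #     pf.onOff('=on')    # -> True
    #     pf.onOff('=off')   # -> False
    #     pf.onOff('maybe')  # -> None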
 686    def matchingBrace(
 687        self,
 688        tokens: LexedTokens,
 689        where: int,
 690        opener: int = Lexeme.openCurly,
 691        closer: int = Lexeme.closeCurly
 692    ) -> int:
 693        """
 694        Returns the index within the given tokens list of the closing
 695        curly brace which matches the open brace at the specified index.
 696        You can specify custom `opener` and/or `closer` lexemes to find
 697        matching pairs of other things. Raises a `ParseError` if there
 698        is no opening brace at the specified index, or if there isn't a
 699        matching closing brace. Handles nested braces of the specified
 700        type.
 701
 702        Examples:
 703        >>> pf = ParseFormat()
 704        >>> ob = Lexeme.openCurly
 705        >>> cb = Lexeme.closeCurly
 706        >>> pf.matchingBrace([ob, cb], 0)
 707        1
 708        >>> pf.matchingBrace([ob, cb], 1)
 709        Traceback (most recent call last):
 710          ...
 711        exploration.parsing.ParseError: ...
 712        >>> pf.matchingBrace(['hi', ob, cb], 0)
 713        Traceback (most recent call last):
 714          ...
 715        exploration.parsing.ParseError: ...
 716        >>> pf.matchingBrace(['hi', ob, cb], 1)
 717        2
 718        >>> pf.matchingBrace(['hi', ob, 'lo', cb], 1)
 719        3
 720        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 1)
 721        Traceback (most recent call last):
 722          ...
 723        exploration.parsing.ParseError: ...
 724        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 0)
 725        3
 726        >>> pf.matchingBrace([ob, ob, cb, cb], 0)
 727        3
 728        >>> pf.matchingBrace([ob, ob, cb, cb], 1)
 729        2
 730        >>> pf.matchingBrace([ob, cb, ob, cb], 0)
 731        1
 732        >>> pf.matchingBrace([ob, cb, ob, cb], 2)
 733        3
 734        >>> pf.matchingBrace([ob, cb, cb, cb], 0)
 735        1
 736        >>> pf.matchingBrace([ob, ob, ob, cb], 0)
 737        Traceback (most recent call last):
 738          ...
 739        exploration.parsing.ParseError: ...
 740        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 0)
 741        7
 742        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 1)
 743        6
 744        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 2)
 745        Traceback (most recent call last):
 746          ...
 747        exploration.parsing.ParseError: ...
 748        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 3)
 749        4
 750        >>> op = Lexeme.openParen
 751        >>> cp = Lexeme.closeParen
 752        >>> pf.matchingBrace([ob, op, ob, cp], 1, op, cp)
 753        3
 754        """
 755        if where >= len(tokens):
 756            raise ParseError(
 757                f"Out-of-bounds brace start: index {where} with"
 758                f" {len(tokens)} tokens."
 759            )
 760        if tokens[where] != opener:
 761            raise ParseError(
 762                f"Can't find matching brace for token"
 763                f" {repr(tokens[where])} at index {where} because it's"
 764                f" not an open brace."
 765            )
 766
 767        level = 1
 768        for i in range(where + 1, len(tokens)):
 769            token = tokens[i]
 770            if token == opener:
 771                level += 1
 772            elif token == closer:
 773                level -= 1
 774                if level == 0:
 775                    return i
 776
 777        raise ParseError(
 778            f"Failed to find matching curly brace from index {where}."
 779        )
 780
 781    def parseFocalization(self, word: str) -> base.DomainFocalization:
 782        """
 783        Parses a focalization type for a domain, recognizing
 784        'domainFocalizationSingular', 'domainFocalizationPlural', and
 785        'domainFocalizationSpreading'.
 786        """
 787        try:
 788            return self.focalizationNames[word]
 789        except KeyError:
 790            raise ParseError(
 791                f"Invalid domain focalization name {repr(word)}. Valid"
 792                f" name are: {repr(list(self.focalizationNames))}'."
 793            )
 794
 795    def parseTagValue(self, value: str) -> base.TagValue:
 796        """
 797        Converts a string to a tag value, following these rules:
 798
 799        1. If the string is exactly one of 'None', 'True', or 'False', we
 800            convert it to the corresponding Python value.
 801        2. If the string can be converted to an integer without raising a
 802            ValueError, we use that integer.
 803        3. If the string can be converted to a float without raising a
 804            ValueError, we use that float.
 805        4. Otherwise, it remains a string.
 806
 807        Note that there is currently no syntax for using list, dictionary,
 808        Requirement, or Consequence tag values.
 809        TODO: Support those types?
 810
 811        Examples:
 812
 813        >>> pf = ParseFormat()
 814        >>> pf.parseTagValue('hi')
 815        'hi'
 816        >>> pf.parseTagValue('3')
 817        3
 818        >>> pf.parseTagValue('3.0')
 819        3.0
 820        >>> pf.parseTagValue('True')
 821        True
 822        >>> pf.parseTagValue('False')
 823        False
 824        >>> pf.parseTagValue('None') is None
 825        True
 826        >>> pf.parseTagValue('none')
 827        'none'
 828        """
 829        # TODO: Allow these keywords to be redefined?
 830        if value == 'True':
 831            return True
 832        elif value == 'False':
 833            return False
 834        elif value == 'None':
 835            return None
 836        else:
 837            try:
 838                return int(value)
 839            except ValueError:
 840                try:
 841                    return float(value)
 842                except ValueError:
 843                    return value
 844
 845    def unparseTagValue(self, value: base.TagValue) -> str:
 846        """
 847        Converts a tag value into a string that would be parsed back into a
 848        tag value via `parseTagValue`. Currently does not work for list,
 849        dictionary, Requirement, or Consequence values.
 850        TODO: Those
 851        """
 852        return str(value)
 853
 854    def hasZoneParts(self, name: str) -> bool:
 855        """
 856        Returns true if the specified name contains zone parts (using
 857        the `zoneSeparator`).
 858        """
 859        return self.formatDict[Lexeme.zoneSeparator] in name
 860
 861    def splitZone(
 862        self,
 863        name: str
 864    ) -> Tuple[List[base.Zone], base.DecisionName]:
 865        """
 866        Splits a decision name that includes zone information into the
 867        list-of-zones part and the decision part. If there is no zone
 868        information in the name, the list-of-zones will be an empty
 869        list.
 870        """
 871        sep = self.formatDict[Lexeme.zoneSeparator]
 872        parts = name.split(sep)
 873        return (list(parts[:-1]), parts[-1])
 874
 875    def prefixWithZone(
 876        self,
 877        name: base.DecisionName,
 878        zone: base.Zone
 879    ) -> base.DecisionName:
 880        """
 881        Returns the given decision name, prefixed with the given zone
 882        name. Does NOT check whether the decision name already includes
 883        a prefix or not.
 884        """
 885        return zone + self.formatDict[Lexeme.zoneSeparator] + name
 886
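    # A sketch of the two zone helpers above (assuming the default '::'
    # zone separator):
    #     pf = ParseFormat()
    #     pf.splitZone('zoneA::zoneB::room')  # -> (['zoneA', 'zoneB'], 'room')
    #     pf.splitZone('room')                # -> ([], 'room')
    #     pf.prefixWithZone('room', 'zoneA')  # -> 'zoneA::room'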
 887    def parseAnyTransitionFromTokens(
 888        self,
 889        tokens: LexedTokens,
 890        start: int = 0
 891    ) -> Tuple[base.TransitionWithOutcomes, int]:
 892        """
 893        Parses a `base.TransitionWithOutcomes` from a tokens list,
 894        accepting either a transition name or a transition name followed
 895        by a `Lexeme.withDetails` followed by a string of success and
 896        failure indicator characters. Returns a tuple containing a
 897        `base.TransitionWithOutcomes` and an integer indicating the end
 898        index of the parsed item within the tokens.
 899        """
 900        # Normalize start index so we can do index math
 901        if start < 0:
 902            useIndex = len(tokens) + start
 903        else:
 904            useIndex = start
 905
 906        try:
 907            first = tokens[useIndex]
 908        except IndexError:
 909            raise ParseError(
 910                f"Invalid token index: {start!r} among {len(tokens)}"
 911                f" tokens."
 912            )
 913
 914        if isinstance(first, Lexeme):
 915            raise ParseError(
 916                f"Expecting a transition name (possibly with a"
 917                f" success/failure indicator string) but first token is"
 918                f" {first!r}."
 919            )
 920
 921        try:
 922            second = tokens[useIndex + 1]
 923            third = tokens[useIndex + 2]
 924        except IndexError:
 925            return ((first, []), useIndex)
 926
 927        if second != Lexeme.withDetails or isinstance(third, Lexeme):
 928            return ((first, []), useIndex)
 929
 930        outcomes = []
 931        for char in third:
 932            if char == self.successIndicator:
 933                outcomes.append(True)
 934            elif char == self.failureIndicator:
 935                outcomes.append(False)
 936            else:
 937                return ((first, []), useIndex)
 938
 939        return ((first, outcomes), useIndex + 2)
 940
 941    def parseTransitionWithOutcomes(
 942        self,
 943        content: str
 944    ) -> base.TransitionWithOutcomes:
 945        """
 946        Takes a transition that may have outcomes listed as a series
 947        of success/failure characters after a `Lexeme.withDetails`
 948        marker and returns the corresponding `TransitionWithOutcomes`
 949        tuple. Calls `lex` and then `parseAnyTransitionFromTokens`.
 950        """
 951        return self.parseAnyTransitionFromTokens(self.lex(content))[0]
 952
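    # A sketch of `parseTransitionWithOutcomes` (assuming the default '%'
    # withDetails marker, and assuming the default success/failure
    # indicators are 's' and 'f'):
    #     pf = ParseFormat()
    #     pf.parseTransitionWithOutcomes('door')      # -> ('door', [])
    #     pf.parseTransitionWithOutcomes('door%sfs')  # -> ('door', [True, False, True])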
 953    def unparseTransitionWithOutcomes(
 954        self,
 955        transition: base.AnyTransition
 956    ) -> str:
 957        """
 958        Turns a `base.AnyTransition` back into a string that would parse
 959        to an equivalent `base.TransitionWithOutcomes` via
 960        `parseTransitionWithOutcomes`. If a bare `base.Transition` is
 961        given, returns a string that would result in a
 962        `base.TransitionWithOutcomes` that has an empty outcomes
 963        sequence.
 964        """
 965        if isinstance(transition, base.Transition):
 966            return transition
 967        elif (
 968            isinstance(transition, tuple)
 969        and len(transition) == 2
 970        and isinstance(transition[0], base.Transition)
 971        and isinstance(transition[1], list)
 972        and all(isinstance(sfi, bool) for sfi in transition[1])
 973        ):
 974            if len(transition[1]) == 0:
 975                return transition[0]
 976            else:
 977                result = transition[0] + self.formatDict[Lexeme.withDetails]
 978                for outcome in transition[1]:
 979                    if outcome:
 980                        result += self.successIndicator
 981                    else:
 982                        result += self.failureIndicator
 983                return result
 984        else:
 985            raise TypeError(
 986                f"Invalid AnyTransition: neither a string, nor a"
 987                f" length-2 tuple consisting of a string followed by a"
 988                f" list of booleans. Got: {transition!r}"
 989            )
 990
 991    def parseSpecificTransition(
 992        self,
 993        content: str
 994    ) -> Tuple[base.DecisionName, base.Transition]:
 995        """
 996        Splits a decision:transition pair to the decision and transition
 997        part, using a custom separator if one is defined.
 998        """
 999        sep = self.formatDict[Lexeme.withDetails]
1000        n = content.count(sep)
1001        if n == 0:
1002            raise ParseError(
1003                f"Cannot split '{content}' into a decision name and a"
1004                f" transition name (no separator '{sep}' found)."
1005            )
1006        elif n > 1:
1007            raise ParseError(
1008                f"Cannot split '{content}' into a decision name and a"
1009                f" transition name (too many ({n}) '{sep}' separators"
1010                f" found)."
1011            )
1012        else:
1013            return cast(
1014                Tuple[base.DecisionName, base.Transition],
1015                tuple(content.split(sep))
1016            )
1017
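    # A sketch of `parseSpecificTransition` (assuming the default '%'
    # withDetails separator):
    #     pf = ParseFormat()
    #     pf.parseSpecificTransition('room%east')  # -> ('room', 'east')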
1018    def splitDirections(
1019        self,
1020        content: str
1021    ) -> Tuple[Optional[str], Optional[str]]:
1022        """
1023        Splits a piece of text using the `Lexeme.reciprocalSeparator`
1024        into two pieces. If there is no separator, the second piece will
1025        be `None`; if either side of the separator is blank, that side
1026        will be `None`, and if there is more than one separator, a
1027        `ParseError` will be raised. Whitespace will be stripped from
1028        both sides of each result.
1029
1030        Examples:
1031
1032        >>> pf = ParseFormat()
1033        >>> pf.splitDirections('abc / def')
1034        ('abc', 'def')
1035        >>> pf.splitDirections('abc def ')
1036        ('abc def', None)
1037        >>> pf.splitDirections('abc def /')
1038        ('abc def', None)
1039        >>> pf.splitDirections('/abc def')
1040        (None, 'abc def')
1041        >>> pf.splitDirections('a/b/c') # doctest: +IGNORE_EXCEPTION_DETAIL
1042        Traceback (most recent call last):
1043          ...
1044        ParseError: ...
1045        """
1046        sep = self.formatDict[Lexeme.reciprocalSeparator]
1047        count = content.count(sep)
1048        if count > 1:
1049            raise ParseError(
1050                f"Too many split points ('{sep}') in content:"
1051                f" '{content}' (only one is allowed)."
1052            )
1053
1054        elif count == 1:
1055            before, after = content.split(sep)
1056            before = before.strip()
1057            after = after.strip()
1058            return (before or None, after or None)
1059
1060        else: # no split points
1061            stripped = content.strip()
1062            if stripped:
1063                return stripped, None
1064            else:
1065                return None, None
1066
1067    def parseItem(
1068        self,
1069        item: str
1070    ) -> Union[
1071        base.Capability,
1072        Tuple[base.Token, int],
1073        Tuple[base.MechanismName, base.MechanismState]
1074    ]:
1075        """
1076        Parses an item, which is a capability (just a string), a
1077        token-type*number pair (returned as a tuple with the number
1078        converted to an integer), or a mechanism-name:state pair
1079        (returned as a tuple with the state as a string). The
1080        `Lexeme.tokenCount` and `Lexeme.mechanismSeparator` format
1081        values determine the separators that this looks for.
1082        """
1083        tsep = self.formatDict[Lexeme.tokenCount]
1084        msep = self.formatDict[Lexeme.mechanismSeparator]
1085        if tsep in item:
1086            # It's a token w/ an associated count
1087            parts = item.split(tsep)
1088            if len(parts) != 2:
1089                raise ParseError(
1090                    f"Item '{item}' has a '{tsep}' but doesn't separate"
1091                    f" into a token type and a count."
1092                )
1093            typ, count = parts
1094            try:
1095                num = int(count)
1096            except ValueError:
1097                raise ParseError(
1098                    f"Item '{item}' has invalid token count '{count}'."
1099                )
1100
1101            return (typ, num)
1102        elif msep in item:
1103            parts = item.split(msep)
1104            mechanism = msep.join(parts[:-1])
1105            state = parts[-1]
1106            if mechanism.endswith(msep):
1107                # Just a zone-qualified name...
1108                return item
1109            else:
1110                return (mechanism, state)
1111        else:
1112            # It's just a capability
1113            return item
1114
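    # A sketch of `parseItem` (assuming the default '*' tokenCount and
    # ':' mechanismSeparator markers):
    #     pf = ParseFormat()
    #     pf.parseItem('jump')       # -> 'jump' (a capability)
    #     pf.parseItem('coin*3')     # -> ('coin', 3) (a token count)
    #     pf.parseItem('gate:open')  # -> ('gate', 'open') (a mechanism state)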
1115    def unparseDecisionSpecifier(self, spec: base.DecisionSpecifier) -> str:
1116        """
1117        Turns a decision specifier back into a string, which would be
1118        parsed as a decision specifier as part of various different
1119        things.
1120
1121        For example:
1122
1123        >>> pf = ParseFormat()
1124        >>> pf.unparseDecisionSpecifier(
1125        ...     base.DecisionSpecifier(None, None, 'where')
1126        ... )
1127        'where'
1128        >>> pf.unparseDecisionSpecifier(
1129        ...     base.DecisionSpecifier(None, 'zone', 'where')
1130        ... )
1131        'zone::where'
1132        >>> pf.unparseDecisionSpecifier(
1133        ...     base.DecisionSpecifier('domain', 'zone', 'where')
1134        ... )
1135        'domain//zone::where'
1136        >>> pf.unparseDecisionSpecifier(
1137        ...     base.DecisionSpecifier('domain', None, 'where')
1138        ... )
1139        'domain//where'
1140        """
1141        result = spec.name
1142        if spec.zone is not None:
1143            result = (
1144                spec.zone
1145              + self.formatDict[Lexeme.zoneSeparator]
1146              + result
1147            )
1148        if spec.domain is not None:
1149            result = (
1150                spec.domain
1151              + self.formatDict[Lexeme.domainSeparator]
1152              + result
1153            )
1154        return result
1155
1156    def unparseMechanismSpecifier(
1157        self,
1158        spec: base.MechanismSpecifier
1159    ) -> str:
1160        """
1161        Turns a mechanism specifier back into a string, which would be
1162        parsed as a mechanism specifier as part of various different
1163        things. Note that a mechanism specifier with a zone part but no
1164        decision part is not valid, since it would parse as a decision
1165        part instead.
1166
1167        For example:
1168
1169        >>> pf = ParseFormat()
1170        >>> pf.unparseMechanismSpecifier(
1171        ...     base.MechanismSpecifier(None, None, None, 'lever')
1172        ... )
1173        'lever'
1174        >>> pf.unparseMechanismSpecifier(
1175        ...     base.MechanismSpecifier('domain', 'zone', 'decision', 'door')
1176        ... )
1177        'domain//zone::decision::door'
1178        >>> pf.unparseMechanismSpecifier(
1179        ...     base.MechanismSpecifier('domain', None, None, 'door')
1180        ... )
1181        'domain//door'
1182        >>> pf.unparseMechanismSpecifier(
1183        ...     base.MechanismSpecifier(None, 'a', 'b', 'door')
1184        ... )
1185        'a::b::door'
1186        >>> pf.unparseMechanismSpecifier(
1187        ...     base.MechanismSpecifier(None, 'a', None, 'door')
1188        ... )
1189        Traceback (most recent call last):
1190        ...
1191        exploration.base.InvalidMechanismSpecifierError...
1192        >>> pf.unparseMechanismSpecifier(
1193        ...     base.MechanismSpecifier(None, None, 'a', 'door')
1194        ... )
1195        'a::door'
1196        """
1197        if spec.decision is None and spec.zone is not None:
1198            raise base.InvalidMechanismSpecifierError(
1199                f"Mechanism specifier has a zone part but no decision"
1200                f" part; it cannot be unparsed since it would parse"
1201                f" differently:\n{spec}"
1202            )
1203        result = spec.name
1204        if spec.decision is not None:
1205            result = (
1206                spec.decision
1207              + self.formatDict[Lexeme.zoneSeparator]
1208              + result
1209            )
1210        if spec.zone is not None:
1211            result = (
1212                spec.zone
1213              + self.formatDict[Lexeme.zoneSeparator]
1214              + result
1215            )
1216        if spec.domain is not None:
1217            result = (
1218                spec.domain
1219              + self.formatDict[Lexeme.domainSeparator]
1220              + result
1221            )
1222        return result
1223
1224    def effectType(self, effectMarker: str) -> Optional[base.EffectType]:
1225        """
1226        Returns the `base.EffectType` string corresponding to the
1227        given effect marker string. Returns `None` for an unrecognized
1228        marker.
1229        """
1230        return self.effectNames.get(effectMarker)
1231
1232    def parseCommandFromTokens(
1233        self,
1234        tokens: LexedTokens,
1235        start: int = 0,
1236        end: int = -1
1237    ) -> commands.Command:
1238        """
1239        Given tokens that specify a `commands.Command`, parses that
1240        command and returns it. Really just turns the tokens back into
1241        strings and calls `commands.command`.
1242
1243        For example:
1244
1245        >>> pf = ParseFormat()
1246        >>> t = ['val', '5']
1247        >>> c = commands.command(*t)
1248        >>> pf.parseCommandFromTokens(t) == c
1249        True
1250        >>> t = ['op', Lexeme.tokenCount, '$val', '$val']
1251        >>> c = commands.command('op', '*', '$val', '$val')
1252        >>> pf.parseCommandFromTokens(t) == c
1253        True
1254        """
1255        start, end, nTokens = normalizeEnds(tokens, start, end)
1256        args: List[str] = []
1257        for token in tokens[start:end + 1]:
1258            if isinstance(token, Lexeme):
1259                args.append(self.formatDict[token])
1260            else:
1261                args.append(token)
1262
1263        if len(args) == 0:
1264            raise ParseError(
1265                f"No arguments for command:\n{tokens[start:end + 1]}"
1266            )
1267        return commands.command(*args)
1268
1269    def unparseCommand(self, command: commands.Command) -> str:
1270        """
1271        Turns a `Command` back into the string that would produce that
1272        command when parsed using `parseCommandList`.
1273
1274        Note that the results will be more explicit in some cases than what
1275        `parseCommandList` would accept as input.
1276
1277        For example:
1278
1279        >>> pf = ParseFormat()
1280        >>> pf.unparseCommand(
1281        ...     commands.LiteralValue(command='val', value='5')
1282        ... )
1283        'val 5'
1284        >>> pf.unparseCommand(
1285        ...     commands.LiteralValue(command='val', value='"5"')
1286        ... )
1287        'val "5"'
1288        >>> pf.unparseCommand(
1289        ...     commands.EstablishCollection(
1290        ...         command='empty',
1291        ...         collection='list'
1292        ...     )
1293        ... )
1294        'empty list'
1295        >>> pf.unparseCommand(
1296        ...     commands.AppendValue(command='append', value='$_')
1297        ... )
1298        'append $_'
1299        """
1300        candidate = None
1301        for k, v in commands.COMMAND_SETUP.items():
1302            if v[0] == type(command):
1303                if candidate is None:
1304                    candidate = k
1305                else:
1306                    raise ValueError(
1307                        f"COMMAND_SETUP includes multiple keys with"
1308                        f" {type(command)} as their value type:"
1309                        f" '{candidate}' and '{k}'."
1310                    )
1311
1312        if candidate is None:
1313            raise ValueError(
1314                f"COMMAND_SETUP has no key with {type(command)} as its"
1315                f" value type."
1316            )
1317
1318        result = candidate
1319        for x in command[1:]:
1320            # TODO: Is this hack good enough?
1321            result += ' ' + str(x)
1322        return result
1323
1324    def unparseCommandList(self, commands: List[commands.Command]) -> str:
1325        """
1326        Takes a list of commands and returns a string that would parse
1327        into them using `parseOneEffectArg`. The result contains
1328        newlines and indentation to make it easier to read.
1329
1330        For example:
1331
1332        >>> pf = ParseFormat()
1333        >>> pf.unparseCommandList(
1334        ...     [commands.command('val', '5'), commands.command('pop')]
1335        ... )
1336        '{\\n  val 5;\\n  pop;\\n}'
1337        """
1338        result = self.formatDict[Lexeme.openCurly]
1339        for cmd in commands:
1340            result += f'\n  {self.unparseCommand(cmd)};'
1341        if len(commands) > 0:
1342            result += '\n'
1343        return result + self.formatDict[Lexeme.closeCurly]
1344
1345    def parseCommandListFromTokens(
1346        self,
1347        tokens: LexedTokens,
1348        start: int = 0
1349    ) -> Tuple[List[commands.Command], int]:
1350        """
1351        Parses a command list from a list of lexed tokens, which must
1352        start with `Lexeme.openCurly`. Returns the parsed command list
1353        as a list of `commands.Command` objects, along with the end
1354        index of that command list (which will be the matching curly
1355        brace.
1356        """
1357        end = self.matchingBrace(
1358            tokens,
1359            start,
1360            Lexeme.openCurly,
1361            Lexeme.closeCurly
1362        )
1363        parts = list(
1364            findSeparatedParts(
1365                tokens,
1366                Lexeme.consequenceSeparator,
1367                start + 1,
1368                end - 1,
1369                Lexeme.openCurly,
1370                Lexeme.closeCurly,
1371            )
1372        )
1373        return (
1374            [
1375                self.parseCommandFromTokens(tokens, fromIndex, toIndex)
1376                for fromIndex, toIndex in parts
1377                if fromIndex <= toIndex  # ignore empty parts
1378            ],
1379            end
1380        )
1381
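    # A sketch of `parseCommandListFromTokens`, mirroring the
    # `unparseCommandList` doctest above:
    #     pf = ParseFormat()
    #     tokens = [
    #         Lexeme.openCurly, 'val', '5',
    #         Lexeme.consequenceSeparator, 'pop', Lexeme.closeCurly
    #     ]
    #     pf.parseCommandListFromTokens(tokens)
    #     # -> ([commands.command('val', '5'), commands.command('pop')], 5)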
1382    def parseOneEffectArg(
1383        self,
1384        tokens: LexedTokens,
1385        start: int = 0,
1386        limit: Optional[int] = None
1387    ) -> Tuple[
1388        Union[
1389            base.Capability,  # covers 'str' possibility
1390            Tuple[base.Token, base.TokenCount],
1391            Tuple[Literal['skill'], base.Skill, base.Level],
1392            Tuple[base.MechanismSpecifier, base.MechanismState],
1393            base.DecisionSpecifier,
1394            base.DecisionID,
1395            Literal[Lexeme.inCommon, Lexeme.isHidden],
1396            Tuple[Literal[Lexeme.sepOrDelay, Lexeme.effectCharges], int],
1397            List[commands.Command]
1398        ],
1399        int
1400    ]:
1401        """
1402        Looks at tokens starting at the specified position and parses
1403        one or more of them as an effect argument (an argument that
1404        could be given to `base.effect`). Looks at various key `Lexeme`s
1405        to determine which type to use.
1406
1407        Items in the tokens list beyond the specified limit will not be
1408        considered, even when they in theory could be grouped with items
1409        up to the limit into a more complex argument.
1410
1411        For example:
1412
1413        >>> pf = ParseFormat()
1414        >>> pf.parseOneEffectArg(['hi'])
1415        ('hi', 0)
1416        >>> pf.parseOneEffectArg(['hi'], 1)
1417        Traceback (most recent call last):
1418        ...
1419        IndexError...
1420        >>> pf.parseOneEffectArg(['hi', 'bye'])
1421        ('hi', 0)
1422        >>> pf.parseOneEffectArg(['hi', 'bye'], 1)
1423        ('bye', 1)
1424        >>> pf.parseOneEffectArg(
1425        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1426        ...     0
1427        ... )
1428        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1429 name='gate'), 'open'), 2)
1430        >>> pf.parseOneEffectArg(
1431        ...     ['set', 'gate', Lexeme.mechanismSeparator, 'open'],
1432        ...     1
1433        ... )
1434        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1435 name='gate'), 'open'), 3)
1436        >>> pf.parseOneEffectArg(
1437        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1438        ...     1
1439        ... )
1440        Traceback (most recent call last):
1441        ...
1442        exploration.parsing.ParseError...
1443        >>> pf.parseOneEffectArg(
1444        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1445        ...     2
1446        ... )
1447        ('open', 2)
1448        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, '10'], 0)
1449        (('gold', 10), 2)
1450        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, 'ten'], 0)
1451        Traceback (most recent call last):
1452        ...
1453        exploration.parsing.ParseError...
1454        >>> pf.parseOneEffectArg([Lexeme.inCommon], 0)
1455        (<Lexeme.inCommon: ...>, 0)
1456        >>> pf.parseOneEffectArg([Lexeme.isHidden], 0)
1457        (<Lexeme.isHidden: ...>, 0)
1458        >>> pf.parseOneEffectArg([Lexeme.tokenCount, '3'], 0)
1459        Traceback (most recent call last):
1460        ...
1461        exploration.parsing.ParseError...
1462        >>> pf.parseOneEffectArg([Lexeme.effectCharges, '3'], 0)
1463        ((<Lexeme.effectCharges: ...>, 3), 1)
1464        >>> pf.parseOneEffectArg([Lexeme.tokenCount, 3], 0)  # int is a lexeme
1465        Traceback (most recent call last):
1466        ...
1467        exploration.parsing.ParseError...
1468        >>> pf.parseOneEffectArg([Lexeme.sepOrDelay, '-2'], 0)
1469        ((<Lexeme.sepOrDelay: ...>, -2), 1)
1470        >>> pf.parseOneEffectArg(['agility', Lexeme.skillLevel, '3'], 0)
1471        (('skill', 'agility', 3), 2)
1472        >>> pf.parseOneEffectArg(
1473        ...     [
1474        ...         'main',
1475        ...         Lexeme.domainSeparator,
1476        ...         'zone',
1477        ...         Lexeme.zoneSeparator,
1478        ...         'decision',
1479        ...         Lexeme.zoneSeparator,
1480        ...         'compass',
1481        ...         Lexeme.mechanismSeparator,
1482        ...         'north',
1483        ...         'south',
1484        ...         'east',
1485        ...         'west'
1486        ...     ],
1487        ...     0
1488        ... )
1489        ((MechanismSpecifier(domain='main', zone='zone',\
1490 decision='decision', name='compass'), 'north'), 8)
1491        >>> pf.parseOneEffectArg(
1492        ...     [
1493        ...         'before',
1494        ...         'main',
1495        ...         Lexeme.domainSeparator,
1496        ...         'zone',
1497        ...         Lexeme.zoneSeparator,
1498        ...         'decision',
1499        ...         Lexeme.zoneSeparator,
1500        ...         'compass',
1501        ...         'north',
1502        ...         'south',
1503        ...         'east',
1504        ...         'west'
1505        ...     ],
1506        ...     1
1507        ... )  # a mechanism specifier without a state will become a
1508        ...    # decision specifier
1509        (DecisionSpecifier(domain='main', zone='zone',\
1510 name='decision'), 5)
1511        >>> tokens = [
1512        ...     'set',
1513        ...     'main',
1514        ...     Lexeme.domainSeparator,
1515        ...     'zone',
1516        ...     Lexeme.zoneSeparator,
1517        ...     'compass',
1518        ...     'north',
1519        ...     'bounce',
1520        ... ]
1521        >>> pf.parseOneEffectArg(tokens, 0)
1522        ('set', 0)
1523        >>> pf.parseDecisionSpecifierFromTokens(tokens, 1)
1524        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1525        >>> pf.parseOneEffectArg(tokens, 1)
1526        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1527        >>> pf.parseOneEffectArg(tokens, 6)
1528        ('north', 6)
1529        >>> pf.parseOneEffectArg(tokens, 7)
1530        ('bounce', 7)
1531        >>> pf.parseOneEffectArg(
1532        ...     [
1533        ...         "fort", Lexeme.zoneSeparator, "gate",
1534        ...             Lexeme.mechanismSeparator, "open",
1535        ...     ],
1536        ...     0
1537        ... )
1538        ((MechanismSpecifier(domain=None, zone=None, decision='fort',\
1539 name='gate'), 'open'), 4)
1540        >>> pf.parseOneEffectArg(
1541        ...     [Lexeme.openCurly, 'val', '5', Lexeme.closeCurly],
1542        ...     0
1543        ... ) == ([commands.command('val', '5')], 3)
1544        True
1545        >>> a = [
1546        ...     Lexeme.openCurly, 'val', '5', Lexeme.closeCurly,
1547        ...     Lexeme.openCurly, 'append', Lexeme.consequenceSeparator,
1548        ...     'pop', Lexeme.closeCurly
1549        ... ]
1550        >>> cl = [
1551        ...     [commands.command('val', '5')],
1552        ...     [commands.command('append'), commands.command('pop')]
1553        ... ]
1554        >>> pf.parseOneEffectArg(a, 0) == (cl[0], 3)
1555        True
1556        >>> pf.parseOneEffectArg(a, 4) == (cl[1], 8)
1557        True
1558        >>> pf.parseOneEffectArg(a, 1)
1559        ('val', 1)
1560        >>> pf.parseOneEffectArg(a, 2)
1561        ('5', 2)
1562        >>> pf.parseOneEffectArg(a, 3)
1563        Traceback (most recent call last):
1564        ...
1565        exploration.parsing.ParseError...
1566        """
1567        start, limit, nTokens = normalizeEnds(
1568            tokens,
1569            start,
1570            limit if limit is not None else -1
1571        )
1572        if nTokens == 0:
1573            raise ParseError("No effect arguments available.")
1574
1575        first = tokens[start]
1576
1577        if nTokens == 1:
1578            if first in (Lexeme.inCommon, Lexeme.isHidden):
1579                return (first, start)
1580            elif not isinstance(first, str):
1581                raise ParseError(
1582                    f"Only one token and it's a special character"
1583                    f" ({first} = {repr(self.formatDict[first])})"
1584                )
1585            else:
1586                return (cast(base.Capability, first), start)
1587
1588        assert (nTokens > 1)
1589
1590        second = tokens[start + 1]
1591
1592        # Command lists start with an open curly brace and effect
 1593        # modifiers start with a Lexeme, but nothing else may
1594        if first == Lexeme.openCurly:
1595            return self.parseCommandListFromTokens(tokens, start)
1596        elif first in (Lexeme.inCommon, Lexeme.isHidden):
1597            return (first, start)
1598        elif first in (Lexeme.sepOrDelay, Lexeme.effectCharges):
1599            if not isinstance(second, str):
1600                raise ParseError(
1601                    f"Token following a modifier that needs a count"
1602                    f" must be a string in tokens:"
1603                    f"\n{tokens[start:limit or len(tokens)]}"
1604                )
1605            try:
1606                val = int(second)
1607            except ValueError:
1608                raise ParseError(
1609                    f"Token following a modifier that needs a count"
1610                    f" must be convertible to an int:"
1611                    f"\n{tokens[start:limit or len(tokens)]}"
1612                )
1613
1614            first = cast(
1615                Literal[Lexeme.sepOrDelay, Lexeme.effectCharges],
1616                first
1617            )
1618            return ((first, val), start + 1)
1619        elif not isinstance(first, str):
1620            raise ParseError(
1621                f"First token must be a string unless it's a modifier"
1622                f" lexeme or command/reversion-set opener. Got:"
1623                f"\n{tokens[start:limit or len(tokens)]}"
1624            )
1625
1626        # If we have two strings in a row, then the first is our parsed
1627        # value alone and we'll parse the second separately.
1628        if isinstance(second, str):
1629            return (first, start)
1630        elif second in (Lexeme.inCommon, Lexeme.isHidden):
1631            return (first, start)
1632
1633        # Must have at least 3 tokens at this point, or else we need to
1634        # have the inCommon or isHidden lexeme second.
1635        if nTokens < 3:
1636            return (first, start)
1637
1638        third = tokens[start + 2]
1639        if not isinstance(third, str):
1640            return (first, start)
1641
1642        second = cast(Lexeme, second)
1643        third = cast(str, third)
1644
1645        if second in (Lexeme.tokenCount, Lexeme.skillLevel):
1646            try:
1647                num = int(third)
1648            except ValueError:
1649                raise ParseError(
1650                    f"Invalid effect tokens: count for Tokens or level"
1651                    f" for Skill must be convertible to an integer."
1652                    f"\n{tokens[start:limit + 1]}"
1653                )
1654            if second == Lexeme.tokenCount:
1655                return ((first, num), start + 2)  # token/count pair
1656            else:
1657                return (('skill', first, num), start + 2)  # skill/level triple
1658
1659        elif second == Lexeme.mechanismSeparator:  # bare mechanism
1660            return (
1661                (
1662                    base.MechanismSpecifier(
1663                        domain=None,
1664                        zone=None,
1665                        decision=None,
1666                        name=first
1667                    ),
1668                    third
1669                ),
1670                start + 2
1671            )
1672
1673        elif second in (Lexeme.domainSeparator, Lexeme.zoneSeparator):
1674            try:
1675                mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
1676                    tokens,
1677                    start
1678                )  # works whether it's a mechanism or decision specifier...
1679            except ParseError:
1680                return self.parseDecisionSpecifierFromTokens(tokens, start)
1681            if mEnd + 2 > limit:
1682                # No room for following mechanism separator + state
1683                return self.parseDecisionSpecifierFromTokens(tokens, start)
1684            sep = tokens[mEnd + 1]
1685            after = tokens[mEnd + 2]
1686            if sep == Lexeme.mechanismSeparator:
1687                if not isinstance(after, str):
1688                    raise ParseError(
1689                        f"Mechanism separator not followed by state:"
1690                        f"\n{tokens[start]}"
1691                    )
1692                return ((mSpec, after), mEnd + 2)
1693            else:
1694                # No mechanism separator afterwards
1695                return self.parseDecisionSpecifierFromTokens(tokens, start)
1696
1697        else:  # unrecognized as a longer combo
1698            return (first, start)
1699
1700    def coalesceEffectArgs(
1701        self,
1702        tokens: LexedTokens,
1703        start: int = 0,
1704        end: int = -1
1705    ) -> Tuple[
1706        List[  # List of effect args
1707            Union[
1708                base.Capability,  # covers 'str' possibility
1709                Tuple[base.Token, base.TokenCount],
1710                Tuple[Literal['skill'], base.Skill, base.Level],
1711                Tuple[base.MechanismSpecifier, base.MechanismState],
1712                base.DecisionSpecifier,
1713                List[commands.Command],
1714                Set[str]
1715            ]
1716        ],
1717        Tuple[  # Slots for modifiers: common/hidden/charges/delay
1718            Optional[bool],
1719            Optional[bool],
1720            Optional[int],
1721            Optional[int],
1722        ]
1723    ]:
1724        """
1725        Given a region of a lexed tokens list which contains one or more
1726        effect arguments, combines token sequences that represent things
1727        like capabilities, mechanism states, token counts, and skill
1728        levels into the tuples that would be passed to `base.effect`.
1729        Returns a tuple with two elements:
1730
1731        - First, a list that contains several different kinds of
1732            objects, each of which is distinguishable by its type or
1733            part of its value.
1734        - Next, a tuple with four entries for common, hidden, charges,
1735            and/or delay values based on the presence of modifier
1736            sequences. Any or all of these may be `None` if the relevant
1737            modifier was not present (the usual case).
1738
1739        For example:
1740
1741        >>> pf = ParseFormat()
1742        >>> pf.coalesceEffectArgs(["jump"])
1743        (['jump'], (None, None, None, None))
1744        >>> pf.coalesceEffectArgs(["coin", Lexeme.tokenCount, "3", "fly"])
1745        ([('coin', 3), 'fly'], (None, None, None, None))
1746        >>> pf.coalesceEffectArgs(
1747        ...     [
1748        ...         "fort", Lexeme.zoneSeparator, "gate",
1749        ...             Lexeme.mechanismSeparator, "open"
1750        ...     ]
1751        ... )
1752        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1753 name='gate'), 'open')], (None, None, None, None))
1754        >>> pf.coalesceEffectArgs(
1755        ...     [
1756        ...         "main", Lexeme.domainSeparator, "cliff"
1757        ...     ]
1758        ... )
1759        ([DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1760 (None, None, None, None))
1761        >>> pf.coalesceEffectArgs(
1762        ...     [
1763        ...         "door", Lexeme.mechanismSeparator, "open"
1764        ...     ]
1765        ... )
1766        ([(MechanismSpecifier(domain=None, zone=None, decision=None,\
1767 name='door'), 'open')], (None, None, None, None))
1768        >>> pf.coalesceEffectArgs(
1769        ...     [
1770        ...         "fort", Lexeme.zoneSeparator, "gate",
1771        ...             Lexeme.mechanismSeparator, "open",
1772        ...         "canJump",
1773        ...         "coins", Lexeme.tokenCount, "3",
1774        ...         Lexeme.inCommon,
1775        ...         "agility", Lexeme.skillLevel, "-1",
1776        ...         Lexeme.sepOrDelay, "0",
1777        ...         "main", Lexeme.domainSeparator, "cliff"
1778        ...     ]
1779        ... )
1780        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1781 name='gate'), 'open'), 'canJump', ('coins', 3), ('skill', 'agility', -1),\
1782 DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1783 (True, None, None, 0))
1784        >>> pf.coalesceEffectArgs(["bounce", Lexeme.isHidden])
1785        (['bounce'], (None, True, None, None))
1786        >>> pf.coalesceEffectArgs(
1787        ...     ["goto", "3", Lexeme.inCommon, Lexeme.isHidden]
1788        ... )
1789        (['goto', '3'], (True, True, None, None))
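
            A charges modifier (shown here on its own as a minimal
            illustration, following the pattern of the delay example
            above) fills the third slot:

            >>> pf.coalesceEffectArgs(["flight", Lexeme.effectCharges, "2"])
            (['flight'], (None, None, 2, None))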
1790        """
1791        start, end, nTokens = normalizeEnds(tokens, start, end)
1792        where = start
1793        result: List[  # List of effect args
1794            Union[
1795                base.Capability,  # covers 'str' possibility
1796                Tuple[base.Token, base.TokenCount],
1797                Tuple[Literal['skill'], base.Skill, base.Level],
1798                Tuple[base.MechanismSpecifier, base.MechanismState],
1799                base.DecisionSpecifier,
1800                List[commands.Command],
1801                Set[str]
1802            ]
1803        ] = []
1804        inCommon: Optional[bool] = None
1805        isHidden: Optional[bool] = None
1806        charges: Optional[int] = None
1807        delay: Optional[int] = None
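            # Walk the region one effect argument at a time, separating
            # modifier values (common/hidden/charges/delay) from the
            # positional arguments collected in `result`.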
1808        while where <= end:
1809            following, thisEnd = self.parseOneEffectArg(tokens, where, end)
1810            if following == Lexeme.inCommon:
1811                if inCommon is not None:
1812                    raise ParseError(
1813                        f"In-common effect modifier specified more than"
1814                        f" once in effect args:"
1815                        f"\n{tokens[start:end + 1]}"
1816                    )
1817                inCommon = True
1818            elif following == Lexeme.isHidden:
1819                if isHidden is not None:
1820                    raise ParseError(
1821                        f"Is-hidden effect modifier specified more than"
1822                        f" once in effect args:"
1823                        f"\n{tokens[start:end + 1]}"
1824                    )
1825                isHidden = True
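                # A (Lexeme, int) pair from parseOneEffectArg encodes a
                # charges or delay modifier value.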
1826            elif (
1827                isinstance(following, tuple)
1828            and len(following) == 2
1829            and following[0] in (Lexeme.effectCharges, Lexeme.sepOrDelay)
1830            and isinstance(following[1], int)
1831            ):
1832                if following[0] == Lexeme.effectCharges:
1833                    if charges is not None:
1834                        raise ParseError(
1835                            f"Charges effect modifier specified more than"
1836                            f" once in effect args:"
1837                            f"\n{tokens[start:end + 1]}"
1838                        )
1839                    charges = following[1]
1840                else:
1841                    if delay is not None:
1842                        raise ParseError(
1843                            f"Delay effect modifier specified more than"
1844                            f" once in effect args:"
1845                            f"\n{tokens[start:end + 1]}"
1846                        )
1847                    delay = following[1]
1848            elif (
1849                    isinstance(following, base.Capability)
1850                 or (
1851                    isinstance(following, tuple)
1852                and len(following) == 2
1853                and isinstance(following[0], base.Token)
1854                and isinstance(following[1], base.TokenCount)
1855                ) or (
1856                    isinstance(following, tuple)
1857                and len(following) == 3
1858                and following[0] == 'skill'
1859                and isinstance(following[1], base.Skill)
1860                and isinstance(following[2], base.Level)
1861                ) or (
1862                    isinstance(following, tuple)
1863                and len(following) == 2
1864                and isinstance(following[0], base.MechanismSpecifier)
1865                and isinstance(following[1], base.MechanismState)
1866                ) or (
1867                    isinstance(following, base.DecisionSpecifier)
1868                ) or (
1869                    isinstance(following, list)
1870                and all(isinstance(item, tuple) for item in following)
1871                    # TODO: Stricter command list check here?
1872                ) or (
1873                    isinstance(following, set)
1874                and all(isinstance(item, str) for item in following)
1875                )
1876            ):
1877                result.append(following)
1878            else:
1879                raise ParseError(f"Invalid coalesced argument: {following}")
1880            where = thisEnd + 1
1881
1882        return (result, (inCommon, isHidden, charges, delay))
1883
1884    def parseEffectFromTokens(
1885        self,
1886        tokens: LexedTokens,
1887        start: int = 0,
1888        end: int = -1
1889    ) -> base.Effect:
1890        """
1891        Given a region of a list of lexed tokens specifying an effect,
1892        returns the `Effect` object that those tokens specify.
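
            For example (mirroring `parseEffect`, which simply lexes a
            string and then calls this function):

            >>> pf = ParseFormat()
            >>> pf.parseEffectFromTokens(['gain', 'jump']) == base.effect(
            ...     gain='jump'
            ... )
            True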
1893        """
1894        start, end, nTokens = normalizeEnds(tokens, start, end)
1895
1896        # Check for empty list
1897        if nTokens == 0:
1898            raise ParseError(
1899                "Effect must include at least a type."
1900            )
1901
1902        firstPart = tokens[start]
1903
1904        if isinstance(firstPart, Lexeme):
1905            raise ParseError(
1906                f"First part of effect must be an effect type. Got"
1907                f" {firstPart} ({repr(self.formatDict[firstPart])})."
1908            )
1909
1910        firstPart = cast(str, firstPart)
1911
1912        # Get the effect type
1913        fType = self.effectType(firstPart)
1914
1915        if fType is None:
1916            raise ParseError(
1917                f"Unrecognized effect type {firstPart!r}. Check the"
1918                f" EffectType entries in the effect names dictionary."
1919            )
1920
1921        if start + 1 > end:  # No tokens left: set empty args
1922            groupedArgs: List[
1923                Union[
1924                    base.Capability,  # covers 'str' possibility
1925                    Tuple[base.Token, base.TokenCount],
1926                    Tuple[Literal['skill'], base.Skill, base.Level],
1927                    Tuple[base.MechanismSpecifier, base.MechanismState],
1928                    base.DecisionSpecifier,
1929                    List[commands.Command],
1930                    Set[str]
1931                ]
1932            ] = []
1933            modifiers: Tuple[
1934                Optional[bool],
1935                Optional[bool],
1936                Optional[int],
1937                Optional[int]
1938            ] = (None, None, None, None)
1939        else:  # Coalesce remaining tokens if there are any
1940            groupedArgs, modifiers = self.coalesceEffectArgs(
1941                tokens,
1942                start + 1,
1943                end
1944            )
1945
1946        # Set up arguments for base.effect and handle modifiers first
1947        args: Dict[
1948            str,
1949            Union[
1950                None,
1951                base.ContextSpecifier,
1952                base.Capability,
1953                Tuple[base.Token, base.TokenCount],
1954                Tuple[Literal['skill'], base.Skill, base.Level],
1955                Tuple[base.MechanismSpecifier, base.MechanismState],
1956                Tuple[base.MechanismSpecifier, List[base.MechanismState]],
1957                List[base.Capability],
1958                base.AnyDecisionSpecifier,
1959                Tuple[base.AnyDecisionSpecifier, base.FocalPointName],
1960                bool,
1961                int,
1962                base.SaveSlot,
1963                Tuple[base.SaveSlot, Set[str]]
1964            ]
1965        ] = {}
1966        if modifiers[0]:
1967            args['applyTo'] = 'common'
1968        # hidden is False by default when the modifier is absent
1969        args['hidden'] = bool(modifiers[1])
1972        if modifiers[2] is not None:
1973            args['charges'] = modifiers[2]
1974        if modifiers[3] is not None:
1975            args['delay'] = modifiers[3]
1976
1977        # Now handle the main effect-type-based argument
1978        if fType in ("gain", "lose"):
1979            if len(groupedArgs) != 1:
1980                raise ParseError(
1981                    f"'{fType}' effect must have exactly one grouped"
1982                    f" argument (got {len(groupedArgs)}):\n{groupedArgs}"
1983                )
1984            thing = groupedArgs[0]
1985            if isinstance(thing, tuple):
1986                if len(thing) == 2:
1987                    if (
1988                        not isinstance(thing[0], base.Token)
1989                     or not isinstance(thing[1], base.TokenCount)
1990                    ):
1991                        raise ParseError(
1992                            f"'{fType}' effect grouped arg pair must be a"
1993                            f" (token, amount) pair. Got:\n{thing}"
1994                        )
1995                elif len(thing) == 3:
1996                    if (
1997                        thing[0] != 'skill'
1998                     or not isinstance(thing[1], base.Skill)
1999                     or not isinstance(thing[2], base.Level)
2000                    ):
2001                        raise ParseError(
2002                            f"'{fType}' effect grouped arg triple must be"
2003                            f" a ('skill', skill, level) triple. Got:\n{thing}"
2004                        )
2005                else:
2006                    raise ParseError(
2007                        f"'{fType}' effect grouped arg tuple must have"
2008                        f" length 2 or 3. Got (length {len(thing)}):\n{thing}"
2009                    )
2010            elif not isinstance(thing, base.Capability):
2011                raise ParseError(
2012                    f"'{fType}' effect grouped arg must be a capability"
2013                    f" or a (token, amount) tuple. Got:\n{thing}"
2014                )
2015            args[fType] = thing
2016            return base.effect(**args)  # type:ignore
2017
2018        elif fType == "set":
2019            if len(groupedArgs) != 1:
2020                raise ParseError(
2021                    f"'{fType}' effect must have exactly one grouped"
2022                    f" argument (got {len(groupedArgs)}):\n{groupedArgs}"
2023                )
2024            setVal = groupedArgs[0]
2025            if not isinstance(
2026                setVal,
2027                tuple
2028            ):
2029                raise ParseError(
2030                    f"'{fType}' effect grouped arg must be a tuple. Got:"
2031                    f"\n{setVal}"
2032                )
2033            if len(setVal) == 2:
2034                setWhat, setTo = setVal
2035                if (
2036                    isinstance(setWhat, base.Token)
2037                and isinstance(setTo, base.TokenCount)
2038                ) or (
2039                    isinstance(setWhat, base.MechanismSpecifier)
2040                and isinstance(setTo, base.MechanismState)
2041                ):
2042                    args[fType] = setVal
2043                    return base.effect(**args)  # type:ignore
2044                else:
2045                    raise ParseError(
2046                        f"Invalid '{fType}' effect grouped args:"
2047                        f"\n{groupedArgs}"
2048                    )
2049            elif len(setVal) == 3:
2050                indicator, whichSkill, setTo = setVal
2051                if (
2052                    indicator == 'skill'
2053                and isinstance(whichSkill, base.Skill)
2054                and isinstance(setTo, base.Level)
2055                ):
2056                    args[fType] = setVal
2057                    return base.effect(**args)  # type:ignore
2058                else:
2059                    raise ParseError(
2060                        f"Invalid '{fType}' effect grouped args (not a"
2061                        f" skill):\n{groupedArgs}"
2062                    )
2063            else:
2064                raise ParseError(
2065                    f"Invalid '{fType}' effect grouped args (wrong"
2066                    f" length tuple):\n{groupedArgs}"
2067                )
2068
2069        elif fType == "toggle":
2070            if len(groupedArgs) == 0:
2071                raise ParseError(
2072                    f"'{fType}' effect must have at least one grouped"
2073                    f" argument. Got:\n{groupedArgs}"
2074                )
2075            if (
2076                isinstance(groupedArgs[0], tuple)
2077            and len(groupedArgs[0]) == 2
2078            and isinstance(groupedArgs[0][0], base.MechanismSpecifier)
2079            and isinstance(groupedArgs[0][1], base.MechanismState)
2080            and all(
2081                    isinstance(a, base.MechanismState)
2082                    for a in groupedArgs[1:]
2083                )
2084            ):  # a mechanism toggle
2085                args[fType] = (
2086                    groupedArgs[0][0],
2087                    cast(
2088                        List[base.MechanismState],
2089                        [groupedArgs[0][1]] + groupedArgs[1:]
2090                    )
2091                )
2092                return base.effect(**args)  # type:ignore
2093            elif all(isinstance(a, base.Capability) for a in groupedArgs):
2094                # a capability toggle
2095                args[fType] = cast(List[base.Capability], groupedArgs)
2096                return base.effect(**args)  # type:ignore
2097            else:
2098                raise ParseError(
2099                    f"Invalid arguments for '{fType}' effect. Got:"
2100                    f"\n{groupedArgs}"
2101                )
2102
2103        elif fType in ("bounce", "deactivate"):
2104            if len(groupedArgs) != 0:
2105                raise ParseError(
2106                    f"'{fType}' effect may not include any"
2107                    f" arguments (got {len(groupedArgs)}):"
2108                    f"\n{groupedArgs}"
2109                )
2110            args[fType] = True
2111            return base.effect(**args)  # type:ignore
2112
2113        elif fType == "follow":
2114            if len(groupedArgs) != 1:
2115                raise ParseError(
2116                    f"'{fType}' effect must include exactly one"
2117                    f" argument (got {len(groupedArgs)}):"
2118                    f"\n{groupedArgs}"
2119                )
2120
2121            transition = groupedArgs[0]
2122            if not isinstance(transition, base.Transition):
2123                raise ParseError(
2124                    f"Invalid argument for '{fType}' effect. Needed a"
2125                    f" transition but got:\n{groupedArgs}"
2126                )
2127            args[fType] = transition
2128            return base.effect(**args)  # type:ignore
2129
2130        elif fType == "edit":
2131            if len(groupedArgs) == 0:
2132                raise ParseError(
2133                    "An 'edit' effect requires at least one argument."
2134                )
2135            for i, arg in enumerate(groupedArgs):
2136                if not isinstance(arg, list):
2137                    raise ParseError(
2138                        f"'edit' effect argument {i} is not a sub-list:"
2139                        f"\n  {arg!r}"
2140                        f"\nAmong arguments:"
2141                        f"\n  {groupedArgs}"
2142                    )
2143                for j, cmd in enumerate(arg):
2144                    if not isinstance(cmd, tuple):
2145                        raise ParseError(
2146                            f"'edit' effect argument {i} contains"
2147                            f" non-tuple part {j}:"
2148                            f"\n  {cmd!r}"
2149                            f"\nAmong arguments:"
2150                            f"\n  {groupedArgs}"
2151                        )
2152
2153            args[fType] = groupedArgs  # type:ignore
2154            return base.effect(**args)  # type:ignore
2155
2156        elif fType == "goto":
2157            if len(groupedArgs) not in (1, 2):
2158                raise ParseError(
2159                    f"A 'goto' effect must include either one or two"
2160                    f" grouped arguments. Got {len(groupedArgs)}:"
2161                    f"\n{groupedArgs}"
2162                )
2163
2164            first = groupedArgs[0]
2165            if not isinstance(
2166                first,
2167                (base.DecisionName, base.DecisionSpecifier)
2168            ):
2169                raise ParseError(
2170                    f"'{fType}' effect must first specify a destination"
2171                    f" decision. Got:\n{groupedArgs}"
2172                )
2173
2174            # Check if it's really a decision ID
2175            dSpec: base.AnyDecisionSpecifier
2176            if isinstance(first, base.DecisionName):
2177                try:
2178                    dSpec = int(first)
2179                except ValueError:
2180                    dSpec = first
2181            else:
2182                dSpec = first
2183
2184            if len(groupedArgs) == 2:
2185                second = groupedArgs[1]
2186                if not isinstance(second, base.FocalPointName):
2187                    raise ParseError(
2188                        f"'{fType}' effect must have a focal point name"
2189                        f" if it has a second part. Got:\n{groupedArgs}"
2190                    )
2191                args[fType] = (dSpec, second)
2192            else:
2193                args[fType] = dSpec
2194
2195            return base.effect(**args)  # type:ignore
2196
2197        elif fType == "save":
2198            if len(groupedArgs) not in (0, 1):
2199                raise ParseError(
2200                    f"'{fType}' effect must include exactly zero or one"
2201                    f" argument(s) (got {len(groupedArgs)}):"
2202                    f"\n{groupedArgs}"
2203                )
2204
2205            if len(groupedArgs) == 1:
2206                slot = groupedArgs[0]
2207            else:
2208                slot = base.DEFAULT_SAVE_SLOT
2209            if not isinstance(slot, base.SaveSlot):
2210                raise ParseError(
2211                    f"Invalid argument for '{fType}' effect. Needed a"
2212                    f" save slot but got:\n{groupedArgs}"
2213                )
2214            args[fType] = slot
2215            return base.effect(**args)  # type:ignore
2216
2217        else:
2218            raise ParseError(f"Invalid effect type: '{fType}'.")
2219
2220    def parseEffect(self, effectStr: str) -> base.Effect:
2221        """
2222        Works like `parseEffectFromTokens` but starts with a raw string.
2223        For example:
2224
2225        >>> pf = ParseFormat()
2226        >>> pf.parseEffect("gain jump") == base.effect(gain='jump')
2227        True
2228        >>> pf.parseEffect("set door:open") == base.effect(
2229        ...     set=(
2230        ...         base.MechanismSpecifier(None, None, None, 'door'),
2231        ...         'open'
2232        ...     )
2233        ... )
2234        True
2235        >>> pf.parseEffect("set coins*10") == base.effect(set=('coins', 10))
2236        True
2237        >>> pf.parseEffect("set agility^3") == base.effect(
2238        ...     set=('skill', 'agility', 3)
2239        ... )
2240        True
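
            Modifier markers (such as a delay) are parsed as well:

            >>> pf.parseEffect("deactivate ,2") == base.effect(
            ...     deactivate=True,
            ...     delay=2
            ... )
            True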
2241        """
2242        return self.parseEffectFromTokens(self.lex(effectStr))
2243
2244    def unparseEffect(self, effect: base.Effect) -> str:
2245        """
2246        The opposite of `parseEffect`; turns an effect back into a
2247        string representation.
2248
2249        For example:
2250
2251        >>> pf = ParseFormat()
2252        >>> e = {
2253        ...     "type": "gain",
2254        ...     "applyTo": "active",
2255        ...     "value": "flight",
2256        ...     "delay": None,
2257        ...     "charges": None,
2258        ...     "hidden": False
2259        ... }
2260        >>> pf.unparseEffect(e)
2261        'gain flight'
2262        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2263        True
2264        >>> s = 'gain flight'
2265        >>> pf.unparseEffect(pf.parseEffect(s)) == s
2266        True
2267        >>> s2 = '  gain\\nflight'
2268        >>> pf.unparseEffect(pf.parseEffect(s2)) == s
2269        True
2270        >>> e = {
2271        ...     "type": "gain",
2272        ...     "applyTo": "active",
2273        ...     "value": ("gold", 5),
2274        ...     "delay": 1,
2275        ...     "charges": 2,
2276        ...     "hidden": False
2277        ... }
2278        >>> pf.unparseEffect(e)
2279        'gain gold*5 ,1 =2'
2280        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2281        True
2282        >>> e = {
2283        ...     "type": "set",
2284        ...     "applyTo": "active",
2285        ...     "value": (
2286        ...         base.MechanismSpecifier(None, None, None, "gears"),
2287        ...         "on"
2288        ...     ),
2289        ...     "delay": None,
2290        ...     "charges": 1,
2291        ...     "hidden": False
2292        ... }
2293        >>> pf.unparseEffect(e)
2294        'set gears:on =1'
2295        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2296        True
2297        >>> e = {
2298        ...     "type": "toggle",
2299        ...     "applyTo": "active",
2300        ...     "value": ["red", "blue"],
2301        ...     "delay": None,
2302        ...     "charges": None,
2303        ...     "hidden": False
2304        ... }
2305        >>> pf.unparseEffect(e)
2306        'toggle red blue'
2307        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2308        True
2309        >>> e = {
2310        ...     "type": "toggle",
2311        ...     "applyTo": "active",
2312        ...     "value": (
2313        ...         base.MechanismSpecifier(None, None, None, "switch"),
2314        ...         ["on", "off"]
2315        ...     ),
2316        ...     "delay": None,
2317        ...     "charges": None,
2318        ...     "hidden": False
2319        ... }
2320        >>> pf.unparseEffect(e)
2321        'toggle switch:on off'
2322        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2323        True
2324        >>> e = {
2325        ...     "type": "deactivate",
2326        ...     "applyTo": "active",
2327        ...     "value": None,
2328        ...     "delay": 2,
2329        ...     "charges": None,
2330        ...     "hidden": False
2331        ... }
2332        >>> pf.unparseEffect(e)
2333        'deactivate ,2'
2334        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2335        True
2336        >>> e = {
2337        ...     "type": "goto",
2338        ...     "applyTo": "common",
2339        ...     "value": 3,
2340        ...     "delay": None,
2341        ...     "charges": None,
2342        ...     "hidden": False
2343        ... }
2344        >>> pf.unparseEffect(e)
2345        'goto 3 +c'
2346        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2347        True
2348        >>> e = {
2349        ...     "type": "goto",
2350        ...     "applyTo": "common",
2351        ...     "value": 3,
2352        ...     "delay": None,
2353        ...     "charges": None,
2354        ...     "hidden": True
2355        ... }
2356        >>> pf.unparseEffect(e)
2357        'goto 3 +c +h'
2358        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2359        True
2360        >>> e = {
2361        ...     "type": "goto",
2362        ...     "applyTo": "active",
2363        ...     "value": 'home',
2364        ...     "delay": None,
2365        ...     "charges": None,
2366        ...     "hidden": False
2367        ... }
2368        >>> pf.unparseEffect(e)
2369        'goto home'
2370        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2371        True
2372        >>> e = base.effect(edit=[
2373        ...     [
2374        ...         commands.command('val', '5'),
2375        ...         commands.command('empty', 'list'),
2376        ...         commands.command('append', '$_')
2377        ...     ],
2378        ...     [
2379        ...         commands.command('val', '11'),
2380        ...         commands.command('assign', 'var', '$_'),
2381        ...         commands.command('op', '+', '$var', '$var')
2382        ...     ],
2383        ... ])
2384        >>> pf.unparseEffect(e)
2385        'edit {\\n  val 5;\\n  empty list;\\n  append $_;\\n}\
2386 {\\n  val 11;\\n  assign var $_;\\n  op + $var $var;\\n}'
2387        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2388        True
2389        """
2390        result: List[str] = []
2391
2392        # Reverse the effect type into a marker
2393        eType = effect['type']
2394        for key, val in self.effectNames.items():
2395            if val == eType:
2396                if len(result) != 0:
2397                    raise ParseError(
2398                        f"Effect map contains multiple matching entries"
2399                        f" for effect type '{effect['type']}':"
2400                        f" '{result[0]}' and '{key}'"
2401                    )
2402                result.append(key)
2403                # Don't break, so that we can detect duplicate entries
2404
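            # Unparse the effect's value; the shape expected here
            # depends on the effect type.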
2405        eVal = effect['value']
2406        if eType in ('gain', 'lose'):
2407            eVal = cast(Union[base.Capability, Tuple[base.Token, int]], eVal)
2408            if isinstance(eVal, str):  # a capability
2409                result.append(eVal)
2410            else:  # a token
2411                result.append(
2412                    eVal[0]
2413                  + self.formatDict[Lexeme.tokenCount]
2414                  + str(eVal[1])
2415                )
2416        elif eType == 'set':
2417            eVal = cast(
2418                # TODO: Add skill level setting here & elsewhere
2419                Union[
2420                    Tuple[base.Token, base.TokenCount],
2421                    Tuple[base.MechanismSpecifier, base.MechanismState]
2422                ],
2423                eVal
2424            )
2425            if len(eVal) != 2:
2426                raise ValueError(
2427                    f"'set' effect has non-length-2 value:"
2428                    f"\n  {repr(effect)}"
2429                )
2430            if isinstance(eVal[1], int):  # a token count
2431                result.append(eVal[0])
2432                result.append(self.formatDict[Lexeme.tokenCount])
2433                result.append(str(eVal[1]))
2434            else:  # a mechanism
2435                if isinstance(eVal[0], base.MechanismSpecifier):
2436                    mSpec = self.unparseMechanismSpecifier(eVal[0])
2437                else:
2439                    assert isinstance(eVal[0], base.MechanismName)
2440                    mSpec = eVal[0]
2441                result.append(
2442                    mSpec
2443                  + self.formatDict[Lexeme.mechanismSeparator]
2444                  + eVal[1]
2445                )
2446        elif eType == 'toggle':
2447            if isinstance(eVal, tuple):  # mechanism states
2448                tSpec, states = cast(
2449                    Tuple[
2450                        base.AnyMechanismSpecifier,
2451                        List[base.MechanismState]
2452                    ],
2453                    eVal
2454                )
2455                firstState = states[0]
2456                restStates = states[1:]
2457                if isinstance(tSpec, base.MechanismSpecifier):
2458                    mStr = self.unparseMechanismSpecifier(tSpec)
2459                else:
2460                    mStr = str(tSpec)
2461                result.append(
2462                    mStr
2463                  + self.formatDict[Lexeme.mechanismSeparator]
2464                  + firstState
2465                )
2466                result.extend(restStates)
2467            else:  # capabilities
2468                assert isinstance(eVal, list)
2469                eVal = cast(List[base.Capability], eVal)
2470                result.extend(eVal)
2471        elif eType in ('deactivate', 'bounce'):
2472            if eVal is not None:
2473                raise ValueError(
2474                    f"'{eType}' effect has non-None value:"
2475                    f"\n  {repr(effect)}"
2476                )
2477        elif eType == 'follow':
2478            eVal = cast(base.Transition, eVal)
2479            result.append(eVal)
2480        elif eType == 'edit':
2481            eVal = cast(List[List[commands.Command]], eVal)
2482            if len(eVal) == 0:
2483                result.append('{}')  # explicit marker for an empty edit
2484            else:
2485                for cmdList in eVal:
2486                    result.append(
2487                        self.unparseCommandList(cmdList)
2488                    )
2489        elif eType == 'goto':
2490            if isinstance(eVal, base.DecisionSpecifier):
2491                result.append(self.unparseDecisionSpecifier(eVal))
2492            elif isinstance(eVal, (base.DecisionID, base.DecisionName)):
2493                result.append(str(eVal))
2494            elif (
2495                isinstance(eVal, tuple)
2496            and len(eVal) == 2
2497            and isinstance(eVal[1], base.FocalPointName)
2498            ):
2499                if isinstance(eVal[0], base.DecisionSpecifier):
2500                    result.append(self.unparseDecisionSpecifier(eVal[0]))
2501                else:
2502                    result.append(str(eVal[0]))
2503                result.append(eVal[1])
2504            else:
2505                raise ValueError(
2506                    f"'{eType}' effect has invalid value {eVal}"
2507                )
2508        elif eType == 'save':
2509            # It's just a string naming the save slot
2510            result.append(eVal)
2511        else:
2512            raise ValueError(
2513                f"Unrecognized effect type '{eType}' in effect:"
2514                f"\n  {repr(effect)}"
2515            )
2516
2517        # Add modifier strings
2518        if effect['applyTo'] == 'common':
2519            result.append(self.formatDict[Lexeme.inCommon])
2520
2521        if effect['hidden']:
2522            result.append(self.formatDict[Lexeme.isHidden])
2523
2524        dVal = effect['delay']
2525        if dVal is not None:
2526            result.append(
2527                self.formatDict[Lexeme.sepOrDelay] + str(dVal)
2528            )
2529
2530        cVal = effect['charges']
2531        if cVal is not None:
2532            result.append(
2533                self.formatDict[Lexeme.effectCharges] + str(cVal)
2534            )
2535
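            # Join the pieces with single spaces, but don't add spaces
            # around pieces that already start or end with whitespace
            # (such as multi-line command blocks).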
2536        joined = ''
2537        before = False
2538        for r in result:
2539            if (
2540                r.startswith(' ')
2541             or r.startswith('\n')
2542             or r.endswith(' ')
2543             or r.endswith('\n')
2544            ):
2545                joined += r
2546                before = False
2547            else:
2548                joined += (' ' if before else '') + r
2549                before = True
2550        return joined
2551
2552    def parseDecisionSpecifierFromTokens(
2553        self,
2554        tokens: LexedTokens,
2555        start: int = 0
2556    ) -> Tuple[Union[base.DecisionSpecifier, int], int]:
2557        """
2558        Parses a decision specifier starting at the specified position
2559        in the given tokens list. No ending position is needed; instead,
2560        this function returns a tuple containing the parsed
2561        `base.DecisionSpecifier` (or a decision ID integer) along with
2562        the index of the token where the specifier ends.
2563
2564        For example:
2565
2566        >>> pf = ParseFormat()
2567        >>> pf.parseDecisionSpecifierFromTokens(['m'])
2568        (DecisionSpecifier(domain=None, zone=None, name='m'), 0)
2569        >>> pf.parseDecisionSpecifierFromTokens(['12'])  # ID specifier
2570        (12, 0)
2571        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'])
2572        (DecisionSpecifier(domain=None, zone=None, name='a'), 0)
2573        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'], 1)
2574        (DecisionSpecifier(domain=None, zone=None, name='m'), 1)
2575        >>> pf.parseDecisionSpecifierFromTokens(
2576        ...     ['a', Lexeme.domainSeparator, 'm']
2577        ... )
2578        (DecisionSpecifier(domain='a', zone=None, name='m'), 2)
2579        >>> pf.parseDecisionSpecifierFromTokens(
2580        ...     ['a', Lexeme.zoneSeparator, 'm']
2581        ... )
2582        (DecisionSpecifier(domain=None, zone='a', name='m'), 2)
2583        >>> pf.parseDecisionSpecifierFromTokens(
2584        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
2585        ... )
2586        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2587        >>> pf.parseDecisionSpecifierFromTokens(
2588        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
2589        ... )
2590        (DecisionSpecifier(domain='a', zone='b', name='m'), 4)
2591        >>> pf.parseDecisionSpecifierFromTokens(
2592        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
2593        ... )
2594        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2595        >>> pf.parseDecisionSpecifierFromTokens(  # ID-style name w/ zone
2596        ...     ['a', Lexeme.zoneSeparator, '5'],
2597        ... )
2598        Traceback (most recent call last):
2599        ...
2600        exploration.base.InvalidDecisionSpecifierError...
2601        >>> pf.parseDecisionSpecifierFromTokens(
2602        ...     ['d', Lexeme.domainSeparator, '123']
2603        ... )
2604        Traceback (most recent call last):
2605        ...
2606        exploration.base.InvalidDecisionSpecifierError...
2607        >>> pf.parseDecisionSpecifierFromTokens(
2608        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2609        ...     1
2610        ... )
2611        Traceback (most recent call last):
2612        ...
2613        exploration.parsing.ParseError...
2614        >>> pf.parseDecisionSpecifierFromTokens(
2615        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2616        ...     2
2617        ... )
2618        (DecisionSpecifier(domain='b', zone=None, name='m'), 4)
2619        >>> pf.parseDecisionSpecifierFromTokens(
2620        ...     [
2621        ...         'a',
2622        ...         Lexeme.domainSeparator,
2623        ...         'b',
2624        ...         Lexeme.zoneSeparator,
2625        ...         'c',
2626        ...         Lexeme.zoneSeparator,
2627        ...         'm'
2628        ...     ]
2629        ... )
2630        (DecisionSpecifier(domain='a', zone='b', name='c'), 4)
2631        >>> pf.parseDecisionSpecifierFromTokens(
2632        ...     [
2633        ...         'a',
2634        ...         Lexeme.domainSeparator,
2635        ...         'b',
2636        ...         Lexeme.zoneSeparator,
2637        ...         'c',
2638        ...         Lexeme.zoneSeparator,
2639        ...         'm'
2640        ...     ],
2641        ...     2
2642        ... )
2643        (DecisionSpecifier(domain=None, zone='b', name='c'), 4)
2644        >>> pf.parseDecisionSpecifierFromTokens(
2645        ...     [
2646        ...         'a',
2647        ...         Lexeme.domainSeparator,
2648        ...         'b',
2649        ...         Lexeme.zoneSeparator,
2650        ...         'c',
2651        ...         Lexeme.zoneSeparator,
2652        ...         'm'
2653        ...     ],
2654        ...     4
2655        ... )
2656        (DecisionSpecifier(domain=None, zone='c', name='m'), 6)
2657        >>> pf.parseDecisionSpecifierFromTokens(
2658        ...     [
2659        ...         'set',
2660        ...         'main',
2661        ...         Lexeme.domainSeparator,
2662        ...         'zone',
2663        ...         Lexeme.zoneSeparator,
2664        ...         'compass',
2665        ...         'north',
2666        ...         'bounce',
2667        ...     ],
2668        ...     1
2669        ... )
2670        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
2671        """
2672        # Check bounds & normalize start index
2673        nTokens = len(tokens)
2674        if start < -nTokens:
2675            raise IndexError(
2676                f"Invalid start index {start} for {nTokens} tokens (too"
2677                f" negative)."
2678            )
2679        elif start >= nTokens:
2680            raise IndexError(
2681                f"Invalid start index {start} for {nTokens} tokens (too"
2682                f" big)."
2683            )
2684        elif start < 0:
2685            start = nTokens + start
2686
2687        assert (start < nTokens)
2688
2689        first = tokens[start]
2690        if not isinstance(first, str):
2691            raise ParseError(
2692                f"Invalid decision specifier (must start with a name or"
2693                f" id; got: {first} = {self.formatDict[first]})."
2694            )
2695
2696        ds = base.DecisionSpecifier(None, None, first)
2697        result = (base.idOrDecisionSpecifier(ds), start)
2698
2699        domain = None
2700        zoneOrDecision = None
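            # Greedily extend the one-token result: each following
            # separator-plus-name pair lengthens the specifier, and we
            # fall back to the shorter result whenever the pattern
            # breaks.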
2701
2702        if start + 1 >= nTokens:  # at end of tokens
2703            return result
2704
2705        firstSep = tokens[start + 1]
2706        if firstSep == Lexeme.domainSeparator:
2707            domain = first
2708        elif firstSep == Lexeme.zoneSeparator:
2709            zoneOrDecision = first
2710        else:
2711            return result
2712
2713        if start + 2 >= nTokens:
2714            return result
2715
2716        second = tokens[start + 2]
2717        if isinstance(second, Lexeme):
2718            return result
2719
2720        ds = base.DecisionSpecifier(domain, zoneOrDecision, second)
2721        result = (base.idOrDecisionSpecifier(ds), start + 2)
2722
2723        if start + 3 >= nTokens:
2724            return result
2725
2726        secondSep = tokens[start + 3]
2727        if start + 4 >= nTokens:
2728            return result
2729
2730        third = tokens[start + 4]
2731        if secondSep == Lexeme.zoneSeparator:
2732            if zoneOrDecision is not None:  # two in a row
2733                return result
2734            else:
2735                if not isinstance(third, base.DecisionName):
2736                    return result
2737                else:
2738                    zoneOrDecision = second
2739        else:
2740            return result
2741
2742        if isinstance(third, Lexeme):
2743            return result
2744
2745        ds = base.DecisionSpecifier(domain, zoneOrDecision, third)
2746        return (base.idOrDecisionSpecifier(ds), start + 4)
2747
2748    def parseDecisionSpecifier(
2749        self,
2750        specString: str
2751    ) -> Union[base.DecisionID, base.DecisionSpecifier]:
2752        """
2753        Parses a full `DecisionSpecifier` from a single string. Can
2754        parse integer decision IDs in string form, and returns a
2755        `DecisionID` in that case, otherwise returns a
2756        `DecisionSpecifier`. Assumes that all int-convertible strings
2757        are decision IDs, so it cannot deal with decision names which
2758        are just numbers.
2759
2760        For example:
2761
2762        >>> pf = ParseFormat()
2763        >>> pf.parseDecisionSpecifier('example')
2764        DecisionSpecifier(domain=None, zone=None, name='example')
2765        >>> pf.parseDecisionSpecifier('outer::example')
2766        DecisionSpecifier(domain=None, zone='outer', name='example')
2767        >>> pf.parseDecisionSpecifier('domain//region::feature')
2768        DecisionSpecifier(domain='domain', zone='region', name='feature')
2769        >>> pf.parseDecisionSpecifier('123')
2770        123
2771        >>> pf.parseDecisionSpecifier('region::domain//feature')
2772        Traceback (most recent call last):
2773        ...
2774        exploration.base.InvalidDecisionSpecifierError...
2775        >>> pf.parseDecisionSpecifier('domain1//domain2//feature')
2776        Traceback (most recent call last):
2777        ...
2778        exploration.base.InvalidDecisionSpecifierError...
2779        >>> pf.parseDecisionSpecifier('domain//123')
2780        Traceback (most recent call last):
2781        ...
2782        exploration.base.InvalidDecisionSpecifierError...
2783        >>> pf.parseDecisionSpecifier('region::123')
2784        Traceback (most recent call last):
2785        ...
2786        exploration.base.InvalidDecisionSpecifierError...
2787        """
2788        try:
2789            return int(specString)
2790        except ValueError:
2791            tokens = self.lex(specString)
2792            result, end = self.parseDecisionSpecifierFromTokens(tokens)
2793            if end != len(tokens) - 1:
2794                raise base.InvalidDecisionSpecifierError(
2795                    f"Junk after end of decision specifier:"
2796                    f"\n{tokens[end + 1:]}"
2797                )
2798            return result
2799
2800    def parseFeatureSpecifierFromTokens(
2801        self,
2802        tokens: LexedTokens,
2803        start: int = 0,
2804        limit: int = -1
2805    ) -> Tuple[base.FeatureSpecifier, int]:
2806        """
2807        Parses a `FeatureSpecifier` starting from the specified part of
2808        a tokens list. Returns a tuple containing the feature specifier
2809        and the index of the token where the specifier ends.
2810
2811        Can parse integer feature IDs in string form, as well as nested
2812        feature specifiers and plain feature specifiers. Assumes that
2813        all int-convertible strings are feature IDs, so it cannot deal
2814        with feature names which are just numbers.
2815
2816        For example:
2817
2818        >>> pf = ParseFormat()
2819        >>> pf.parseFeatureSpecifierFromTokens(['example'])
2820        (FeatureSpecifier(domain=None, within=[], feature='example',\
2821 part=None), 0)
2822        >>> pf.parseFeatureSpecifierFromTokens(['example1', 'example2'], 1)
2823        (FeatureSpecifier(domain=None, within=[], feature='example2',\
2824 part=None), 1)
2825        >>> pf.parseFeatureSpecifierFromTokens(
2826        ...     [
2827        ...         'domain',
2828        ...         Lexeme.domainSeparator,
2829        ...         'region',
2830        ...         Lexeme.zoneSeparator,
2831        ...         'feature',
2832        ...         Lexeme.partSeparator,
2833        ...         'part'
2834        ...     ]
2835        ... )
2836        (FeatureSpecifier(domain='domain', within=['region'],\
2837 feature='feature', part='part'), 6)
2838        >>> pf.parseFeatureSpecifierFromTokens(
2839        ...     [
2840        ...         'outerRegion',
2841        ...         Lexeme.zoneSeparator,
2842        ...         'midRegion',
2843        ...         Lexeme.zoneSeparator,
2844        ...         'innerRegion',
2845        ...         Lexeme.zoneSeparator,
2846        ...         'feature'
2847        ...     ]
2848        ... )
2849        (FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
2850 'innerRegion'], feature='feature', part=None), 6)
2851        >>> pf.parseFeatureSpecifierFromTokens(
2852        ...     [
2853        ...         'outerRegion',
2854        ...         Lexeme.zoneSeparator,
2855        ...         'midRegion',
2856        ...         Lexeme.zoneSeparator,
2857        ...         'innerRegion',
2858        ...         Lexeme.zoneSeparator,
2859        ...         'feature'
2860        ...     ],
2861        ...     1
2862        ... )
2863        Traceback (most recent call last):
2864        ...
2865        exploration.parsing.InvalidFeatureSpecifierError...
2866        >>> pf.parseFeatureSpecifierFromTokens(
2867        ...     [
2868        ...         'outerRegion',
2869        ...         Lexeme.zoneSeparator,
2870        ...         'midRegion',
2871        ...         Lexeme.zoneSeparator,
2872        ...         'innerRegion',
2873        ...         Lexeme.zoneSeparator,
2874        ...         'feature'
2875        ...     ],
2876        ...     2
2877        ... )
2878        (FeatureSpecifier(domain=None, within=['midRegion', 'innerRegion'],\
2879 feature='feature', part=None), 6)
2880        >>> pf.parseFeatureSpecifierFromTokens(
2881        ...     [
2882        ...         'outerRegion',
2883        ...         Lexeme.zoneSeparator,
2884        ...         'feature',
2885        ...         Lexeme.domainSeparator,
2886        ...         'after',
2887        ...     ]
2888        ... )
2889        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2890 feature='feature', part=None), 2)
2891        >>> pf.parseFeatureSpecifierFromTokens(
2892        ...     [
2893        ...         'outerRegion',
2894        ...         Lexeme.zoneSeparator,
2895        ...         'feature',
2896        ...         Lexeme.domainSeparator,
2897        ...         'after',
2898        ...     ],
2899        ...     2
2900        ... )
2901        (FeatureSpecifier(domain='feature', within=[], feature='after',\
2902 part=None), 4)
2903        >>> # Including a limit:
2904        >>> pf.parseFeatureSpecifierFromTokens(
2905        ...     [
2906        ...         'outerRegion',
2907        ...         Lexeme.zoneSeparator,
2908        ...         'midRegion',
2909        ...         Lexeme.zoneSeparator,
2910        ...         'feature',
2911        ...     ],
2912        ...     0,
2913        ...     2
2914        ... )
2915        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2916 feature='midRegion', part=None), 2)
2917        >>> pf.parseFeatureSpecifierFromTokens(
2918        ...     [
2919        ...         'outerRegion',
2920        ...         Lexeme.zoneSeparator,
2921        ...         'midRegion',
2922        ...         Lexeme.zoneSeparator,
2923        ...         'feature',
2924        ...     ],
2925        ...     0,
2926        ...     0
2927        ... )
2928        (FeatureSpecifier(domain=None, within=[], feature='outerRegion',\
2929 part=None), 0)
2930        >>> pf.parseFeatureSpecifierFromTokens(
2931        ...     [
2932        ...         'region',
2933        ...         Lexeme.zoneSeparator,
2934        ...         Lexeme.zoneSeparator,
2935        ...         'feature',
2936        ...     ]
2937        ... )
2938        (FeatureSpecifier(domain=None, within=[], feature='region',\
2939 part=None), 0)
2940        """
2941        start, limit, nTokens = normalizeEnds(tokens, start, limit)
2942
2943        if nTokens == 0:
2944            raise InvalidFeatureSpecifierError(
2945                "Can't parse a feature specifier from 0 tokens."
2946            )
2947        first = tokens[start]
2948        if isinstance(first, Lexeme):
2949            raise InvalidFeatureSpecifierError(
2950                f"Feature specifier can't begin with a special token."
2951                f" Got:\n{tokens[start:limit + 1]}"
2952            )
2953
2954        if nTokens in (1, 2):
2955            # 2 tokens isn't enough for a second part
2956            fs = base.FeatureSpecifier(
2957                domain=None,
2958                within=[],
2959                feature=first,
2960                part=None
2961            )
2962            return (base.normalizeFeatureSpecifier(fs), start)
2963
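            # Look at the separator after the first token; for domain or
            # zone separators, parse the remainder recursively and graft
            # this token onto the front of the result.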
2964        firstSep = tokens[start + 1]
2965        secondPart = tokens[start + 2]
2966
2967        if (
2968            firstSep not in (
2969                Lexeme.domainSeparator,
2970                Lexeme.zoneSeparator,
2971                Lexeme.partSeparator
2972            )
2973         or not isinstance(secondPart, str)
2974        ):
2975            # Following tokens won't work out
2976            fs = base.FeatureSpecifier(
2977                domain=None,
2978                within=[],
2979                feature=first,
2980                part=None
2981            )
2982            return (base.normalizeFeatureSpecifier(fs), start)
2983
2984        if firstSep == Lexeme.domainSeparator:
2985            if start + 2 > limit:
2986                return (
2987                    base.FeatureSpecifier(
2988                        domain=first,
2989                        within=[],
2990                        feature=secondPart,
2991                        part=None
2992                    ),
2993                    start + 2
2994                )
2995            else:
2996                rest, restEnd = self.parseFeatureSpecifierFromTokens(
2997                    tokens,
2998                    start + 2,
2999                    limit
3000                )
3001                if rest.domain is not None:  # two domainSeparators in a row
3002                    fs = base.FeatureSpecifier(
3003                        domain=first,
3004                        within=[],
3005                        feature=rest.domain,
3006                        part=None
3007                    )
3008                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3009                else:
3010                    fs = base.FeatureSpecifier(
3011                        domain=first,
3012                        within=rest.within,
3013                        feature=rest.feature,
3014                        part=rest.part
3015                    )
3016                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3017
3018        elif firstSep == Lexeme.zoneSeparator:
3019            if start + 2 > limit:
3020                fs = base.FeatureSpecifier(
3021                    domain=None,
3022                    within=[first],
3023                    feature=secondPart,
3024                    part=None
3025                )
3026                return (base.normalizeFeatureSpecifier(fs), start + 2)
3027            else:
3028                rest, restEnd = self.parseFeatureSpecifierFromTokens(
3029                    tokens,
3030                    start + 2,
3031                    limit
3032                )
3033                if rest.domain is not None:  # domain sep after zone sep
3034                    fs = base.FeatureSpecifier(
3035                        domain=None,
3036                        within=[first],
3037                        feature=rest.domain,
3038                        part=None
3039                    )
3040                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3041                else:
3042                    within = [first]
3043                    within.extend(rest.within)
3044                    fs = base.FeatureSpecifier(
3045                        domain=None,
3046                        within=within,
3047                        feature=rest.feature,
3048                        part=rest.part
3049                    )
3050                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3051
3052        else:  # must be partSeparator
3053            fs = base.FeatureSpecifier(
3054                domain=None,
3055                within=[],
3056                feature=first,
3057                part=secondPart
3058            )
3059            return (base.normalizeFeatureSpecifier(fs), start + 2)
3060
3061    def parseFeatureSpecifier(self, specString: str) -> base.FeatureSpecifier:
3062        """
3063        Parses a full `FeatureSpecifier` from a single string. See
3064        `parseFeatureSpecifierFromTokens`.
3065
3066        >>> pf = ParseFormat()
3067        >>> pf.parseFeatureSpecifier('example')
3068        FeatureSpecifier(domain=None, within=[], feature='example', part=None)
3069        >>> pf.parseFeatureSpecifier('outer::example')
3070        FeatureSpecifier(domain=None, within=['outer'], feature='example',\
3071 part=None)
3072        >>> pf.parseFeatureSpecifier('example%%middle')
3073        FeatureSpecifier(domain=None, within=[], feature='example',\
3074 part='middle')
3075        >>> pf.parseFeatureSpecifier('domain//region::feature%%part')
3076        FeatureSpecifier(domain='domain', within=['region'],\
3077 feature='feature', part='part')
3078        >>> pf.parseFeatureSpecifier(
3079        ...     'outerRegion::midRegion::innerRegion::feature'
3080        ... )
3081        FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
3082 'innerRegion'], feature='feature', part=None)
3083        >>> pf.parseFeatureSpecifier('region::domain//feature')
3084        Traceback (most recent call last):
3085        ...
3086        exploration.parsing.InvalidFeatureSpecifierError...
3087        >>> pf.parseFeatureSpecifier('feature%%part1%%part2')
3088        Traceback (most recent call last):
3089        ...
3090        exploration.parsing.InvalidFeatureSpecifierError...
3091        >>> pf.parseFeatureSpecifier('domain1//domain2//feature')
3092        Traceback (most recent call last):
3093        ...
3094        exploration.parsing.InvalidFeatureSpecifierError...
3095        >>> # TODO: Issue warnings for these...
3096        >>> pf.parseFeatureSpecifier('domain//123')  # domain discarded
3097        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3098        >>> pf.parseFeatureSpecifier('region::123')  # zone discarded
3099        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3100        >>> pf.parseFeatureSpecifier('123%%part')
3101        FeatureSpecifier(domain=None, within=[], feature=123, part='part')
3102        """
3103        tokens = self.lex(specString)
3104        result, rEnd = self.parseFeatureSpecifierFromTokens(tokens)
3105        if rEnd != len(tokens) - 1:
3106            raise InvalidFeatureSpecifierError(
3107                f"Feature specifier has extra stuff at end:"
3108                f" {tokens[rEnd + 1:]}"
3109            )
3110        else:
3111            return result
3112
3113    def normalizeFeatureSpecifier(
3114        self,
3115        spec: base.AnyFeatureSpecifier
3116    ) -> base.FeatureSpecifier:
3117        """
3118        Normalizes any kind of feature specifier into an official
3119        `FeatureSpecifier` tuple.
3120
3121        For example:
3122
3123        >>> pf = ParseFormat()
3124        >>> pf.normalizeFeatureSpecifier('town')
3125        FeatureSpecifier(domain=None, within=[], feature='town', part=None)
3126        >>> pf.normalizeFeatureSpecifier(5)
3127        FeatureSpecifier(domain=None, within=[], feature=5, part=None)
3128        >>> pf.parseFeatureSpecifierFromTokens(
3129        ...     [
3130        ...         'domain',
3131        ...         Lexeme.domainSeparator,
3132        ...         'region',
3133        ...         Lexeme.zoneSeparator,
3134        ...         'feature',
3135        ...         Lexeme.partSeparator,
3136        ...         'part'
3137        ...     ]
3138        ... )
3139        (FeatureSpecifier(domain='domain', within=['region'],\
3140 feature='feature', part='part'), 6)
3141        >>> pf.normalizeFeatureSpecifier('dom//one::two::three%%middle')
3142        FeatureSpecifier(domain='dom', within=['one', 'two'],\
3143 feature='three', part='middle')
3144        >>> pf.normalizeFeatureSpecifier(
3145        ...   base.FeatureSpecifier(None, ['region'], 'place', None)
3146        ... )
3147        FeatureSpecifier(domain=None, within=['region'], feature='place',\
3148 part=None)
3149        >>> fs = base.FeatureSpecifier(None, [], 'place', None)
3150        >>> ns = pf.normalizeFeatureSpecifier(fs)
3151        >>> ns is fs  # Doesn't create unnecessary clones
3152        True
3153        """
3154        if isinstance(spec, base.FeatureSpecifier):
3155            return spec
3156        elif isinstance(spec, base.FeatureID):
3157            return base.FeatureSpecifier(None, [], spec, None)
3158        elif isinstance(spec, str):
3159            return self.parseFeatureSpecifier(spec)
3160        else:
3161            raise TypeError(f"Invalid feature specifier type: '{type(spec)}'")
3162
3163    def unparseChallenge(self, challenge: base.Challenge) -> str:
3164        """
3165        Turns a `base.Challenge` into a string that can be turned back
3166        into an equivalent challenge by `parseChallenge`. For example:
3167
3168        >>> pf = ParseFormat()
3169        >>> c = base.challenge(
3170        ...     skills=base.BestSkill('brains', 'brawn'),
3171        ...     level=2,
3172        ...     success=[base.effect(set=('switch', 'on'))],
3173        ...     failure=[
3174        ...         base.effect(deactivate=True, delay=1),
3175        ...         base.effect(bounce=True)
3176        ...     ],
3177        ...     outcome=True
3178        ... )
3179        >>> r = pf.unparseChallenge(c)
3180        >>> r
3181        '<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}'
3182        >>> pf.parseChallenge(r) == c
3183        True
3184        >>> c2 = base.challenge(
3185        ...     skills=base.CombinedSkill(
3186        ...         -2,
3187        ...         base.ConditionalSkill(
3188        ...             base.ReqCapability('tough'),
3189        ...             base.BestSkill(1),
3190        ...             base.BestSkill(-1)
3191        ...         )
3192        ...     ),
3193        ...     level=-2,
3194        ...     success=[base.effect(gain='orb')],
3195        ...     failure=[],
3196        ...     outcome=None
3197        ... )
3198        >>> r2 = pf.unparseChallenge(c2)
3199        >>> r2
3200        '<-2>sum(-2, if(tough, best(1), best(-1))){gain orb}{}'
3201        >>> # TODO: let this parse through without BestSkills...
3202        >>> pf.parseChallenge(r2) == c2
3203        True
3204        """
3205        lt = self.formatDict[Lexeme.angleLeft]
3206        gt = self.formatDict[Lexeme.angleRight]
3207        result = (
3208            lt + str(challenge['level']) + gt
3209          + challenge['skills'].unparse()
3210        )
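            # A '>' right before the success block marks outcome=True; one
            # right before the failure block marks outcome=False; no '>'
            # leaves the outcome as None.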
3211        if challenge['outcome'] is True:
3212            result += gt
3213        result += self.unparseConsequence(challenge['success'])
3214        if challenge['outcome'] is False:
3215            result += gt
3216        result += self.unparseConsequence(challenge['failure'])
3217        return result
3218
3219    def unparseCondition(self, condition: base.Condition) -> str:
3220        """
3221        Given a `base.Condition` returns a string that would result in
3222        that condition if given to `parseCondition`. For example:
3223
3224        >>> pf = ParseFormat()
3225        >>> c = base.condition(
3226        ...     condition=base.ReqAny([
3227        ...         base.ReqCapability('brawny'),
3228        ...         base.ReqNot(base.ReqTokens('weights', 3))
3229        ...     ]),
3230        ...     consequence=[base.effect(gain='power')]
3231        ... )
3232        >>> r = pf.unparseCondition(c)
3233        >>> r
3234        '??((brawny|!(weights*3))){gain power}{}'
3235        >>> pf.parseCondition(r) == c
3236        True
3237        """
3238        return (
3239            self.formatDict[Lexeme.doubleQuestionmark]
3240          + self.formatDict[Lexeme.openParen]
3241          + condition['condition'].unparse()
3242          + self.formatDict[Lexeme.closeParen]
3243          + self.unparseConsequence(condition['consequence'])
3244          + self.unparseConsequence(condition['alternative'])
3245        )
3246
3247    def unparseConsequence(self, consequence: base.Consequence) -> str:
3248        """
3249        Given a `base.Consequence`, returns a string encoding of it,
3250        using the same format that `parseConsequence` will parse. Uses
3251        function-call-like syntax and curly braces to denote different
3252        sub-consequences. See also `SkillCombination.unparse` and
3253        `Requirement.unparse`. For example:
3254
3255        >>> pf = ParseFormat()
3256        >>> c = [base.effect(gain='one'), base.effect(lose='one')]
3257        >>> pf.unparseConsequence(c)
3258        '{gain one; lose one}'
3259        >>> c = [
3260        ...     base.challenge(
3261        ...         skills=base.BestSkill('brains', 'brawn'),
3262        ...         level=2,
3263        ...         success=[base.effect(set=('switch', 'on'))],
3264        ...         failure=[
3265        ...             base.effect(deactivate=True, delay=1),
3266        ...             base.effect(bounce=True)
3267        ...         ],
3268        ...         outcome=True
3269        ...     )
3270        ... ]
3271        >>> pf.unparseConsequence(c)
3272        '{<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}}'
3273        >>> c[0]['outcome'] = False
3274        >>> pf.unparseConsequence(c)
3275        '{<2>best(brains, brawn){set switch:on}>{deactivate ,1; bounce}}'
3276        >>> c[0]['outcome'] = None
3277        >>> pf.unparseConsequence(c)
3278        '{<2>best(brains, brawn){set switch:on}{deactivate ,1; bounce}}'
3279        >>> c = [
3280        ...     base.condition(
3281        ...         condition=base.ReqAny([
3282        ...             base.ReqCapability('brawny'),
3283        ...             base.ReqNot(base.ReqTokens('weights', 3))
3284        ...         ]),
3285        ...         consequence=[
3286        ...             base.challenge(
3287        ...                 skills=base.CombinedSkill('brains', 'brawn'),
3288        ...                 level=3,
3289        ...                 success=[base.effect(goto='home')],
3290        ...                 failure=[base.effect(bounce=True)],
3291        ...                 outcome=None
3292        ...             )
3293        ...         ]  # no alternative -> empty list
3294        ...     )
3295        ... ]
3296        >>> pf.unparseConsequence(c)
3297        '{??((brawny|!(weights*3))){\
3298<3>sum(brains, brawn){goto home}{bounce}}{}}'
3299        >>> c = [base.effect(gain='if(power){gain "mimic"}')]
3300        >>> # TODO: Make this work!
3301        >>> # pf.unparseConsequence(c)
3302
3303        '{gain "if(power){gain \\\\"mimic\\\\"}"}'
3304        """
3305        result = self.formatDict[Lexeme.openCurly]
3306        for item in consequence:
3307            if 'skills' in item:  # a Challenge
3308                item = cast(base.Challenge, item)
3309                result += self.unparseChallenge(item)
3310
3311            elif 'value' in item:  # an Effect
3312                item = cast(base.Effect, item)
3313                result += self.unparseEffect(item)
3314
3315            elif 'condition' in item:  # a Condition
3316                item = cast(base.Condition, item)
3317                result += self.unparseCondition(item)
3318
3319            else:  # bad dict
3320                raise TypeError(
3321                    f"Invalid consequence: items in the list must be"
3322                    f" Effects, Challenges, or Conditions (got a dictionary"
3323                    f" without 'skills', 'value', or 'condition' keys)."
3324                    f"\nGot item: {repr(item)}"
3325                )
3326            result += '; '
3327
3328        if result.endswith('; '):
3329            result = result[:-2]
3330
3331        return result + self.formatDict[Lexeme.closeCurly]
3332
3333    def parseMechanismSpecifierFromTokens(
3334        self,
3335        tokens: LexedTokens,
3336        start: int = 0
3337    ) -> Tuple[base.MechanismSpecifier, int]:
3338        """
3339        Parses a mechanism specifier starting at the specified position
3340        in the given tokens list. This function does not accept an
3341        ending position; instead, it returns a tuple containing the
3342        parsed `base.MechanismSpecifier` along with the index in the
3343        tokens list where the end of the specifier was found.
3344
3345        For example:
3346
3347        >>> pf = ParseFormat()
3348        >>> pf.parseMechanismSpecifierFromTokens(['m'])
3349        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3350 name='m'), 0)
3351        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'])
3352        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3353 name='a'), 0)
3354        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'], 1)
3355        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3356 name='m'), 1)
3357        >>> pf.parseMechanismSpecifierFromTokens(
3358        ...     ['a', Lexeme.domainSeparator, 'm']
3359        ... )
3360        (MechanismSpecifier(domain='a', zone=None, decision=None,\
3361 name='m'), 2)
3362        >>> pf.parseMechanismSpecifierFromTokens(
3363        ...     ['a', Lexeme.zoneSeparator, 'm']
3364        ... )
3365        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3366 name='m'), 2)
3367        >>> pf.parseMechanismSpecifierFromTokens(
3368        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
3369        ... )
3370        (MechanismSpecifier(domain=None, zone='a', decision='b',\
3371 name='m'), 4)
3372        >>> pf.parseMechanismSpecifierFromTokens(
3373        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
3374        ... )
3375        (MechanismSpecifier(domain='a', zone=None, decision='b',\
3376 name='m'), 4)
3377        >>> pf.parseMechanismSpecifierFromTokens(
3378        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
3379        ... )
3380        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3381 name='b'), 2)
3382        >>> pf.parseMechanismSpecifierFromTokens(
3383        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3384        ...     1
3385        ... )
3386        Traceback (most recent call last):
3387        ...
3388        exploration.parsing.ParseError...
3389        >>> pf.parseMechanismSpecifierFromTokens(
3390        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3391        ...     2
3392        ... )
3393        (MechanismSpecifier(domain='b', zone=None, decision=None,\
3394 name='m'), 4)
3395        >>> pf.parseMechanismSpecifierFromTokens(
3396        ...     [
3397        ...         'a',
3398        ...         Lexeme.domainSeparator,
3399        ...         'b',
3400        ...         Lexeme.zoneSeparator,
3401        ...         'c',
3402        ...         Lexeme.zoneSeparator,
3403        ...         'm'
3404        ...     ]
3405        ... )
3406        (MechanismSpecifier(domain='a', zone='b', decision='c', name='m'), 6)
3407        >>> pf.parseMechanismSpecifierFromTokens(
3408        ...     [
3409        ...         'a',
3410        ...         Lexeme.domainSeparator,
3411        ...         'b',
3412        ...         Lexeme.zoneSeparator,
3413        ...         'c',
3414        ...         Lexeme.zoneSeparator,
3415        ...         'm'
3416        ...     ],
3417        ...     2
3418        ... )
3419        (MechanismSpecifier(domain=None, zone='b', decision='c',\
3420 name='m'), 6)
3421        >>> pf.parseMechanismSpecifierFromTokens(
3422        ...     [
3423        ...         'a',
3424        ...         Lexeme.domainSeparator,
3425        ...         'b',
3426        ...         Lexeme.zoneSeparator,
3427        ...         'c',
3428        ...         Lexeme.zoneSeparator,
3429        ...         'm'
3430        ...     ],
3431        ...     4
3432        ... )
3433        (MechanismSpecifier(domain=None, zone=None, decision='c',\
3434 name='m'), 6)
3435        >>> pf.parseMechanismSpecifierFromTokens(
3436        ...     [
3437        ...         'roomB',
3438        ...         Lexeme.zoneSeparator,
3439        ...         'switch',
3440        ...         Lexeme.mechanismSeparator,
3441        ...         'on'
3442        ...     ]
3443        ... )
3444        (MechanismSpecifier(domain=None, zone=None, decision='roomB',\
3445 name='switch'), 2)
3446        """
3447        start, tEnd, nLeft = normalizeEnds(tokens, start, -1)
3448
3449        try:
3450            dSpec, dEnd = self.parseDecisionSpecifierFromTokens(
3451                tokens,
3452                start
3453            )
3454        except ParseError:
3455            raise ParseError(
3456                "Failed to parse mechanism specifier: couldn't parse"
3457                " the initial mechanism name."
3458            )
3459
3460        if isinstance(dSpec, int):
3461            raise ParseError(
3462                f"Invalid mechanism specifier: cannot use a decision ID"
3463                f" as the decision part. Got: {tokens[start:]}"
3464            )
3465            # TODO: Allow that?
3466
3467        mDomain = dSpec.domain
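            # If there's no room left for a separator plus another part,
            # then what we just parsed was the entire mechanism specifier:
            # the decision specifier's name is the mechanism name, and its
            # zone (if any) becomes the decision.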
3468        if dEnd == tEnd or dEnd == tEnd - 1:
3469            return (
3470                base.MechanismSpecifier(
3471                    domain=mDomain,
3472                    zone=None,
3473                    decision=dSpec.zone,
3474                    name=dSpec.name
3475                ),
3476                dEnd
3477            )
3478
3479        sep = tokens[dEnd + 1]
3480        after = tokens[dEnd + 2]
3481
3482        if sep == Lexeme.zoneSeparator:
3483            if isinstance(after, Lexeme):
3484                return (
3485                    base.MechanismSpecifier(
3486                        domain=mDomain,
3487                        zone=None,
3488                        decision=dSpec.zone,
3489                        name=dSpec.name
3490                    ),
3491                    dEnd
3492                )
3493            else:
3494                return (
3495                    base.MechanismSpecifier(
3496                        domain=mDomain,
3497                        zone=dSpec.zone,
3498                        decision=dSpec.name,
3499                        name=after
3500                    ),
3501                    dEnd + 2
3502                )
3503        else:
3504            return (
3505                base.MechanismSpecifier(
3506                    domain=mDomain,
3507                    zone=None,
3508                    decision=dSpec.zone,
3509                    name=dSpec.name
3510                ),
3511                dEnd
3512            )
3513
3514    def groupReqTokens(
3515        self,
3516        tokens: LexedTokens,
3517        start: int = 0,
3518        end: int = -1
3519    ) -> GroupedTokens:
3520        """
3521        Groups tokens for a requirement, stripping out all parentheses
3522        but replacing parenthesized expressions with sub-lists of tokens.
3523
3524        For example:
3525
3526        >>> pf = ParseFormat()
3527        >>> pf.groupReqTokens(['jump'])
3528        ['jump']
3529        >>> pf.groupReqTokens([Lexeme.openParen, 'jump'])
3530        Traceback (most recent call last):
3531        ...
3532        exploration.parsing.ParseError...
3533        >>> pf.groupReqTokens([Lexeme.closeParen, 'jump'])
3534        Traceback (most recent call last):
3535        ...
3536        exploration.parsing.ParseError...
3537        >>> pf.groupReqTokens(['jump', Lexeme.closeParen])
3538        Traceback (most recent call last):
3539        ...
3540        exploration.parsing.ParseError...
3541        >>> pf.groupReqTokens([Lexeme.openParen, 'jump', Lexeme.closeParen])
3542        [['jump']]
3543        >>> pf.groupReqTokens(
3544        ...     [
3545        ...         Lexeme.openParen,
3546        ...         'jump',
3547        ...         Lexeme.orBar,
3548        ...         'climb',
3549        ...         Lexeme.closeParen,
3550        ...         Lexeme.ampersand,
3551        ...         'crawl',
3552        ...     ]
3553        ... )
3554        [['jump', <Lexeme.orBar: ...>, 'climb'], <Lexeme.ampersand: ...>,\
3555 'crawl']
3556        """
3557        start, end, nTokens = normalizeEnds(tokens, start, end)
3558        if nTokens == 0:
3559            raise ParseError("Ran out of tokens.")
3560
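            # Scan the tokens once, keeping a stack of partially-built
            # groups: an openParen pushes a fresh sublist, a closeParen
            # pops the current sublist into its parent, and any other
            # token is appended to the innermost open group.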
3561        resultsStack: List[GroupedTokens] = [[]]
3562        here = start
3563        while here <= end:
3564            token = tokens[here]
3565            here += 1
3566            if token == Lexeme.closeParen:
3567                if len(resultsStack) == 1:
3568                    raise ParseError(
3569                        f"Too many closing parens at index {here - 1}"
3570                        f" in:\n{tokens[start:end + 1]}"
3571                    )
3572                else:
3573                    closed = resultsStack.pop()
3574                    resultsStack[-1].append(closed)
3575            elif token == Lexeme.openParen:
3576                resultsStack.append([])
3577            else:
3578                resultsStack[-1].append(token)
3579        if len(resultsStack) != 1:
3580            raise ParseError(
3581                f"Mismatched parentheses in tokens:"
3582                f"\n{tokens[start:end + 1]}"
3583            )
3584        return resultsStack[0]
3585
3586    def groupReqTokensByPrecedence(
3587        self,
3588        tokenGroups: GroupedTokens
3589    ) -> GroupedRequirementParts:
3590        """
3591        Re-groups requirement tokens that have been grouped using
3592        `groupReqTokens` according to operator precedence, effectively
3593        creating an equivalent result which would have been obtained by
3594        `groupReqTokens` if all possible non-redundant explicit
3595        parentheses had been included.
3596
3597        Also turns each leaf part into a `Requirement`.
3598
3599        TODO: Make this actually reasonably efficient T_T
3600
3601        Examples:
3602
3603        >>> pf = ParseFormat()
3604        >>> r = pf.parseRequirement('capability&roomB::switch:on')
3605        >>> pf.groupReqTokensByPrecedence(
3606        ...     [
3607        ...         ['jump', Lexeme.orBar, 'climb'],
3608        ...         Lexeme.ampersand,
3609        ...         Lexeme.notMarker,
3610        ...         'coin',
3611        ...         Lexeme.tokenCount,
3612        ...         '3'
3613        ...     ]
3614        ... )
3615        [\
3616[\
3617[[ReqCapability('jump'), <Lexeme.orBar: ...>, ReqCapability('climb')]],\
3618 <Lexeme.ampersand: ...>,\
3619 [<Lexeme.notMarker: ...>, ReqTokens('coin', 3)]\
3620]\
3621]
3622        """
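            # Overall flow (a sketch, assuming the default lexemes): the
            # grouped tokens ['A', Lexeme.orBar, 'B', Lexeme.ampersand, 'C']
            # become roughly
            # [[ReqCapability('A'), Lexeme.orBar,
            #   [ReqCapability('B'), Lexeme.ampersand, ReqCapability('C')]]]
            # because AND binds more tightly than OR.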
3623        subgrouped: List[Union[Lexeme, str, GroupedRequirementParts]] = []
3624        # First recursively group all parenthesized expressions
3625        for i, item in enumerate(tokenGroups):
3626            if isinstance(item, list):
3627                subgrouped.append(self.groupReqTokensByPrecedence(item))
3628            else:
3629                subgrouped.append(item)
3630
3631        # Now process all leaf requirements
3632        leavesConverted: GroupedRequirementParts = []
3633        i = 0
3634        while i < len(subgrouped):
3635            gItem = subgrouped[i]
3636
3637            if isinstance(gItem, list):
3638                leavesConverted.append(gItem)
3639            elif isinstance(gItem, Lexeme):
3640                leavesConverted.append(gItem)
3641            elif i == len(subgrouped) - 1:
3642                # The isinstance checks above already handled lists
3643                # and Lexemes, so a final bare item here must be a
3644                # string naming a requirement: 'X' is the
3645                # never-satisfied requirement, 'O' is the
3646                # always-satisfied requirement, and anything else
3647                # names a capability.
3648                assert isinstance(gItem, str)
3649                if gItem == 'X':
3650                    leavesConverted.append(base.ReqImpossible())
3651                elif gItem == 'O':
3652                    leavesConverted.append(base.ReqNothing())
3653                else:
3654                    leavesConverted.append(base.ReqCapability(gItem))
3655            else:
3656                assert isinstance(gItem, str)
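                    # A bare string might begin a multi-token mechanism
                    # specifier (e.g., zone::decision::name), so gather
                    # the following flat tokens and attempt that parse
                    # before falling back to simpler requirement forms.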
3657                try:
3658                    # TODO: Avoid list copy here...
3659                    couldBeMechanismSpecifier: LexedTokens = []
3660                    for ii in range(i, len(subgrouped)):
3661                        lexemeOrStr = subgrouped[ii]
3662                        if isinstance(lexemeOrStr, (Lexeme, str)):
3663                            couldBeMechanismSpecifier.append(lexemeOrStr)
3664                        else:
3665                            break
3666                    mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
3667                        couldBeMechanismSpecifier
3668                    )
3669                    mEnd += i
3670                    if (
3671                        mEnd >= len(subgrouped) - 2
3672                     or subgrouped[mEnd + 1] != Lexeme.mechanismSeparator
3673                    ):
3674                        raise ParseError("Not a mechanism requirement.")
3675
3676                    mState = subgrouped[mEnd + 2]
3677                    if not isinstance(mState, base.MechanismState):
3678                        raise ParseError("Not a mechanism requirement.")
3679                    leavesConverted.append(base.ReqMechanism(mSpec, mState))
3680                    i = mEnd + 2  # + 1 will happen automatically below
3681                except ParseError:
3682                    following = subgrouped[i + 1]
3683                    if following in (
3684                        Lexeme.tokenCount,
3685                        Lexeme.mechanismSeparator,
3686                        Lexeme.wigglyLine,
3687                        Lexeme.skillLevel
3688                    ):
3689                        if (
3690                            i == len(subgrouped) - 2
3691                         or isinstance(subgrouped[i + 2], Lexeme)
3692                        ):
3693                            if following == Lexeme.wigglyLine:
3694                                # Default tag value is 1
3695                                leavesConverted.append(base.ReqTag(gItem, 1))
3696                                i += 1  # another +1 automatic below
3697                            else:
3698                                raise ParseError(
3699                                    f"Lexeme at end of requirement. Grouped"
3700                                    f" tokens:\n{tokenGroups}"
3701                                )
3702                        else:
3703                            afterwards = subgrouped[i + 2]
3704                            if not isinstance(afterwards, str):
3705                                raise ParseError(
3706                                    f"Lexeme after token/mechanism/tag/skill"
3707                                    f" separator at index {i}."
3708                                    f" Grouped tokens:\n{tokenGroups}"
3709                                )
3710                            i += 2  # another +1 automatic below
3711                            if following == Lexeme.tokenCount:
3712                                try:
3713                                    tCount = int(afterwards)
3714                                except ValueError:
3715                                    raise ParseError(
3716                                        f"Token count could not be"
3717                                        f" parsed as an integer:"
3718                                        f" {afterwards!r}. Grouped"
3719                                        f" tokens:\n{tokenGroups}"
3720                                    )
3721                                leavesConverted.append(
3722                                    base.ReqTokens(gItem, tCount)
3723                                )
3724                            elif following == Lexeme.mechanismSeparator:
3725                                leavesConverted.append(
3726                                    base.ReqMechanism(gItem, afterwards)
3727                                )
3728                            elif following == Lexeme.wigglyLine:
3729                                tVal = self.parseTagValue(afterwards)
3730                                leavesConverted.append(
3731                                    base.ReqTag(gItem, tVal)
3732                                )
3733                            else:
3734                                assert following == Lexeme.skillLevel
3735                                try:
3736                                    sLevel = int(afterwards)
3737                                except ValueError:
3738                                    raise ParseError(
3739                                        f"Skill level could not be"
3740                                        f" parsed as an integer:"
3741                                        f" {afterwards!r}. Grouped"
3742                                        f" tokens:\n{tokenGroups}"
3743                                    )
3744                                leavesConverted.append(
3745                                    base.ReqLevel(gItem, sLevel)
3746                                )
3747                    else:
3748                        if gItem == 'X':
3749                            leavesConverted.append(base.ReqImpossible())
3750                        elif gItem == 'O':
3751                            leavesConverted.append(base.ReqNothing())
3752                        else:
3753                            leavesConverted.append(
3754                                base.ReqCapability(gItem)
3755                            )
3756
3757            # Finally, increment our index:
3758            i += 1
3759
3760        # Now group all NOT operators
3761        i = 0
3762        notsGrouped: GroupedRequirementParts = []
3763        while i < len(leavesConverted):
3764            leafItem = leavesConverted[i]
3765            group = []
3766            while leafItem == Lexeme.notMarker:
3767                group.append(leafItem)
3768                i += 1
3769                if i >= len(leavesConverted):
3770                    raise ParseError(
3771                        f"NOT at end of tokens:\n{leavesConverted}"
3772                    )
3773                leafItem = leavesConverted[i]
3774            if group == []:
3775                notsGrouped.append(leafItem)
3776                i += 1
3777            else:
3778                group.append(leafItem)
3779                i += 1
3780                notsGrouped.append(group)
3781
3782        # Next group all AND operators
3783        i = 0
3784        andsGrouped: GroupedRequirementParts = []
3785        while i < len(notsGrouped):
3786            notGroupItem = notsGrouped[i]
3787            if notGroupItem == Lexeme.ampersand:
3788                if i == len(notsGrouped) - 1:
3789                    raise ParseError(
3790                        f"AND at end of group in tokens:"
3791                        f"\n{tokenGroups}"
3792                        f"\nWhich had been grouped into:"
3793                        f"\n{notsGrouped}"
3794                    )
3795                itemAfter = notsGrouped[i + 1]
3796                if isinstance(itemAfter, Lexeme):
3797                    raise ParseError(
3798                        f"Lexeme after AND in group in tokens:"
3799                        f"\n{tokenGroups}"
3800                        f"\nWhich had been grouped into:"
3801                        f"\n{notsGrouped}"
3802                    )
3803                assert isinstance(itemAfter, (base.Requirement, list))
3804                prev = andsGrouped[-1]
3805                if (
3806                    isinstance(prev, list)
3807                and len(prev) > 2
3808                and prev[1] == Lexeme.ampersand
3809                ):
3810                    prev.extend(notsGrouped[i:i + 2])
3811                    i += 1  # with an extra +1 below
3812                else:
3813                    andsGrouped.append(
3814                        [andsGrouped.pop()] + notsGrouped[i:i + 2]
3815                    )
3816                    i += 1 # extra +1 below
3817            else:
3818                andsGrouped.append(notGroupItem)
3819            i += 1
3820
3821        # Finally check that we only have OR operators left over
3822        i = 0
3823        finalResult: GroupedRequirementParts = []
3824        while i < len(andsGrouped):
3825            andGroupItem = andsGrouped[i]
3826            if andGroupItem == Lexeme.orBar:
3827                if i == len(andsGrouped) - 1:
3828                    raise ParseError(
3829                        f"OR at end of group in tokens:"
3830                        f"\n{tokenGroups}"
3831                        f"\nWhich had been grouped into:"
3832                        f"\n{andsGrouped}"
3833                    )
3834                itemAfter = andsGrouped[i + 1]
3835                if isinstance(itemAfter, Lexeme):
3836                    raise ParseError(
3837                        f"Lexeme after OR in group in tokens:"
3838                        f"\n{tokenGroups}"
3839                        f"\nWhich had been grouped into:"
3840                        f"\n{andsGrouped}"
3841                    )
3842                assert isinstance(itemAfter, (base.Requirement, list))
3843                prev = finalResult[-1]
3844                if (
3845                    isinstance(prev, list)
3846                and len(prev) > 2
3847                and prev[1] == Lexeme.orBar
3848                ):
3849                    prev.extend(andsGrouped[i:i + 2])
3850                    i += 1  # with an extra +1 below
3851                else:
3852                    finalResult.append(
3853                        [finalResult.pop()] + andsGrouped[i:i + 2]
3854                    )
3855                    i += 1 # extra +1 below
3856            elif isinstance(andGroupItem, Lexeme):
3857                raise ParseError(
3858                    f"Leftover lexeme when grouping ORs at index {i}"
3859                    f" in grouped tokens:\n{andsGrouped}"
3860                    f"\nOriginal tokens were:\n{tokenGroups}"
3861                )
3862            else:
3863                finalResult.append(andGroupItem)
3864            i += 1
3865
3866        return finalResult
3867
3868    def parseRequirementFromRegroupedTokens(
3869        self,
3870        reqGroups: GroupedRequirementParts
3871    ) -> base.Requirement:
3872        """
3873        Recursive parser that works once tokens have been turned into
3874        requirements at the leaves and grouped by operator precedence
3875        otherwise (see `groupReqTokensByPrecedence`).
3876
3877        TODO: Simplify by just doing this while grouping...?
3878        """
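            # For example, the regrouped input [ReqCapability('A'),
            # Lexeme.orBar, [ReqCapability('B'), Lexeme.ampersand,
            # ReqCapability('C')]] should reduce to
            # ReqAny([ReqCapability('A'),
            #         ReqAll([ReqCapability('B'), ReqCapability('C')])]).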
3879        if len(reqGroups) == 0:
3880            raise ParseError("Ran out of tokens.")
3881
3882        elif len(reqGroups) == 1:
3883            only = reqGroups[0]
3884            if isinstance(only, list):
3885                return self.parseRequirementFromRegroupedTokens(only)
3886            elif isinstance(only, base.Requirement):
3887                return only
3888            else:
3889                raise ParseError(f"Invalid singleton group:\n{only}")
3890        elif reqGroups[0] == Lexeme.notMarker:
3891            if (
3892                not all(x == Lexeme.notMarker for x in reqGroups[:-1])
3893             or not isinstance(reqGroups[-1], (list, base.Requirement))
3894            ):
3895                raise ParseError(f"Invalid negation group:\n{reqGroups}")
3896            result = reqGroups[-1]
3897            if isinstance(result, list):
3898                result = self.parseRequirementFromRegroupedTokens(result)
3899            assert isinstance(result, base.Requirement)
3900            for _ in range(len(reqGroups) - 1):
3901                result = base.ReqNot(result)
3902            return result
3903        elif len(reqGroups) % 2 == 0:
3904            raise ParseError(f"Even-length non-negation group:\n{reqGroups}")
3905        else:
3906            if (
3907                reqGroups[1] not in (Lexeme.ampersand, Lexeme.orBar)
3908             or not all(
3909                    reqGroups[i] == reqGroups[1]
3910                    for i in range(1, len(reqGroups), 2)
3911                )
3912            ):
3913                raise ParseError(
3914                    f"Inconsistent operator(s) in group:\n{reqGroups}"
3915                )
3916            op = reqGroups[1]
3917            operands = [
3918                (
3919                    self.parseRequirementFromRegroupedTokens(x)
3920                    if isinstance(x, list)
3921                    else x
3922                )
3923                for x in reqGroups[::2]
3924            ]
3925            if not all(isinstance(x, base.Requirement) for x in operands):
3926                raise ParseError(
3927                    f"Item not reducible to Requirement in operator group:"
3928                    f"\n{reqGroups}"
3929                )
3930            reqSequence = cast(Sequence[base.Requirement], operands)
3931            if op == Lexeme.ampersand:
3932                return base.ReqAll(reqSequence).flatten()
3933            else:
3934                assert op == Lexeme.orBar
3935                return base.ReqAny(reqSequence).flatten()
3936
3937    def parseRequirementFromGroupedTokens(
3938        self,
3939        tokenGroups: GroupedTokens
3940    ) -> base.Requirement:
3941        """
3942        Parses a `base.Requirement` from a pre-grouped tokens list (see
3943        `groupReqTokens`). Uses the 'orBar', 'ampersand', 'notMarker',
3944        'tokenCount', and 'mechanismSeparator' `Lexeme`s to provide
3945        'or', 'and', and 'not' operators along with distinguishing
3946        between capabilities, tokens, and mechanisms.
3947
3948        Precedence ordering is NOT, then AND, then OR, but you are
3949        encouraged to use parentheses for explicit grouping (via the
3950        'openParen' and 'closeParen' `Lexeme`s, although those must be
3951        handled by `groupReqTokens`, so this function won't see them
3952        directly).
3953
3954        You can also use 'X' (without quotes) for a never-satisfied
3955        requirement, and 'O' (without quotes) for an always-satisfied
3956        requirement.
3957
3958        Note that when '!' is applied to a token requirement it flips
3959        the sense of the integer from 'must have at least this many' to
3960        'must have strictly less than this many'.
3961
3962        Raises a `ParseError` if the grouped tokens it is given cannot
3963        be parsed as a `Requirement`.
3964
3965        Examples:
3966
3967        >>> pf = ParseFormat()
3968        >>> pf.parseRequirementFromGroupedTokens(['capability'])
3969        ReqCapability('capability')
3970        >>> pf.parseRequirementFromGroupedTokens(
3971        ...     ['token', Lexeme.tokenCount, '3']
3972        ... )
3973        ReqTokens('token', 3)
3974        >>> pf.parseRequirementFromGroupedTokens(
3975        ...     ['mechanism', Lexeme.mechanismSeparator, 'state']
3976        ... )
3977        ReqMechanism('mechanism', 'state')
3978        >>> pf.parseRequirementFromGroupedTokens(
3979        ...     ['capability', Lexeme.orBar, 'token',
3980        ...      Lexeme.tokenCount, '3']
3981        ... )
3982        ReqAny([ReqCapability('capability'), ReqTokens('token', 3)])
3983        >>> pf.parseRequirementFromGroupedTokens(
3984        ...     ['one', Lexeme.ampersand, 'two', Lexeme.orBar, 'three']
3985        ... )
3986        ReqAny([ReqAll([ReqCapability('one'), ReqCapability('two')]),\
3987 ReqCapability('three')])
3988        >>> pf.parseRequirementFromGroupedTokens(
3989        ...     [
3990        ...         'one',
3991        ...         Lexeme.ampersand,
3992        ...         [
3993        ...              'two',
3994        ...              Lexeme.orBar,
3995        ...              'three'
3996        ...         ]
3997        ...     ]
3998        ... )
3999        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
4000 ReqCapability('three')])])
4001        >>> pf.parseRequirementFromTokens(['X'])
4002        ReqImpossible()
4003        >>> pf.parseRequirementFromTokens(['O'])
4004        ReqNothing()
4005        >>> pf.parseRequirementFromTokens(
4006        ...     [Lexeme.openParen, 'O', Lexeme.closeParen]
4007        ... )
4008        ReqNothing()
4009        """
4010        if len(tokenGroups) == 0:
4011            raise ParseError("Ran out of tokens.")
4012
4013        reGrouped = self.groupReqTokensByPrecedence(tokenGroups)
4014
4015        return self.parseRequirementFromRegroupedTokens(reGrouped)
4016
4017    def parseRequirementFromTokens(
4018        self,
4019        tokens: LexedTokens,
4020        start: int = 0,
4021        end: int = -1
4022    ) -> base.Requirement:
4023        """
4024        Parses a requirement from `LexedTokens` by grouping them first
4025        and then using `parseRequirementFromGroupedTokens`.
4026
4027        For example:
4028
4029        >>> pf = ParseFormat()
4030        >>> pf.parseRequirementFromTokens(
4031        ...     [
4032        ...         'one',
4033        ...         Lexeme.ampersand,
4034        ...         Lexeme.openParen,
4035        ...         'two',
4036        ...         Lexeme.orBar,
4037        ...         'three',
4038        ...         Lexeme.closeParen
4039        ...     ]
4040        ... )
4041        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
4042 ReqCapability('three')])])
4043        """
4044        grouped = self.groupReqTokens(tokens, start, end)
4045        return self.parseRequirementFromGroupedTokens(grouped)
4046
4047    def parseRequirement(self, encoded: str) -> base.Requirement:
4048        """
4049        Parses a `base.Requirement` from a string by calling `lex` and
4050        then feeding it into `ParseFormat.parseRequirementFromTokens`.
4051        As stated in `parseRequirementFromTokens`, the precedence
4052        binding order is NOT, then AND, then OR.
4053
4054        For example:
4055
4056        >>> pf = ParseFormat()
4057        >>> pf.parseRequirement('! coin * 3')
4058        ReqNot(ReqTokens('coin', 3))
4059        >>> pf.parseRequirement(
4060        ...     '  oneWord | "two words"|"three  words words" '
4061        ... )
4062        ReqAny([ReqCapability('oneWord'), ReqCapability('"two words"'),\
4063 ReqCapability('"three  words words"')])
4064        >>> pf.parseRequirement('words-with-dashes')
4065        ReqCapability('words-with-dashes')
4066        >>> r = pf.parseRequirement('capability&roomB::switch:on')
4067        >>> r
4068        ReqAll([ReqCapability('capability'),\
4069 ReqMechanism(MechanismSpecifier(domain=None, zone=None, decision='roomB',\
4070 name='switch'), 'on')])
4071        >>> r.unparse()
4072        '(capability&roomB::switch:on)'
4073        >>> pf.parseRequirement('!!!one')
4074        ReqNot(ReqNot(ReqNot(ReqCapability('one'))))
4075        >>> pf.parseRequirement('domain//zone::where::mechanism:state')
4076        ReqMechanism(MechanismSpecifier(domain='domain', zone='zone',\
4077 decision='where', name='mechanism'), 'state')
4078        >>> pf.parseRequirement('domain//mechanism:state')
4079        ReqMechanism(MechanismSpecifier(domain='domain', zone=None,\
4080 decision=None, name='mechanism'), 'state')
4081        >>> pf.parseRequirement('where::mechanism:state')
4082        ReqMechanism(MechanismSpecifier(domain=None, zone=None,\
4083 decision='where', name='mechanism'), 'state')
4084        >>> pf.parseRequirement('zone::where::mechanism:state')
4085        ReqMechanism(MechanismSpecifier(domain=None, zone='zone',\
4086 decision='where', name='mechanism'), 'state')
4087        >>> pf.parseRequirement('tag~')
4088        ReqTag('tag', 1)
4089        >>> pf.parseRequirement('tag~&tag2~')
4090        ReqAll([ReqTag('tag', 1), ReqTag('tag2', 1)])
4091        >>> pf.parseRequirement('tag~value|tag~3|tag~3.5|skill^3')
4092        ReqAny([ReqTag('tag', 'value'), ReqTag('tag', 3),\
4093 ReqTag('tag', 3.5), ReqLevel('skill', 3)])
4094        >>> pf.parseRequirement('tag~True|tag~False|tag~None')
4095        ReqAny([ReqTag('tag', True), ReqTag('tag', False), ReqTag('tag', None)])
4096
4097        Precedence examples:
4098
4099        >>> pf.parseRequirement('A|B&C')
4100        ReqAny([ReqCapability('A'), ReqAll([ReqCapability('B'),\
4101 ReqCapability('C')])])
4102        >>> pf.parseRequirement('A&B|C')
4103        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4104 ReqCapability('C')])
4105        >>> pf.parseRequirement('(A&B)|C')
4106        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4107 ReqCapability('C')])
4108        >>> pf.parseRequirement('(A&B|C)&D')
4109        ReqAll([ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4110 ReqCapability('C')]), ReqCapability('D')])
4111
4112        Error examples:
4113
4114        >>> pf.parseRequirement('one ! Word')
4115        Traceback (most recent call last):
4116        ...
4117        exploration.parsing.ParseError...
4118        >>> pf.parseRequirement('a|')
4119        Traceback (most recent call last):
4120        ...
4121        exploration.parsing.ParseError...
4122        >>> pf.parseRequirement('b!')
4123        Traceback (most recent call last):
4124        ...
4125        exploration.parsing.ParseError...
4126        >>> pf.parseRequirement('*emph*')
4127        Traceback (most recent call last):
4128        ...
4129        exploration.parsing.ParseError...
4130        >>> pf.parseRequirement('one&&two')
4131        Traceback (most recent call last):
4132        ...
4133        exploration.parsing.ParseError...
4134        >>> pf.parseRequirement('one!|two')
4135        Traceback (most recent call last):
4136        ...
4137        exploration.parsing.ParseError...
4138        >>> pf.parseRequirement('one*two')
4139        Traceback (most recent call last):
4140        ...
4141        exploration.parsing.ParseError...
4142        >>> pf.parseRequirement('one*')
4143        Traceback (most recent call last):
4144        ...
4145        exploration.parsing.ParseError...
4146        >>> pf.parseRequirement('()')
4147        Traceback (most recent call last):
4148        ...
4149        exploration.parsing.ParseError...
4150        >>> pf.parseRequirement('(one)*3')
4151        Traceback (most recent call last):
4152        ...
4153        exploration.parsing.ParseError...
4154        >>> pf.parseRequirement('a:')
4155        Traceback (most recent call last):
4156        ...
4157        exploration.parsing.ParseError...
4158        >>> pf.parseRequirement('a:b:c')
4159        Traceback (most recent call last):
4160        ...
4161        exploration.parsing.ParseError...
4162        >>> pf.parseRequirement('where::capability')
4163        Traceback (most recent call last):
4164        ...
4165        exploration.parsing.ParseError...
4166        """
4167        return self.parseRequirementFromTokens(
4168            lex(encoded, self.reverseFormat)
4169        )
4170
4171    def parseSkillCombinationFromTokens(
4172        self,
4173        tokens: LexedTokens,
4174        start: int = 0,
4175        end: int = -1
4176    ) -> Union[base.Skill, base.SkillCombination]:
4177        """
4178        Parses a skill combination from the specified range within the
4179        given tokens list. If just a single string token is selected, it
4180        will be returned as a `base.BestSkill` with just that skill
4181        inside.
4182
4183        For example:
4184
4185        >>> pf = ParseFormat()
4186        >>> pf.parseSkillCombinationFromTokens(['climbing'])
4187        BestSkill('climbing')
4188        >>> tokens = [
4189        ...     'best',
4190        ...     Lexeme.openParen,
4191        ...     'brains',
4192        ...     Lexeme.sepOrDelay,
4193        ...     'brawn',
4194        ...     Lexeme.closeParen,
4195        ... ]
4196        >>> pf.parseSkillCombinationFromTokens(tokens)
4197        BestSkill('brains', 'brawn')
4198        >>> tokens[2] = '3'  # not a lexeme so it's a string
4199        >>> pf.parseSkillCombinationFromTokens(tokens)
4200        BestSkill(3, 'brawn')
4201        >>> tokens = [
4202        ...     Lexeme.wigglyLine,
4203        ...     Lexeme.wigglyLine,
4204        ...     'yes',
4205        ... ]
4206        >>> pf.parseSkillCombinationFromTokens(tokens)
4207        InverseSkill(InverseSkill('yes'))
4208        """
4209        start, end, nTokens = normalizeEnds(tokens, start, end)
4210
4211        first = tokens[start]
4212        if nTokens == 1:
4213            if isinstance(first, base.Skill):
4214                try:
4215                    level = int(first)
4216                    return base.BestSkill(level)
4217                except ValueError:
4218                    return base.BestSkill(first)
4219            else:
4220                raise ParseError(
4221                    "Invalid SkillCombination:\n{tokens[start:end + 1]"
4222                )
4223
4224        if first == Lexeme.wigglyLine:
4225            inv = self.parseSkillCombinationFromTokens(
4226                tokens,
4227                start + 1,
4228                end
4229            )
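                # A bare skill name parses as a one-item BestSkill; unwrap
                # it so the inversion applies to the skill itself.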
4230            if isinstance(inv, base.BestSkill) and len(inv.skills) == 1:
4231                return base.InverseSkill(inv.skills[0])
4232            else:
4233                return base.InverseSkill(inv)
4234
4235        second = tokens[start + 1]
4236        if second != Lexeme.openParen:
4237            raise ParseError(
4238                f"Invalid SkillCombination (missing paren):"
4239                f"\n{tokens[start:end + 1]}"
4240            )
4241
4242        parenEnd = self.matchingBrace(
4243            tokens,
4244            start + 1,
4245            Lexeme.openParen,
4246            Lexeme.closeParen
4247        )
4248        if parenEnd != end:
4249            raise ParseError(
4250                f"Extra junk after SkillCombination:"
4251                f"\n{tokens[parenEnd + 1:end + 1]}"
4252            )
4253
4254        if first == 'if':
4255            parts = list(
4256                findSeparatedParts(
4257                    tokens,
4258                    Lexeme.sepOrDelay,
4259                    start + 2,
4260                    end - 1,
4261                    Lexeme.openParen,
4262                    Lexeme.closeParen
4263                )
4264            )
4265            if len(parts) != 3:
4266                raise ParseError(
4267                    f"Wrong number of parts for ConditionalSkill (needs"
4268                    f" 3, got {len(parts)}:"
4269                    f"\n{tokens[start + 2:end]}"
4270                )
4271            reqStart, reqEnd = parts[0]
4272            ifStart, ifEnd = parts[1]
4273            elseStart, elseEnd = parts[2]
4274            return base.ConditionalSkill(
4275                self.parseRequirementFromTokens(tokens, reqStart, reqEnd),
4276                self.parseSkillCombinationFromTokens(tokens, ifStart, ifEnd),
4277                self.parseSkillCombinationFromTokens(
4278                    tokens,
4279                    elseStart,
4280                    elseEnd
4281                ),
4282            )
4283        elif first in ('sum', 'best', 'worst'):
4284            make: type[base.SkillCombination]
4285            if first == 'sum':
4286                make = base.CombinedSkill
4287            elif first == 'best':
4288                make = base.BestSkill
4289            else:
4290                make = base.WorstSkill
4291
4292            subs = []
4293            for partStart, partEnd in findSeparatedParts(
4294                tokens,
4295                Lexeme.sepOrDelay,
4296                start + 2,
4297                end - 1,
4298                Lexeme.openParen,
4299                Lexeme.closeParen
4300            ):
4301                sub = self.parseSkillCombinationFromTokens(
4302                    tokens,
4303                    partStart,
4304                    partEnd
4305                )
4306                if (
4307                    isinstance(sub, base.BestSkill)
4308                and len(sub.skills) == 1
4309                ):
4310                    subs.append(sub.skills[0])
4311                else:
4312                    subs.append(sub)
4313
4314            return make(*subs)
4315        else:
4316            raise ParseError(
4317                "Invalid SkillCombination:\n{tokens[start:end + 1]"
4318            )
4319
4320    def parseSkillCombination(
4321        self,
4322        encoded: str
4323    ) -> base.SkillCombination:
4324        """
4325        Parses a `SkillCombination` from a string. Calls `lex` and then
4326        `parseSkillCombinationFromTokens`.
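
            For example (an illustrative example based on the doctests
            for `parseSkillCombinationFromTokens`):

            >>> pf = ParseFormat()
            >>> pf.parseSkillCombination('best(brains, brawn)')
            BestSkill('brains', 'brawn')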
4327        """
4328        result = self.parseSkillCombinationFromTokens(
4329            lex(encoded, self.reverseFormat)
4330        )
4331        if not isinstance(result, base.SkillCombination):
4332            return base.BestSkill(result)
4333        else:
4334            return result
4335
4336    def parseConditionFromTokens(
4337        self,
4338        tokens: LexedTokens,
4339        start: int = 0,
4340        end: int = -1
4341    ) -> base.Condition:
4342        """
4343        Parses a `base.Condition` from a lexed tokens list. For example:
4344
4345        >>> pf = ParseFormat()
4346        >>> tokens = [
4347        ...     Lexeme.doubleQuestionmark,
4348        ...     Lexeme.openParen,
4349        ...     "fire",
4350        ...     Lexeme.ampersand,
4351        ...     "water",
4352        ...     Lexeme.closeParen,
4353        ...     Lexeme.openCurly,
4354        ...     "gain",
4355        ...     "wind",
4356        ...     Lexeme.closeCurly,
4357        ...     Lexeme.openCurly,
4358        ...     Lexeme.closeCurly,
4359        ... ]
4360        >>> pf.parseConditionFromTokens(tokens) == base.condition(
4361        ...     condition=base.ReqAll([
4362        ...         base.ReqCapability('fire'),
4363        ...         base.ReqCapability('water')
4364        ...     ]),
4365        ...     consequence=[base.effect(gain='wind')]
4366        ... )
4367        True
4368        """
4369        start, end, nTokens = normalizeEnds(tokens, start, end)
4370        if nTokens < 8:
4371            raise ParseError(
4372                f"A Condition requires at least 8 tokens (got {nTokens})."
4373            )
4374        if tokens[start] != Lexeme.doubleQuestionmark:
4375            raise ParseError(
4376                f"A Condition must start with"
4377                f" {repr(self.formatDict[Lexeme.doubleQuestionmark])}"
4378            )
4379        try:
4380            consequenceStart = tokens.index(Lexeme.openCurly, start)
4381        except ValueError:
4382            raise ParseError("A condition must include a consequence block.")
4383        consequenceEnd = self.matchingBrace(tokens, consequenceStart)
4384        altStart = consequenceEnd + 1
4385        altEnd = self.matchingBrace(tokens, altStart)
4386
4387        if altEnd != end:
4388            raise ParseError(
4389                f"Junk after condition:\n{tokens[altEnd + 1: end + 1]}"
4390            )
4391
4392        return base.condition(
4393            condition=self.parseRequirementFromTokens(
4394                tokens,
4395                start + 1,
4396                consequenceStart - 1
4397            ),
4398            consequence=self.parseConsequenceFromTokens(
4399                tokens,
4400                consequenceStart,
4401                consequenceEnd
4402            ),
4403            alternative=self.parseConsequenceFromTokens(
4404                tokens,
4405                altStart,
4406                altEnd
4407            )
4408        )
4409
4410    def parseCondition(
4411        self,
4412        encoded: str
4413    ) -> base.Condition:
4414        """
4415        Lexes the given string and then calls `parseConditionFromTokens`
4416        to return a `base.Condition`.
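
            For example (mirroring the round trip shown in the
            `unparseCondition` doctests):

            >>> pf = ParseFormat()
            >>> r = '??((brawny|!(weights*3))){gain power}{}'
            >>> pf.parseCondition(r) == base.condition(
            ...     condition=base.ReqAny([
            ...         base.ReqCapability('brawny'),
            ...         base.ReqNot(base.ReqTokens('weights', 3))
            ...     ]),
            ...     consequence=[base.effect(gain='power')]
            ... )
            True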
4417        """
4418        return self.parseConditionFromTokens(
4419            lex(encoded, self.reverseFormat)
4420        )
4421
4422    def parseChallengeFromTokens(
4423        self,
4424        tokens: LexedTokens,
4425        start: int = 0,
4426        end: int = -1
4427    ) -> base.Challenge:
4428        """
4429        Parses a `base.Challenge` from a lexed tokens list.
4430
4431        For example:
4432
4433        >>> pf = ParseFormat()
4434        >>> tokens = [
4435        ...     Lexeme.angleLeft,
4436        ...     '2',
4437        ...     Lexeme.angleRight,
4438        ...     'best',
4439        ...     Lexeme.openParen,
4440        ...     "chess",
4441        ...     Lexeme.sepOrDelay,
4442        ...     "checkers",
4443        ...     Lexeme.closeParen,
4444        ...     Lexeme.openCurly,
4445        ...     "gain",
4446        ...     "coin",
4447        ...     Lexeme.tokenCount,
4448        ...     "5",
4449        ...     Lexeme.closeCurly,
4450        ...     Lexeme.angleRight,
4451        ...     Lexeme.openCurly,
4452        ...     "lose",
4453        ...     "coin",
4454        ...     Lexeme.tokenCount,
4455        ...     "5",
4456        ...     Lexeme.closeCurly,
4457        ... ]
4458        >>> c = pf.parseChallengeFromTokens(tokens)
4459        >>> c['skills'] == base.BestSkill('chess', 'checkers')
4460        True
4461        >>> c['level']
4462        2
4463        >>> c['success'] == [base.effect(gain=('coin', 5))]
4464        True
4465        >>> c['failure'] == [base.effect(lose=('coin', 5))]
4466        True
4467        >>> c['outcome']
4468        False
4469        >>> c == base.challenge(
4470        ...     skills=base.BestSkill('chess', 'checkers'),
4471        ...     level=2,
4472        ...     success=[base.effect(gain=('coin', 5))],
4473        ...     failure=[base.effect(lose=('coin', 5))],
4474        ...     outcome=False
4475        ... )
4476        True
4477        >>> t2 = ['hi'] + tokens + ['bye']  # parsing only part of the list
4478        >>> c == pf.parseChallengeFromTokens(t2, 1, -2)
4479        True
4480        """
4481        start, end, nTokens = normalizeEnds(tokens, start, end)
4482        if nTokens < 8:
4483            raise ParseError(
4484                f"Not enough tokens for a challenge: {nTokens}"
4485            )
4486        if tokens[start] != Lexeme.angleLeft:
4487            raise ParseError(
4488                f"Challenge must start with"
4489                f" {repr(self.formatDict[Lexeme.angleLeft])}"
4490            )
4491        levelStr = tokens[start + 1]
4492        if isinstance(levelStr, Lexeme):
4493            raise ParseError(
4494                f"Challenge must start with a level in angle brackets"
4495                f" (got {repr(self.formatDict[levelStr])})."
4496            )
4497        if tokens[start + 2] != Lexeme.angleRight:
4498            raise ParseError(
4499                f"Challenge must include"
4500                f" {repr(self.formatDict[Lexeme.angleRight])} after"
4501                f" the level."
4502            )
4503        try:
4504            level = int(levelStr)
4505        except ValueError:
4506            raise ParseError(
4507                f"Challenge level must be an integer (got"
4508                f" {repr(tokens[start + 1])})."
4509            )
4510        try:
4511            successStart = tokens.index(Lexeme.openCurly, start)
4512            skillsEnd = successStart - 1
4513        except ValueError:
4514            raise ParseError("A challenge must include a consequence block.")
4515
4516        outcome: Optional[bool] = None
4517        if tokens[skillsEnd] == Lexeme.angleRight:
4518            skillsEnd -= 1
4519            outcome = True
4520        successEnd = self.matchingBrace(tokens, successStart)
4521        failStart = successEnd + 1
4522        if tokens[failStart] == Lexeme.angleRight:
4523            failStart += 1
4524            if outcome is not None:
4525                raise ParseError(
4526                    "Cannot indicate both success and failure as"
4527                    " outcomes in a challenge."
4528                )
4529            outcome = False
4530        failEnd = self.matchingBrace(tokens, failStart)
4531
4532        if failEnd != end:
4533            raise ParseError(
4534                f"Junk after challenge:\n{tokens[failEnd + 1:end + 1]}"
4535            )
4536
4537        skills = self.parseSkillCombinationFromTokens(
4538            tokens,
4539            start + 3,
4540            skillsEnd
4541        )
4542        if isinstance(skills, base.Skill):
4543            skills = base.BestSkill(skills)
4544
4545        return base.challenge(
4546            level=level,
4547            outcome=outcome,
4548            skills=skills,
4549            success=self.parseConsequenceFromTokens(
4550                tokens[successStart:successEnd + 1]
4551            ),
4552            failure=self.parseConsequenceFromTokens(
4553                tokens[failStart:failEnd + 1]
4554            )
4555        )
4556
4557    def parseChallenge(
4558        self,
4559        encoded: str
4560    ) -> base.Challenge:
4561        """
4562        Lexes the given string and then calls `parseChallengeFromTokens`
4563        to return a `base.Challenge`.
4564        """
4565        return self.parseChallengeFromTokens(
4566            lex(encoded, self.reverseFormat)
4567        )
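    # A minimal sketch of usage (assuming the default format; this
    # mirrors the token-based doctest in parseChallengeFromTokens):
    #
    #   >>> pf = ParseFormat()
    #   >>> pf.parseChallenge(
    #   ...     '<2>best(chess, checkers){gain coin*5}>{lose coin*5}'
    #   ... ) == base.challenge(
    #   ...     skills=base.BestSkill('chess', 'checkers'),
    #   ...     level=2,
    #   ...     success=[base.effect(gain=('coin', 5))],
    #   ...     failure=[base.effect(lose=('coin', 5))],
    #   ...     outcome=False
    #   ... )
    #   True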
4568
4569    def parseConsequenceFromTokens(
4570        self,
4571        tokens: LexedTokens,
4572        start: int = 0,
4573        end: int = -1
4574    ) -> base.Consequence:
4575        """
4576        Parses a consequence from a lexed token list. If start and/or end
4577        are specified, only processes the part of the list between those
4578        two indices (inclusive). Use `lex` to turn a string into a
4579        `LexedTokens` list (or use `ParseFormat.parseConsequence` which
4580        does that for you).
4581
4582        An example:
4583
4584        >>> pf = ParseFormat()
4585        >>> tokens = [
4586        ...     Lexeme.openCurly,
4587        ...     'gain',
4588        ...     'power',
4589        ...     Lexeme.closeCurly
4590        ... ]
4591        >>> c = pf.parseConsequenceFromTokens(tokens)
4592        >>> c == [base.effect(gain='power')]
4593        True
4594        >>> tokens.append('hi')
4595        >>> c == pf.parseConsequenceFromTokens(tokens, end=-2)
4596        True
4597        >>> c == pf.parseConsequenceFromTokens(tokens, end=3)
4598        True
4599        """
4600        start, end, nTokens = normalizeEnds(tokens, start, end)
4601
4602        if nTokens < 2:
4603            raise ParseError("Consequence must have at least two tokens.")
4604
4605        if tokens[start] != Lexeme.openCurly:
4606            raise ParseError(
4607                f"Consequence must start with an open curly brace:"
4608                f" {repr(self.formatDict[Lexeme.openCurly])}."
4609            )
4610
4611        if tokens[end] != Lexeme.closeCurly:
4612            raise ParseError(
4613                f"Consequence must end with a closing curly brace:"
4614                f" {repr(self.formatDict[Lexeme.closeCurly])}."
4615            )
4616
4617        if nTokens == 2:
4618            return []
4619
4620        result: base.Consequence = []
4621        for partStart, partEnd in findSeparatedParts(
4622            tokens,
4623            Lexeme.consequenceSeparator,
4624            start + 1,
4625            end - 1,
4626            Lexeme.openCurly,
4627            Lexeme.closeCurly
4628        ):
4629            if partEnd - partStart < 0:
4630                raise ParseError("Empty consequence part.")
4631            if tokens[partStart] == Lexeme.angleLeft:  # a challenge
4632                result.append(
4633                    self.parseChallengeFromTokens(
4634                        tokens,
4635                        partStart,
4636                        partEnd
4637                    )
4638                )
4639            elif tokens[partStart] == Lexeme.doubleQuestionmark:  # condition
4640                result.append(
4641                    self.parseConditionFromTokens(
4642                        tokens,
4643                        partStart,
4644                        partEnd
4645                    )
4646                )
4647            else:  # Must be an effect
4648                result.append(
4649                    self.parseEffectFromTokens(
4650                        tokens,
4651                        partStart,
4652                        partEnd
4653                    )
4654                )
4655
4656        return result
4657
4658    def parseConsequence(self, encoded: str) -> base.Consequence:
4659        """
4660        Parses a consequence from a string. Uses `lex` and
4661        `ParseFormat.parseConsequenceFromTokens`. For example:
4662
4663        >>> pf = ParseFormat()
4664        >>> c = pf.parseConsequence(
4665        ...   '{gain power}'
4666        ... )
4667        >>> c == [base.effect(gain='power')]
4668        True
4669        >>> pf.unparseConsequence(c)
4670        '{gain power}'
4671        >>> c = pf.parseConsequence(
4672        ...     '{\\n'
4673        ...     '    ??(brawny|!weights*3){\\n'
4674        ...     '        <3>sum(brains, brawn){goto home}>{bounce}\\n'
4675        ...     '    }{};\\n'
4676        ...     '    lose coin*1\\n'
4677        ...     '}'
4678        ... )
4679        >>> len(c)
4680        2
4681        >>> c[0]['condition'] == base.ReqAny([
4682        ...     base.ReqCapability('brawny'),
4683        ...     base.ReqNot(base.ReqTokens('weights', 3))
4684        ... ])
4685        True
4686        >>> len(c[0]['consequence'])
4687        1
4688        >>> len(c[0]['alternative'])
4689        0
4690        >>> cons = c[0]['consequence'][0]
4691        >>> cons['skills'] == base.CombinedSkill('brains', 'brawn')
4692        True
4693        >>> cons['level']
4694        3
4695        >>> len(cons['success'])
4696        1
4697        >>> len(cons['failure'])
4698        1
4699        >>> cons['success'][0] == base.effect(goto='home')
4700        True
4701        >>> cons['failure'][0] == base.effect(bounce=True)
4702        True
4703        >>> cons['outcome'] = False
4704        >>> c[0] == base.condition(
4705        ...     condition=base.ReqAny([
4706        ...         base.ReqCapability('brawny'),
4707        ...         base.ReqNot(base.ReqTokens('weights', 3))
4708        ...     ]),
4709        ...     consequence=[
4710        ...         base.challenge(
4711        ...             skills=base.CombinedSkill('brains', 'brawn'),
4712        ...             level=3,
4713        ...             success=[base.effect(goto='home')],
4714        ...             failure=[base.effect(bounce=True)],
4715        ...             outcome=False
4716        ...         )
4717        ...     ]
4718        ... )
4719        True
4720        >>> c[1] == base.effect(lose=('coin', 1))
4721        True
4722        """
4723        return self.parseConsequenceFromTokens(
4724            lex(encoded, self.reverseFormat)
4725        )

A ParseFormat manages the mapping from markers to entry types and vice versa.

ParseFormat(
    formatDict: Dict[Lexeme, str] = DEFAULT_FORMAT,
    effectNames: Dict[str, base.EffectType] = DEFAULT_EFFECT_NAMES,
    focalizationNames: Dict[str, base.DomainFocalization] = DEFAULT_FOCALIZATION_NAMES,
    successFailureIndicators: Tuple[str, str] = ('s', 'f')
)
590    def __init__(
591        self,
592        formatDict: Format = DEFAULT_FORMAT,
593        effectNames: Dict[str, base.EffectType] = DEFAULT_EFFECT_NAMES,
594        focalizationNames: Dict[
595            str,
596            base.DomainFocalization
597        ] = DEFAULT_FOCALIZATION_NAMES,
598        successFailureIndicators: Tuple[str, str] = DEFAULT_SF_INDICATORS
599    ):
600        """
601        Sets up the parsing format. Requires a `Format` dictionary to
602        define the specifics. Raises a `ValueError` unless the keys of
603        the `Format` dictionary exactly match the `Lexeme` values.
604        """
605        self.formatDict = formatDict
606        self.effectNames = effectNames
607        self.focalizationNames = focalizationNames
608        if (
609            len(successFailureIndicators) != 2
610        or any(len(i) != 1 for i in successFailureIndicators)
611        ):
612            raise ValueError(
613                f"Invalid success/failure indicators: must be a pair of"
614                f" length-1 strings. Got: {successFailureIndicators!r}"
615            )
616        self.successIndicator, self.failureIndicator = (
617            successFailureIndicators
618        )
619
620        # Check completeness for each dictionary
621        checkCompleteness('formatDict', self.formatDict, set(Lexeme))
622        checkCompleteness(
623            'effectNames',
624            self.effectNames,
625            valuesSet=set(get_args(base.EffectType))
626        )
627        checkCompleteness(
628            'focalizationNames',
629            self.focalizationNames,
630            valuesSet=set(get_args(base.DomainFocalization))
631        )
632
633        # Build some reverse lookup dictionaries for specific
634        # circumstances:
635        self.reverseFormat = {y: x for (x, y) in self.formatDict.items()}
636
637        self.effectModMap = {
638            self.formatDict[x]: x
639            for x in [
640                Lexeme.effectCharges,
641                Lexeme.sepOrDelay,
642                Lexeme.inCommon,
643                Lexeme.isHidden
644            ]
645        }

Sets up the parsing format. Requires a Format dictionary to define the specifics. Raises a ValueError unless the keys of the Format dictionary exactly match the Lexeme values.

formatDict
effectNames
focalizationNames
reverseFormat
effectModMap
def lex(self, content: str) -> List[Union[Lexeme, str]]:
647    def lex(self, content: str) -> LexedTokens:
648        """
649        Applies `lex` using this format's lexeme mapping.
650        """
651        return lex(content, self.reverseFormat)

Applies lex using this format's lexeme mapping.

def onOff(self, word: str) -> Optional[bool]:
653    def onOff(self, word: str) -> Optional[bool]:
654        """
655        Parses an on/off indicator and returns a boolean (`True` for on
656        and `False` for off). Returns `None` if the word isn't either
657        the 'on' or the 'off' word. Generates a `ParseWarning`
658        (and still returns `None`) if the word is a case-swapped version
659        of the 'on' or 'off' word and is not equal to either of them.
660        """
661        onWord = self.formatDict[Lexeme.stateOn]
662        offWord = self.formatDict[Lexeme.stateOff]
663
664        # Generate warning if we suspect a case error
665        if (
666            word.casefold() in (onWord, offWord)
667        and word not in (onWord, offWord)
668        ):
669            warnings.warn(
670                (
671                    f"Word '{word}' cannot be interpreted as an on/off"
672                    f" value, although it is almost one (the correct"
673                    f" values are '{onWord}' and '{offWord}')."
674                ),
675                ParseWarning
676            )
677
678        # return the appropriate value
679        if word == onWord:
680            return True
681        elif word == offWord:
682            return False
683        else:
684            return None

Parses an on/off indicator and returns a boolean (True for on and False for off). Returns None if the word isn't either the 'on' or the 'off' word. Generates a ParseWarning (and still returns None) if the word is a case-swapped version of the 'on' or 'off' word and is not equal to either of them.
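
For example (a sketch assuming the default '=on'/'=off' words):

>>> pf = ParseFormat()
>>> pf.onOff('=on')
True
>>> pf.onOff('=off')
False
>>> pf.onOff('maybe') is None
True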

def matchingBrace( self, tokens: List[Union[Lexeme, str]], where: int, opener: int = <Lexeme.openCurly: 17>, closer: int = <Lexeme.closeCurly: 18>) -> int:
686    def matchingBrace(
687        self,
688        tokens: LexedTokens,
689        where: int,
690        opener: int = Lexeme.openCurly,
691        closer: int = Lexeme.closeCurly
692    ) -> int:
693        """
694        Returns the index within the given tokens list of the closing
695        curly brace which matches the open brace at the specified index.
696        You can specify custom `opener` and/or `closer` lexemes to find
697        matching pairs of other things. Raises a `ParseError` if there
698        is no opening brace at the specified index, or if there isn't a
699        matching closing brace. Handles nested braces of the specified
700        type.
701
702        Examples:
703        >>> pf = ParseFormat()
704        >>> ob = Lexeme.openCurly
705        >>> cb = Lexeme.closeCurly
706        >>> pf.matchingBrace([ob, cb], 0)
707        1
708        >>> pf.matchingBrace([ob, cb], 1)
709        Traceback (most recent call last):
710          ...
711        exploration.parsing.ParseError: ...
712        >>> pf.matchingBrace(['hi', ob, cb], 0)
713        Traceback (most recent call last):
714          ...
715        exploration.parsing.ParseError: ...
716        >>> pf.matchingBrace(['hi', ob, cb], 1)
717        2
718        >>> pf.matchingBrace(['hi', ob, 'lo', cb], 1)
719        3
720        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 1)
721        Traceback (most recent call last):
722          ...
723        exploration.parsing.ParseError: ...
724        >>> pf.matchingBrace([ob, 'hi', 'lo', cb], 0)
725        3
726        >>> pf.matchingBrace([ob, ob, cb, cb], 0)
727        3
728        >>> pf.matchingBrace([ob, ob, cb, cb], 1)
729        2
730        >>> pf.matchingBrace([ob, cb, ob, cb], 0)
731        1
732        >>> pf.matchingBrace([ob, cb, ob, cb], 2)
733        3
734        >>> pf.matchingBrace([ob, cb, cb, cb], 0)
735        1
736        >>> pf.matchingBrace([ob, ob, ob, cb], 0)
737        Traceback (most recent call last):
738          ...
739        exploration.parsing.ParseError: ...
740        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 0)
741        7
742        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 1)
743        6
744        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 2)
745        Traceback (most recent call last):
746          ...
747        exploration.parsing.ParseError: ...
748        >>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 3)
749        4
750        >>> op = Lexeme.openParen
751        >>> cp = Lexeme.closeParen
752        >>> pf.matchingBrace([ob, op, ob, cp], 1, op, cp)
753        3
754        """
755        if where >= len(tokens):
756            raise ParseError(
757                f"Out-of-bounds brace start: index {where} with"
758                f" {len(tokens)} tokens."
759            )
760        if tokens[where] != opener:
761            raise ParseError(
762                f"Can't find matching brace for token"
763                f" {repr(tokens[where])} at index {where} because it's"
764                f" not an open brace."
765            )
766
767        level = 1
768        for i in range(where + 1, len(tokens)):
769            token = tokens[i]
770            if token == opener:
771                level += 1
772            elif token == closer:
773                level -= 1
774                if level == 0:
775                    return i
776
777        raise ParseError(
778            f"Failed to find matching curly brace from index {where}."
779        )

Returns the index within the given tokens list of the closing curly brace which matches the open brace at the specified index. You can specify custom opener and/or closer lexemes to find matching pairs of other things. Raises a ParseError if there is no opening brace at the specified index, or if there isn't a matching closing brace. Handles nested braces of the specified type.

Examples:

>>> pf = ParseFormat()
>>> ob = Lexeme.openCurly
>>> cb = Lexeme.closeCurly
>>> pf.matchingBrace([ob, cb], 0)
1
>>> pf.matchingBrace([ob, cb], 1)
Traceback (most recent call last):
  ...
ParseError: ...
>>> pf.matchingBrace(['hi', ob, cb], 0)
Traceback (most recent call last):
  ...
ParseError: ...
>>> pf.matchingBrace(['hi', ob, cb], 1)
2
>>> pf.matchingBrace(['hi', ob, 'lo', cb], 1)
3
>>> pf.matchingBrace([ob, 'hi', 'lo', cb], 1)
Traceback (most recent call last):
  ...
ParseError: ...
>>> pf.matchingBrace([ob, 'hi', 'lo', cb], 0)
3
>>> pf.matchingBrace([ob, ob, cb, cb], 0)
3
>>> pf.matchingBrace([ob, ob, cb, cb], 1)
2
>>> pf.matchingBrace([ob, cb, ob, cb], 0)
1
>>> pf.matchingBrace([ob, cb, ob, cb], 2)
3
>>> pf.matchingBrace([ob, cb, cb, cb], 0)
1
>>> pf.matchingBrace([ob, ob, ob, cb], 0)
Traceback (most recent call last):
  ...
ParseError: ...
>>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 0)
7
>>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 1)
6
>>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 2)
Traceback (most recent call last):
  ...
ParseError: ...
>>> pf.matchingBrace([ob, ob, 'hi', ob, cb, 'lo', cb, cb], 3)
4
>>> op = Lexeme.openParen
>>> cp = Lexeme.closeParen
>>> pf.matchingBrace([ob, op, ob, cp], 1, op, cp)
3
def parseFocalization(self, word: str) -> Literal['singular', 'plural', 'spreading']:
781    def parseFocalization(self, word: str) -> base.DomainFocalization:
782        """
783        Parses a focalization type for a domain, recognizing the
784        names in `focalizationNames` (by default 'singular',
785        'plural', and 'spreading').
786        """
787        try:
788            return self.focalizationNames[word]
789        except KeyError:
790            raise ParseError(
791                f"Invalid domain focalization name {repr(word)}. Valid"
792                f" names are: {repr(list(self.focalizationNames))}."
793            )

Parses a focalization type for a domain, recognizing the names in focalizationNames (by default 'singular', 'plural', and 'spreading').
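
For example (a sketch assuming the default focalization names):

>>> pf = ParseFormat()
>>> pf.parseFocalization('plural')
'plural'
>>> pf.parseFocalization('nope') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
  ...
ParseError: ...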

def parseTagValue( self, value: str) -> Union[bool, int, float, str, list, dict, NoneType, exploration.base.Requirement, List[Union[exploration.base.Challenge, exploration.base.Effect, exploration.base.Condition]]]:
795    def parseTagValue(self, value: str) -> base.TagValue:
796        """
797        Converts a string to a tag value, following these rules:
798
799        1. If the string is exactly one of 'None', 'True', or 'False', we
800            convert it to the corresponding Python value.
801        2. If the string can be converted to an integer without raising a
802            ValueError, we use that integer.
803        3. If the string can be converted to a float without raising a
804            ValueError, we use that float.
805        4. Otherwise, it remains a string.
806
807        Note that there is currently no syntax for using list, dictionary,
808        Requirement, or Consequence tag values.
809        TODO: Support those types?
810
811        Examples:
812
813        >>> pf = ParseFormat()
814        >>> pf.parseTagValue('hi')
815        'hi'
816        >>> pf.parseTagValue('3')
817        3
818        >>> pf.parseTagValue('3.0')
819        3.0
820        >>> pf.parseTagValue('True')
821        True
822        >>> pf.parseTagValue('False')
823        False
824        >>> pf.parseTagValue('None') is None
825        True
826        >>> pf.parseTagValue('none')
827        'none'
828        """
829        # TODO: Allow these keywords to be redefined?
830        if value == 'True':
831            return True
832        elif value == 'False':
833            return False
834        elif value == 'None':
835            return None
836        else:
837            try:
838                return int(value)
839            except ValueError:
840                try:
841                    return float(value)
842                except ValueError:
843                    return value

Converts a string to a tag value, following these rules:

  1. If the string is exactly one of 'None', 'True', or 'False', we convert it to the corresponding Python value.
  2. If the string can be converted to an integer without raising a ValueError, we use that integer.
  3. If the string can be converted to a float without raising a ValueError, we use that float.
  4. Otherwise, it remains a string.

Note that there is currently no syntax for using list, dictionary, Requirement, or Consequence tag values. TODO: Support those types?

Examples:

>>> pf = ParseFormat()
>>> pf.parseTagValue('hi')
'hi'
>>> pf.parseTagValue('3')
3
>>> pf.parseTagValue('3.0')
3.0
>>> pf.parseTagValue('True')
True
>>> pf.parseTagValue('False')
False
>>> pf.parseTagValue('None') is None
True
>>> pf.parseTagValue('none')
'none'
def unparseTagValue( self, value: Union[bool, int, float, str, list, dict, NoneType, exploration.base.Requirement, List[Union[exploration.base.Challenge, exploration.base.Effect, exploration.base.Condition]]]) -> str:
845    def unparseTagValue(self, value: base.TagValue) -> str:
846        """
847        Converts a tag value into a string that would be parsed back into a
848        tag value via `parseTagValue`. Currently does not work for list,
849        dictionary, Requirement, or Consequence values.
850        TODO: Those
851        """
852        return str(value)

Converts a tag value into a string that would be parsed back into a tag value via parseTagValue. Currently does not work for list, dictionary, Requirement, or Consequence values. TODO: Those
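
For example (a sketch of the expected round-trip behavior):

>>> pf = ParseFormat()
>>> pf.unparseTagValue(3)
'3'
>>> pf.unparseTagValue(None)
'None'
>>> pf.parseTagValue(pf.unparseTagValue(3.5))
3.5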

def hasZoneParts(self, name: str) -> bool:
854    def hasZoneParts(self, name: str) -> bool:
855        """
856        Returns true if the specified name contains zone parts (using
857        the `zoneSeparator`).
858        """
859        return self.formatDict[Lexeme.zoneSeparator] in name

Returns true if the specified name contains zone parts (using the zoneSeparator).
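
For example (a sketch assuming the default '::' zone separator):

>>> pf = ParseFormat()
>>> pf.hasZoneParts('cave::entrance')
True
>>> pf.hasZoneParts('entrance')
False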

def splitZone(self, name: str) -> Tuple[List[str], str]:
861    def splitZone(
862        self,
863        name: str
864    ) -> Tuple[List[base.Zone], base.DecisionName]:
865        """
866        Splits a decision name that includes zone information into the
867        list-of-zones part and the decision part. If there is no zone
868        information in the name, the list-of-zones will be an empty
869        list.
870        """
871        sep = self.formatDict[Lexeme.zoneSeparator]
872        parts = name.split(sep)
873        return (list(parts[:-1]), parts[-1])

Splits a decision name that includes zone information into the list-of-zones part and the decision part. If there is no zone information in the name, the list-of-zones will be an empty list.
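
For example (a sketch assuming the default '::' zone separator):

>>> pf = ParseFormat()
>>> pf.splitZone('outer::inner::place')
(['outer', 'inner'], 'place')
>>> pf.splitZone('place')
([], 'place')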

def prefixWithZone(self, name: str, zone: str) -> str:
875    def prefixWithZone(
876        self,
877        name: base.DecisionName,
878        zone: base.Zone
879    ) -> base.DecisionName:
880        """
881        Returns the given decision name, prefixed with the given zone
882        name. Does NOT check whether the decision name already includes
883        a prefix or not.
884        """
885        return zone + self.formatDict[Lexeme.zoneSeparator] + name

Returns the given decision name, prefixed with the given zone name. Does NOT check whether the decision name already includes a prefix or not.
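
For example (a sketch assuming the default '::' zone separator):

>>> pf = ParseFormat()
>>> pf.prefixWithZone('place', 'cave')
'cave::place'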

def parseAnyTransitionFromTokens( self, tokens: List[Union[Lexeme, str]], start: int = 0) -> Tuple[Tuple[str, List[bool]], int]:
887    def parseAnyTransitionFromTokens(
888        self,
889        tokens: LexedTokens,
890        start: int = 0
891    ) -> Tuple[base.TransitionWithOutcomes, int]:
892        """
893        Parses a `base.TransitionWithOutcomes` from a tokens list,
894        accepting either a transition name or a transition name followed
895        by a `Lexeme.withDetails` followed by a string of success and
896        failure indicator characters. Returns a tuple containing a
897        `base.TransitionWithOutcomes` and an integer indicating the end
898        index of the parsed item within the tokens.
899        """
900        # Normalize start index so we can do index math
901        if start < 0:
902            useIndex = len(tokens) + start
903        else:
904            useIndex = start
905
906        try:
907            first = tokens[useIndex]
908        except IndexError:
909            raise ParseError(
910                f"Invalid token index: {start!r} among {len(tokens)}"
911                f" tokens."
912            )
913
914        if isinstance(first, Lexeme):
915            raise ParseError(
916                f"Expecting a transition name (possibly with a"
917                f" success/failure indicator string) but first token is"
918                f" {first!r}."
919            )
920
921        try:
922            second = tokens[useIndex + 1]
923            third = tokens[useIndex + 2]
924        except IndexError:
925            return ((first, []), useIndex)
926
927        if second != Lexeme.withDetails or isinstance(third, Lexeme):
928            return ((first, []), useIndex)
929
930        outcomes = []
931        for char in third:
932            if char == self.successIndicator:
933                outcomes.append(True)
934            elif char == self.failureIndicator:
935                outcomes.append(False)
936            else:
937                return ((first, []), useIndex)
938
939        return ((first, outcomes), useIndex + 2)

Parses a base.TransitionWithOutcomes from a tokens list, accepting either a transition name or a transition name followed by a Lexeme.withDetails followed by a string of success and failure indicator characters. Returns a tuple containing a base.TransitionWithOutcomes and an integer indicating the end index of the parsed item within the tokens.
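
For example (a sketch assuming the default ('s', 'f') success/failure indicators):

>>> pf = ParseFormat()
>>> pf.parseAnyTransitionFromTokens(['door'])
(('door', []), 0)
>>> pf.parseAnyTransitionFromTokens(['door', Lexeme.withDetails, 'sf'])
(('door', [True, False]), 2)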

def parseTransitionWithOutcomes(self, content: str) -> Tuple[str, List[bool]]:
941    def parseTransitionWithOutcomes(
942        self,
943        content: str
944    ) -> base.TransitionWithOutcomes:
945        """
946        Takes a transition that may have outcomes listed as a series of
947        s/f strings after a colon and returns the corresponding
948        `TransitionWithOutcomes` tuple. Calls `lex` and then
949        `parseAnyTransitionFromTokens`.
950        """
951        return self.parseAnyTransitionFromTokens(self.lex(content))[0]

Takes a transition that may have outcomes listed as a series of s/f strings after a colon and returns the corresponding TransitionWithOutcomes tuple. Calls lex and then parseAnyTransitionFromTokens.
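
For example (a sketch assuming the default '%' details marker and ('s', 'f') indicators):

>>> pf = ParseFormat()
>>> pf.parseTransitionWithOutcomes('door%sf')
('door', [True, False])
>>> pf.parseTransitionWithOutcomes('door')
('door', [])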

def unparseTransitionWithOutocmes(self, transition: Union[str, Tuple[str, List[bool]]]) -> str:
953    def unparseTransitionWithOutocmes(
954        self,
955        transition: base.AnyTransition
956    ) -> str:
957        """
958        Turns a `base.AnyTransition` back into a string that would parse
959        to an equivalent `base.TransitionWithOutcomes` via
960        `parseTransitionWithOutcomes`. If a bare `base.Transition` is
961        given, returns a string that would result in a
962        `base.TransitionWithOutcomes` that has an empty outcomes
963        sequence.
964        """
965        if isinstance(transition, base.Transition):
966            return transition
967        elif (
968            isinstance(transition, tuple)
969        and len(transition) == 2
970        and isinstance(transition[0], base.Transition)
971        and isinstance(transition[1], list)
972        and all(isinstance(sfi, bool) for sfi in transition[1])
973        ):
974            if len(transition[1]) == 0:
975                return transition[0]
976            else:
977                result = transition[0] + self.formatDict[Lexeme.withDetails]
978                for outcome in transition[1]:
979                    if outcome:
980                        result += self.successIndicator
981                    else:
982                        result += self.failureIndicator
983                return result
984        else:
985            raise TypeError(
986                f"Invalid AnyTransition: neither a string, nor a"
987                f" length-2 tuple consisting of a string followed by a"
988                f" list of booleans. Got: {transition!r}"
989            )

Turns a base.AnyTransition back into a string that would parse to an equivalent base.TransitionWithOutcomes via parseTransitionWithOutcomes. If a bare base.Transition is given, returns a string that would result in a base.TransitionWithOutcomes that has an empty outcomes sequence.
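
For example (a sketch assuming the default format):

>>> pf = ParseFormat()
>>> pf.unparseTransitionWithOutocmes('door')
'door'
>>> pf.unparseTransitionWithOutocmes(('door', [True, False]))
'door%sf'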

def parseSpecificTransition(self, content: str) -> Tuple[str, str]:
 991    def parseSpecificTransition(
 992        self,
 993        content: str
 994    ) -> Tuple[base.DecisionName, base.Transition]:
 995        """
 996        Splits a decision:transition pair to the decision and transition
 997        part, using a custom separator if one is defined.
 998        """
 999        sep = self.formatDict[Lexeme.withDetails]
1000        n = content.count(sep)
1001        if n == 0:
1002            raise ParseError(
1003                f"Cannot split '{content}' into a decision name and a"
1004                f" transition name (no separator '{sep}' found)."
1005            )
1006        elif n > 1:
1007            raise ParseError(
1008                f"Cannot split '{content}' into a decision name and a"
1009                f" transition name (too many ({n}) '{sep}' separators"
1010                f" found)."
1011            )
1012        else:
1013            return cast(
1014                Tuple[base.DecisionName, base.Transition],
1015                tuple(content.split(sep))
1016            )

Splits a decision:transition pair to the decision and transition part, using a custom separator if one is defined.
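
For example (a sketch assuming the default '%' separator):

>>> pf = ParseFormat()
>>> pf.parseSpecificTransition('room%door')
('room', 'door')
>>> pf.parseSpecificTransition('room') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
  ...
ParseError: ...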

def splitDirections(self, content: str) -> Tuple[Optional[str], Optional[str]]:
1018    def splitDirections(
1019        self,
1020        content: str
1021    ) -> Tuple[Optional[str], Optional[str]]:
1022        """
1023        Splits a piece of text using the 'Lexeme.reciprocalSeparator'
1024        into two pieces. If there is no separator, the second piece will
1025        be `None`; if either side of the separator is blank, that side
1026        will be `None`, and if there is more than one separator, a
1027        `ParseError` will be raised. Whitespace will be stripped from
1028        both sides of each result.
1029
1030        Examples:
1031
1032        >>> pf = ParseFormat()
1033        >>> pf.splitDirections('abc / def')
1034        ('abc', 'def')
1035        >>> pf.splitDirections('abc def ')
1036        ('abc def', None)
1037        >>> pf.splitDirections('abc def /')
1038        ('abc def', None)
1039        >>> pf.splitDirections('/abc def')
1040        (None, 'abc def')
1041        >>> pf.splitDirections('a/b/c') # doctest: +IGNORE_EXCEPTION_DETAIL
1042        Traceback (most recent call last):
1043          ...
1044        ParseError: ...
1045        """
1046        sep = self.formatDict[Lexeme.reciprocalSeparator]
1047        count = content.count(sep)
1048        if count > 1:
1049            raise ParseError(
1050                f"Too many split points ('{sep}') in content:"
1051                f" '{content}' (only one is allowed)."
1052            )
1053
1054        elif count == 1:
1055            before, after = content.split(sep)
1056            before = before.strip()
1057            after = after.strip()
1058            return (before or None, after or None)
1059
1060        else: # no split points
1061            stripped = content.strip()
1062            if stripped:
1063                return stripped, None
1064            else:
1065                return None, None

Splits a piece of text using the 'Lexeme.reciprocalSeparator' into two pieces. If there is no separator, the second piece will be None; if either side of the separator is blank, that side will be None, and if there is more than one separator, a ParseError will be raised. Whitespace will be stripped from both sides of each result.

Examples:

>>> pf = ParseFormat()
>>> pf.splitDirections('abc / def')
('abc', 'def')
>>> pf.splitDirections('abc def ')
('abc def', None)
>>> pf.splitDirections('abc def /')
('abc def', None)
>>> pf.splitDirections('/abc def')
(None, 'abc def')
>>> pf.splitDirections('a/b/c') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
  ...
ParseError: ...
def parseItem(self, item: str) -> Union[str, Tuple[str, int], Tuple[str, str]]:
1067    def parseItem(
1068        self,
1069        item: str
1070    ) -> Union[
1071        base.Capability,
1072        Tuple[base.Token, int],
1073        Tuple[base.MechanismName, base.MechanismState]
1074    ]:
1075        """
1076        Parses an item, which is a capability (just a string), a
1077        token-type*number pair (returned as a tuple with the number
1078        converted to an integer), or a mechanism-name:state pair
1079        (returned as a tuple with the state as a string). The
1080        'Lexeme.tokenCount' and `Lexeme.mechanismSeparator` format
1081        values determine the separators that this looks for.
1082        """
1083        tsep = self.formatDict[Lexeme.tokenCount]
1084        msep = self.formatDict[Lexeme.mechanismSeparator]
1085        if tsep in item:
1086            # It's a token w/ an associated count
1087            parts = item.split(tsep)
1088            if len(parts) != 2:
1089                raise ParseError(
1090                    f"Item '{item}' has a '{tsep}' but doesn't separate"
1091                    f" into a token type and a count."
1092                )
1093            typ, count = parts
1094            try:
1095                num = int(count)
1096            except ValueError:
1097                raise ParseError(
1098                    f"Item '{item}' has invalid token count '{count}'."
1099                )
1100
1101            return (typ, num)
1102        elif msep in item:
1103            parts = item.split(msep)
1104            mechanism = msep.join(parts[:-1])
1105            state = parts[-1]
1106            if mechanism.endswith(msep):
1107                # Just a zone-qualified name...
1108                return item
1109            else:
1110                return (mechanism, state)
1111        else:
1112            # It's just a capability
1113            return item

Parses an item, which is a capability (just a string), a token-type*number pair (returned as a tuple with the number converted to an integer), or a mechanism-name:state pair (returned as a tuple with the state as a string). The 'Lexeme.tokenCount' and Lexeme.mechanismSeparator format values determine the separators that this looks for.
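
For example (a sketch assuming the default '*' and ':' separators):

>>> pf = ParseFormat()
>>> pf.parseItem('sword')
'sword'
>>> pf.parseItem('coin*3')
('coin', 3)
>>> pf.parseItem('lever:up')
('lever', 'up')
>>> pf.parseItem('zone::thing')  # a zone-qualified name, not a mechanism
'zone::thing'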

def unparseDecisionSpecifier(self, spec: exploration.base.DecisionSpecifier) -> str:
1115    def unparseDecisionSpecifier(self, spec: base.DecisionSpecifier) -> str:
1116        """
1117        Turns a decision specifier back into a string, which would be
1118        parsed as a decision specifier as part of various different
1119        things.
1120
1121        For example:
1122
1123        >>> pf = ParseFormat()
1124        >>> pf.unparseDecisionSpecifier(
1125        ...     base.DecisionSpecifier(None, None, 'where')
1126        ... )
1127        'where'
1128        >>> pf.unparseDecisionSpecifier(
1129        ...     base.DecisionSpecifier(None, 'zone', 'where')
1130        ... )
1131        'zone::where'
1132        >>> pf.unparseDecisionSpecifier(
1133        ...     base.DecisionSpecifier('domain', 'zone', 'where')
1134        ... )
1135        'domain//zone::where'
1136        >>> pf.unparseDecisionSpecifier(
1137        ...     base.DecisionSpecifier('domain', None, 'where')
1138        ... )
1139        'domain//where'
1140        """
1141        result = spec.name
1142        if spec.zone is not None:
1143            result = (
1144                spec.zone
1145              + self.formatDict[Lexeme.zoneSeparator]
1146              + result
1147            )
1148        if spec.domain is not None:
1149            result = (
1150                spec.domain
1151              + self.formatDict[Lexeme.domainSeparator]
1152              + result
1153            )
1154        return result

Turns a decision specifier back into a string, which would be parsed as a decision specifier as part of various different things.

For example:

>>> pf = ParseFormat()
>>> pf.unparseDecisionSpecifier(
...     base.DecisionSpecifier(None, None, 'where')
... )
'where'
>>> pf.unparseDecisionSpecifier(
...     base.DecisionSpecifier(None, 'zone', 'where')
... )
'zone::where'
>>> pf.unparseDecisionSpecifier(
...     base.DecisionSpecifier('domain', 'zone', 'where')
... )
'domain//zone::where'
>>> pf.unparseDecisionSpecifier(
...     base.DecisionSpecifier('domain', None, 'where')
... )
'domain//where'
def unparseMechanismSpecifier(self, spec: exploration.base.MechanismSpecifier) -> str:
1156    def unparseMechanismSpecifier(
1157        self,
1158        spec: base.MechanismSpecifier
1159    ) -> str:
1160        """
1161        Turns a mechanism specifier back into a string, which would be
1162        parsed as a mechanism specifier as part of various different
1163        things. Note that a mechanism specifier with a zone part but no
1164        decision part is not valid, since the zone part would be
1165        parsed as a decision part instead.
1166
1167        For example:
1168
1169        >>> pf = ParseFormat()
1170        >>> pf.unparseMechanismSpecifier(
1171        ...     base.MechanismSpecifier(None, None, None, 'lever')
1172        ... )
1173        'lever'
1174        >>> pf.unparseMechanismSpecifier(
1175        ...     base.MechanismSpecifier('domain', 'zone', 'decision', 'door')
1176        ... )
1177        'domain//zone::decision::door'
1178        >>> pf.unparseMechanismSpecifier(
1179        ...     base.MechanismSpecifier('domain', None, None, 'door')
1180        ... )
1181        'domain//door'
1182        >>> pf.unparseMechanismSpecifier(
1183        ...     base.MechanismSpecifier(None, 'a', 'b', 'door')
1184        ... )
1185        'a::b::door'
1186        >>> pf.unparseMechanismSpecifier(
1187        ...     base.MechanismSpecifier(None, 'a', None, 'door')
1188        ... )
1189        Traceback (most recent call last):
1190        ...
1191        exploration.base.InvalidMechanismSpecifierError...
1192        >>> pf.unparseMechanismSpecifier(
1193        ...     base.MechanismSpecifier(None, None, 'a', 'door')
1194        ... )
1195        'a::door'
1196        """
1197        if spec.decision is None and spec.zone is not None:
1198            raise base.InvalidMechanismSpecifierError(
1199                f"Mechanism specifier has a zone part but no decision"
1200                f" part; it cannot be unparsed since it would parse"
1201                f" differently:\n{spec}"
1202            )
1203        result = spec.name
1204        if spec.decision is not None:
1205            result = (
1206                spec.decision
1207              + self.formatDict[Lexeme.zoneSeparator]
1208              + result
1209            )
1210        if spec.zone is not None:
1211            result = (
1212                spec.zone
1213              + self.formatDict[Lexeme.zoneSeparator]
1214              + result
1215            )
1216        if spec.domain is not None:
1217            result = (
1218                spec.domain
1219              + self.formatDict[Lexeme.domainSeparator]
1220              + result
1221            )
1222        return result

Turns a mechanism specifier back into a string, which would be parsed as a mechanism specifier as part of various different things. Note that a mechanism specifier with a zone part but no decision part is not valid, since the zone part would be parsed as a decision part instead.

For example:

>>> pf = ParseFormat()
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier(None, None, None, 'lever')
... )
'lever'
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier('domain', 'zone', 'decision', 'door')
... )
'domain//zone::decision::door'
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier('domain', None, None, 'door')
... )
'domain//door'
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier(None, 'a', 'b', 'door')
... )
'a::b::door'
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier(None, 'a', None, 'door')
... )
Traceback (most recent call last):
...
exploration.base.InvalidMechanismSpecifierError...
>>> pf.unparseMechanismSpecifier(
...     base.MechanismSpecifier(None, None, 'a', 'door')
... )
'a::door'
def effectType( self, effectMarker: str) -> Optional[Literal['gain', 'lose', 'set', 'toggle', 'deactivate', 'edit', 'goto', 'bounce', 'follow', 'save']]:
1224    def effectType(self, effectMarker: str) -> Optional[base.EffectType]:
1225        """
1226        Returns the `base.EffectType` string corresponding to the
1227        given effect marker string. Returns `None` for an unrecognized
1228        marker.
1229        """
1230        return self.effectNames.get(effectMarker)

Returns the base.EffectType string corresponding to the given effect marker string. Returns None for an unrecognized marker.
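
For example (a sketch assuming the default effect names):

>>> pf = ParseFormat()
>>> pf.effectType('gain')
'gain'
>>> pf.effectType('acquire') is None
True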

1232    def parseCommandFromTokens(
1233        self,
1234        tokens: LexedTokens,
1235        start: int = 0,
1236        end: int = -1
1237    ) -> commands.Command:
1238        """
1239        Given tokens that specify a `commands.Command`, parses that
1240        command and returns it. Really just turns the tokens back into
1241        strings and calls `commands.command`.
1242
1243        For example:
1244
1245        >>> pf = ParseFormat()
1246        >>> t = ['val', '5']
1247        >>> c = commands.command(*t)
1248        >>> pf.parseCommandFromTokens(t) == c
1249        True
1250        >>> t = ['op', Lexeme.tokenCount, '$val', '$val']
1251        >>> c = commands.command('op', '*', '$val', '$val')
1252        >>> pf.parseCommandFromTokens(t) == c
1253        True
1254        """
1255        start, end, nTokens = normalizeEnds(tokens, start, end)
1256        args: List[str] = []
1257        for token in tokens[start:end + 1]:
1258            if isinstance(token, Lexeme):
1259                args.append(self.formatDict[token])
1260            else:
1261                args.append(token)
1262
1263        if len(args) == 0:
1264            raise ParseError(
1265                f"No arguments for command:\n{tokens[start:end + 1]}"
1266            )
1267        return commands.command(*args)

Given tokens that specify a commands.Command, parses that command and returns it. Really just turns the tokens back into strings and calls commands.command.

For example:

>>> pf = ParseFormat()
>>> t = ['val', '5']
>>> c = commands.command(*t)
>>> pf.parseCommandFromTokens(t) == c
True
>>> t = ['op', Lexeme.tokenCount, '$val', '$val']
>>> c = commands.command('op', '*', '$val', '$val')
>>> pf.parseCommandFromTokens(t) == c
True
1269    def unparseCommand(self, command: commands.Command) -> str:
1270        """
1271        Turns a `Command` back into the string that would produce that
1272        command when parsed using `parseCommandList`.
1273
1274        Note that the results will be more explicit in some cases than what
1275        `parseCommandList` would accept as input.
1276
1277        For example:
1278
1279        >>> pf = ParseFormat()
1280        >>> pf.unparseCommand(
1281        ...     commands.LiteralValue(command='val', value='5')
1282        ... )
1283        'val 5'
1284        >>> pf.unparseCommand(
1285        ...     commands.LiteralValue(command='val', value='"5"')
1286        ... )
1287        'val "5"'
1288        >>> pf.unparseCommand(
1289        ...     commands.EstablishCollection(
1290        ...         command='empty',
1291        ...         collection='list'
1292        ...     )
1293        ... )
1294        'empty list'
1295        >>> pf.unparseCommand(
1296        ...     commands.AppendValue(command='append', value='$_')
1297        ... )
1298        'append $_'
1299        """
1300        candidate = None
1301        for k, v in commands.COMMAND_SETUP.items():
1302            if v[0] == type(command):
1303                if candidate is None:
1304                    candidate = k
1305                else:
1306                    raise ValueError(
1307                        f"COMMAND_SETUP includes multiple keys with"
1308                        f" {type(command)} as their value type:"
1309                        f" '{candidate}' and '{k}'."
1310                    )
1311
1312        if candidate is None:
1313            raise ValueError(
1314                f"COMMAND_SETUP has no key with {type(command)} as its"
1315                f" value type."
1316            )
1317
1318        result = candidate
1319        for x in command[1:]:
1320            # TODO: Is this hack good enough?
1321            result += ' ' + str(x)
1322        return result

Turns a Command back into the string that would produce that command when parsed using parseCommandList.

Note that the results will be more explicit in some cases than what parseCommandList would accept as input.

For example:

>>> pf = ParseFormat()
>>> pf.unparseCommand(
...     commands.LiteralValue(command='val', value='5')
... )
'val 5'
>>> pf.unparseCommand(
...     commands.LiteralValue(command='val', value='"5"')
... )
'val "5"'
>>> pf.unparseCommand(
...     commands.EstablishCollection(
...         command='empty',
...         collection='list'
...     )
... )
'empty list'
>>> pf.unparseCommand(
...     commands.AppendValue(command='append', value='$_')
... )
'append $_'
1324    def unparseCommandList(self, commands: List[commands.Command]) -> str:
1325        """
1326        Takes a list of commands and returns a string that would parse
1327        into them using `parseOneEffectArg`. The result contains
1328        newlines and indentation to make it easier to read.
1329
1330        For example:
1331
1332        >>> pf = ParseFormat()
1333        >>> pf.unparseCommandList(
1334        ...     [commands.command('val', '5'), commands.command('pop')]
1335        ... )
1336        '{\\n  val 5;\\n  pop;\\n}'
1337        """
1338        result = self.formatDict[Lexeme.openCurly]
1339        for cmd in commands:
1340            result += f'\n  {self.unparseCommand(cmd)};'
1341        if len(commands) > 0:
1342            result += '\n'
1343        return result + self.formatDict[Lexeme.closeCurly]

Takes a list of commands and returns a string that would parse into them using parseOneEffectArg. The result contains newlines and indentation to make it easier to read.

For example:

>>> pf = ParseFormat()
>>> pf.unparseCommandList(
...     [commands.command('val', '5'), commands.command('pop')]
... )
'{\n  val 5;\n  pop;\n}'
1345    def parseCommandListFromTokens(
1346        self,
1347        tokens: LexedTokens,
1348        start: int = 0
1349    ) -> Tuple[List[commands.Command], int]:
1350        """
1351        Parses a command list from a list of lexed tokens, which must
1352        start with `Lexeme.openCurly`. Returns the parsed command list
1353        as a list of `commands.Command` objects, along with the end
1354        index of that command list (which will be the matching curly
1355        brace.
1356        """
1357        end = self.matchingBrace(
1358            tokens,
1359            start,
1360            Lexeme.openCurly,
1361            Lexeme.closeCurly
1362        )
1363        parts = list(
1364            findSeparatedParts(
1365                tokens,
1366                Lexeme.consequenceSeparator,
1367                start + 1,
1368                end - 1,
1369                Lexeme.openCurly,
1370                Lexeme.closeCurly,
1371            )
1372        )
1373        return (
1374            [
1375                self.parseCommandFromTokens(tokens, fromIndex, toIndex)
1376                for fromIndex, toIndex in parts
1377                if fromIndex <= toIndex  # ignore empty parts
1378            ],
1379            end
1380        )

Parses a command list from a list of lexed tokens, which must start with Lexeme.openCurly. Returns the parsed command list as a list of commands.Command objects, along with the end index of that command list (which will be the matching curly brace).
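
For example (a sketch that mirrors the unparseCommandList example above):

>>> pf = ParseFormat()
>>> tokens = [
...     Lexeme.openCurly,
...     'val', '5',
...     Lexeme.consequenceSeparator,
...     'pop',
...     Lexeme.closeCurly
... ]
>>> pf.parseCommandListFromTokens(tokens) == (
...     [commands.command('val', '5'), commands.command('pop')],
...     5
... )
True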

1382    def parseOneEffectArg(
1383        self,
1384        tokens: LexedTokens,
1385        start: int = 0,
1386        limit: Optional[int] = None
1387    ) -> Tuple[
1388        Union[
1389            base.Capability,  # covers 'str' possibility
1390            Tuple[base.Token, base.TokenCount],
1391            Tuple[Literal['skill'], base.Skill, base.Level],
1392            Tuple[base.MechanismSpecifier, base.MechanismState],
1393            base.DecisionSpecifier,
1394            base.DecisionID,
1395            Literal[Lexeme.inCommon, Lexeme.isHidden],
1396            Tuple[Literal[Lexeme.sepOrDelay, Lexeme.effectCharges], int],
1397            List[commands.Command]
1398        ],
1399        int
1400    ]:
1401        """
1402        Looks at tokens starting at the specified position and parses
1403        one or more of them as an effect argument (an argument that
1404        could be given to `base.effect`). Looks at various key `Lexeme`s
1405        to determine which type to use.
1406
1407        Items in the tokens list beyond the specified limit will not be
1408        considered, even when they in theory could be grouped with items
1409        up to the limit into a more complex argument.
1410
1411        For example:
1412
1413        >>> pf = ParseFormat()
1414        >>> pf.parseOneEffectArg(['hi'])
1415        ('hi', 0)
1416        >>> pf.parseOneEffectArg(['hi'], 1)
1417        Traceback (most recent call last):
1418        ...
1419        IndexError...
1420        >>> pf.parseOneEffectArg(['hi', 'bye'])
1421        ('hi', 0)
1422        >>> pf.parseOneEffectArg(['hi', 'bye'], 1)
1423        ('bye', 1)
1424        >>> pf.parseOneEffectArg(
1425        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1426        ...     0
1427        ... )
1428        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1429 name='gate'), 'open'), 2)
1430        >>> pf.parseOneEffectArg(
1431        ...     ['set', 'gate', Lexeme.mechanismSeparator, 'open'],
1432        ...     1
1433        ... )
1434        ((MechanismSpecifier(domain=None, zone=None, decision=None,\
1435 name='gate'), 'open'), 3)
1436        >>> pf.parseOneEffectArg(
1437        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1438        ...     1
1439        ... )
1440        Traceback (most recent call last):
1441        ...
1442        exploration.parsing.ParseError...
1443        >>> pf.parseOneEffectArg(
1444        ...     ['gate', Lexeme.mechanismSeparator, 'open'],
1445        ...     2
1446        ... )
1447        ('open', 2)
1448        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, '10'], 0)
1449        (('gold', 10), 2)
1450        >>> pf.parseOneEffectArg(['gold', Lexeme.tokenCount, 'ten'], 0)
1451        Traceback (most recent call last):
1452        ...
1453        exploration.parsing.ParseError...
1454        >>> pf.parseOneEffectArg([Lexeme.inCommon], 0)
1455        (<Lexeme.inCommon: ...>, 0)
1456        >>> pf.parseOneEffectArg([Lexeme.isHidden], 0)
1457        (<Lexeme.isHidden: ...>, 0)
1458        >>> pf.parseOneEffectArg([Lexeme.tokenCount, '3'], 0)
1459        Traceback (most recent call last):
1460        ...
1461        exploration.parsing.ParseError...
1462        >>> pf.parseOneEffectArg([Lexeme.effectCharges, '3'], 0)
1463        ((<Lexeme.effectCharges: ...>, 3), 1)
1464        >>> pf.parseOneEffectArg([Lexeme.tokenCount, 3], 0)  # int is a lexeme
1465        Traceback (most recent call last):
1466        ...
1467        exploration.parsing.ParseError...
1468        >>> pf.parseOneEffectArg([Lexeme.sepOrDelay, '-2'], 0)
1469        ((<Lexeme.sepOrDelay: ...>, -2), 1)
1470        >>> pf.parseOneEffectArg(['agility', Lexeme.skillLevel, '3'], 0)
1471        (('skill', 'agility', 3), 2)
1472        >>> pf.parseOneEffectArg(
1473        ...     [
1474        ...         'main',
1475        ...         Lexeme.domainSeparator,
1476        ...         'zone',
1477        ...         Lexeme.zoneSeparator,
1478        ...         'decision',
1479        ...         Lexeme.zoneSeparator,
1480        ...         'compass',
1481        ...         Lexeme.mechanismSeparator,
1482        ...         'north',
1483        ...         'south',
1484        ...         'east',
1485        ...         'west'
1486        ...     ],
1487        ...     0
1488        ... )
1489        ((MechanismSpecifier(domain='main', zone='zone',\
1490 decision='decision', name='compass'), 'north'), 8)
1491        >>> pf.parseOneEffectArg(
1492        ...     [
1493        ...         'before',
1494        ...         'main',
1495        ...         Lexeme.domainSeparator,
1496        ...         'zone',
1497        ...         Lexeme.zoneSeparator,
1498        ...         'decision',
1499        ...         Lexeme.zoneSeparator,
1500        ...         'compass',
1501        ...         'north',
1502        ...         'south',
1503        ...         'east',
1504        ...         'west'
1505        ...     ],
1506        ...     1
1507        ... )  # a mechanism specifier without a state will become a
1508        ...    # decision specifier
1509        (DecisionSpecifier(domain='main', zone='zone',\
1510 name='decision'), 5)
1511        >>> tokens = [
1512        ...     'set',
1513        ...     'main',
1514        ...     Lexeme.domainSeparator,
1515        ...     'zone',
1516        ...     Lexeme.zoneSeparator,
1517        ...     'compass',
1518        ...     'north',
1519        ...     'bounce',
1520        ... ]
1521        >>> pf.parseOneEffectArg(tokens, 0)
1522        ('set', 0)
1523        >>> pf.parseDecisionSpecifierFromTokens(tokens, 1)
1524        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1525        >>> pf.parseOneEffectArg(tokens, 1)
1526        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
1527        >>> pf.parseOneEffectArg(tokens, 6)
1528        ('north', 6)
1529        >>> pf.parseOneEffectArg(tokens, 7)
1530        ('bounce', 7)
1531        >>> pf.parseOneEffectArg(
1532        ...     [
1533        ...         "fort", Lexeme.zoneSeparator, "gate",
1534        ...             Lexeme.mechanismSeparator, "open",
1535        ...     ],
1536        ...     0
1537        ... )
1538        ((MechanismSpecifier(domain=None, zone=None, decision='fort',\
1539 name='gate'), 'open'), 4)
1540        >>> pf.parseOneEffectArg(
1541        ...     [Lexeme.openCurly, 'val', '5', Lexeme.closeCurly],
1542        ...     0
1543        ... ) == ([commands.command('val', '5')], 3)
1544        True
1545        >>> a = [
1546        ...     Lexeme.openCurly, 'val', '5', Lexeme.closeCurly,
1547        ...     Lexeme.openCurly, 'append', Lexeme.consequenceSeparator,
1548        ...     'pop', Lexeme.closeCurly
1549        ... ]
1550        >>> cl = [
1551        ...     [commands.command('val', '5')],
1552        ...     [commands.command('append'), commands.command('pop')]
1553        ... ]
1554        >>> pf.parseOneEffectArg(a, 0) == (cl[0], 3)
1555        True
1556        >>> pf.parseOneEffectArg(a, 4) == (cl[1], 8)
1557        True
1558        >>> pf.parseOneEffectArg(a, 1)
1559        ('val', 1)
1560        >>> pf.parseOneEffectArg(a, 2)
1561        ('5', 2)
1562        >>> pf.parseOneEffectArg(a, 3)
1563        Traceback (most recent call last):
1564        ...
1565        exploration.parsing.ParseError...
1566        """
1567        start, limit, nTokens = normalizeEnds(
1568            tokens,
1569            start,
1570            limit if limit is not None else -1
1571        )
1572        if nTokens == 0:
1573            raise ParseError("No effect arguments available.")
1574
1575        first = tokens[start]
1576
1577        if nTokens == 1:
1578            if first in (Lexeme.inCommon, Lexeme.isHidden):
1579                return (first, start)
1580            elif not isinstance(first, str):
1581                raise ParseError(
1582                    f"Only one token and it's a special character"
1583                    f" ({first} = {repr(self.formatDict[first])})"
1584                )
1585            else:
1586                return (cast(base.Capability, first), start)
1587
1588        assert (nTokens > 1)
1589
1590        second = tokens[start + 1]
1591
1592        # Command lists start with an open curly brace and effect
1593        # modifiers start with a Lexeme; nothing else may start with one
1594        if first == Lexeme.openCurly:
1595            return self.parseCommandListFromTokens(tokens, start)
1596        elif first in (Lexeme.inCommon, Lexeme.isHidden):
1597            return (first, start)
1598        elif first in (Lexeme.sepOrDelay, Lexeme.effectCharges):
1599            if not isinstance(second, str):
1600                raise ParseError(
1601                    f"Token following a modifier that needs a count"
1602                    f" must be a string in tokens:"
1603                    f"\n{tokens[start:limit or len(tokens)]}"
1604                )
1605            try:
1606                val = int(second)
1607            except ValueError:
1608                raise ParseError(
1609                    f"Token following a modifier that needs a count"
1610                    f" must be convertible to an int:"
1611                    f"\n{tokens[start:limit or len(tokens)]}"
1612                )
1613
1614            first = cast(
1615                Literal[Lexeme.sepOrDelay, Lexeme.effectCharges],
1616                first
1617            )
1618            return ((first, val), start + 1)
1619        elif not isinstance(first, str):
1620            raise ParseError(
1621                f"First token must be a string unless it's a modifier"
1622                f" lexeme or command/reversion-set opener. Got:"
1623                f"\n{tokens[start:limit or len(tokens)]}"
1624            )
1625
1626        # If we have two strings in a row, then the first is our parsed
1627        # value alone and we'll parse the second separately.
1628        if isinstance(second, str):
1629            return (first, start)
1630        elif second in (Lexeme.inCommon, Lexeme.isHidden):
1631            return (first, start)
1632
1633        # With fewer than 3 tokens, the lexeme in second position
1634        # can't be part of a longer argument, so return the first alone.
1635        if nTokens < 3:
1636            return (first, start)
1637
1638        third = tokens[start + 2]
1639        if not isinstance(third, str):
1640            return (first, start)
1641
1642        second = cast(Lexeme, second)
1643        third = cast(str, third)
1644
1645        if second in (Lexeme.tokenCount, Lexeme.skillLevel):
1646            try:
1647                num = int(third)
1648            except ValueError:
1649                raise ParseError(
1650                    f"Invalid effect tokens: count for Tokens or level"
1651                    f" for Skill must be convertible to an integer."
1652                    f"\n{tokens[start:limit + 1]}"
1653                )
1654            if second == Lexeme.tokenCount:
1655                return ((first, num), start + 2)  # token/count pair
1656            else:
1657                return (('skill', first, num), start + 2)  # skill/level triple
1658
1659        elif second == Lexeme.mechanismSeparator:  # bare mechanism
1660            return (
1661                (
1662                    base.MechanismSpecifier(
1663                        domain=None,
1664                        zone=None,
1665                        decision=None,
1666                        name=first
1667                    ),
1668                    third
1669                ),
1670                start + 2
1671            )
1672
1673        elif second in (Lexeme.domainSeparator, Lexeme.zoneSeparator):
1674            try:
1675                mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
1676                    tokens,
1677                    start
1678                )  # works whether it's a mechanism or decision specifier...
1679            except ParseError:
1680                return self.parseDecisionSpecifierFromTokens(tokens, start)
1681            if mEnd + 2 > limit:
1682                # No room for following mechanism separator + state
1683                return self.parseDecisionSpecifierFromTokens(tokens, start)
1684            sep = tokens[mEnd + 1]
1685            after = tokens[mEnd + 2]
1686            if sep == Lexeme.mechanismSeparator:
1687                if not isinstance(after, str):
1688                    raise ParseError(
1689                        f"Mechanism separator not followed by state:"
1690                        f"\n{tokens[start]}"
1691                    )
1692                return ((mSpec, after), mEnd + 2)
1693            else:
1694                # No mechanism separator afterwards
1695                return self.parseDecisionSpecifierFromTokens(tokens, start)
1696
1697        else:  # unrecognized as a longer combo
1698            return (first, start)
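
Since each call returns both the parsed value and the index of the last token it consumed, a caller can walk a whole token list by restarting just past each result. A minimal sketch of that loop (`coalesceEffectArgs` below does essentially this, plus modifier handling; the token list here is made up for illustration):

>>> pf = ParseFormat()
>>> tokens = ['coin', Lexeme.tokenCount, '3', 'fly']
>>> parsed = []
>>> where = 0
>>> while where < len(tokens):
...     value, end = pf.parseOneEffectArg(tokens, where)
...     parsed.append(value)
...     where = end + 1  # resume just past the last consumed token
>>> parsed
[('coin', 3), 'fly']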

1700    def coalesceEffectArgs(
1701        self,
1702        tokens: LexedTokens,
1703        start: int = 0,
1704        end: int = -1
1705    ) -> Tuple[
1706        List[  # List of effect args
1707            Union[
1708                base.Capability,  # covers 'str' possibility
1709                Tuple[base.Token, base.TokenCount],
1710                Tuple[Literal['skill'], base.Skill, base.Level],
1711                Tuple[base.MechanismSpecifier, base.MechanismState],
1712                base.DecisionSpecifier,
1713                List[commands.Command],
1714                Set[str]
1715            ]
1716        ],
1717        Tuple[  # Slots for modifiers: common/hidden/charges/delay
1718            Optional[bool],
1719            Optional[bool],
1720            Optional[int],
1721            Optional[int],
1722        ]
1723    ]:
1724        """
1725        Given a region of a lexed tokens list which contains one or more
1726        effect arguments, combines token sequences representing things
1727        like capabilities, mechanism states, token counts, and skill
1728        levels, and represents these using the tuples that would be passed
1729        to `base.effect`. Returns a tuple with two elements:
1730
1731        - First, a list that contains several different kinds of
1732            objects, each of which is distinguishable by its type or
1733            part of its value.
1734        - Next, a tuple with four entries for common, hidden, charges,
1735            and/or delay values based on the presence of modifier
1736            sequences. Any or all of these may be `None` if the relevant
1737            modifier was not present (the usual case).
1738
1739        For example:
1740
1741        >>> pf = ParseFormat()
1742        >>> pf.coalesceEffectArgs(["jump"])
1743        (['jump'], (None, None, None, None))
1744        >>> pf.coalesceEffectArgs(["coin", Lexeme.tokenCount, "3", "fly"])
1745        ([('coin', 3), 'fly'], (None, None, None, None))
1746        >>> pf.coalesceEffectArgs(
1747        ...     [
1748        ...         "fort", Lexeme.zoneSeparator, "gate",
1749        ...             Lexeme.mechanismSeparator, "open"
1750        ...     ]
1751        ... )
1752        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1753 name='gate'), 'open')], (None, None, None, None))
1754        >>> pf.coalesceEffectArgs(
1755        ...     [
1756        ...         "main", Lexeme.domainSeparator, "cliff"
1757        ...     ]
1758        ... )
1759        ([DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1760 (None, None, None, None))
1761        >>> pf.coalesceEffectArgs(
1762        ...     [
1763        ...         "door", Lexeme.mechanismSeparator, "open"
1764        ...     ]
1765        ... )
1766        ([(MechanismSpecifier(domain=None, zone=None, decision=None,\
1767 name='door'), 'open')], (None, None, None, None))
1768        >>> pf.coalesceEffectArgs(
1769        ...     [
1770        ...         "fort", Lexeme.zoneSeparator, "gate",
1771        ...             Lexeme.mechanismSeparator, "open",
1772        ...         "canJump",
1773        ...         "coins", Lexeme.tokenCount, "3",
1774        ...         Lexeme.inCommon,
1775        ...         "agility", Lexeme.skillLevel, "-1",
1776        ...         Lexeme.sepOrDelay, "0",
1777        ...         "main", Lexeme.domainSeparator, "cliff"
1778        ...     ]
1779        ... )
1780        ([(MechanismSpecifier(domain=None, zone=None, decision='fort',\
1781 name='gate'), 'open'), 'canJump', ('coins', 3), ('skill', 'agility', -1),\
1782 DecisionSpecifier(domain='main', zone=None, name='cliff')],\
1783 (True, None, None, 0))
1784        >>> pf.coalesceEffectArgs(["bounce", Lexeme.isHidden])
1785        (['bounce'], (None, True, None, None))
1786        >>> pf.coalesceEffectArgs(
1787        ...     ["goto", "3", Lexeme.inCommon, Lexeme.isHidden]
1788        ... )
1789        (['goto', '3'], (True, True, None, None))
1790        """
1791        start, end, nTokens = normalizeEnds(tokens, start, end)
1792        where = start
1793        result: List[  # List of effect args
1794            Union[
1795                base.Capability,  # covers 'str' possibility
1796                Tuple[base.Token, base.TokenCount],
1797                Tuple[Literal['skill'], base.Skill, base.Level],
1798                Tuple[base.MechanismSpecifier, base.MechanismState],
1799                base.DecisionSpecifier,
1800                List[commands.Command],
1801                Set[str]
1802            ]
1803        ] = []
1804        inCommon: Optional[bool] = None
1805        isHidden: Optional[bool] = None
1806        charges: Optional[int] = None
1807        delay: Optional[int] = None
1808        while where <= end:
1809            following, thisEnd = self.parseOneEffectArg(tokens, where, end)
1810            if following == Lexeme.inCommon:
1811                if inCommon is not None:
1812                    raise ParseError(
1813                        f"In-common effect modifier specified more than"
1814                        f" once in effect args:"
1815                        f"\n{tokens[start:end + 1]}"
1816                    )
1817                inCommon = True
1818            elif following == Lexeme.isHidden:
1819                if isHidden is not None:
1820                    raise ParseError(
1821                        f"Is-hidden effect modifier specified more than"
1822                        f" once in effect args:"
1823                        f"\n{tokens[start:end + 1]}"
1824                    )
1825                isHidden = True
1826            elif (
1827                isinstance(following, tuple)
1828            and len(following) == 2
1829            and following[0] in (Lexeme.effectCharges, Lexeme.sepOrDelay)
1830            and isinstance(following[1], int)
1831            ):
1832                if following[0] == Lexeme.effectCharges:
1833                    if charges is not None:
1834                        raise ParseError(
1835                            f"Charges effect modifier specified more than"
1836                            f" once in effect args:"
1837                            f"\n{tokens[start:end + 1]}"
1838                        )
1839                    charges = following[1]
1840                else:
1841                    if delay is not None:
1842                        raise ParseError(
1843                            f"Delay effect modifier specified more than"
1844                            f" once in effect args:"
1845                            f"\n{tokens[start:end + 1]}"
1846                        )
1847                    delay = following[1]
1848            elif (
1849                    isinstance(following, base.Capability)
1850                 or (
1851                    isinstance(following, tuple)
1852                and len(following) == 2
1853                and isinstance(following[0], base.Token)
1854                and isinstance(following[1], base.TokenCount)
1855                ) or (
1856                    isinstance(following, tuple)
1857                and len(following) == 3
1858                and following[0] == 'skill'
1859                and isinstance(following[1], base.Skill)
1860                and isinstance(following[2], base.Level)
1861                ) or (
1862                    isinstance(following, tuple)
1863                and len(following) == 2
1864                and isinstance(following[0], base.MechanismSpecifier)
1865                and isinstance(following[1], base.MechanismState)
1866                ) or (
1867                    isinstance(following, base.DecisionSpecifier)
1868                ) or (
1869                    isinstance(following, list)
1870                and all(isinstance(item, tuple) for item in following)
1871                    # TODO: Stricter command list check here?
1872                ) or (
1873                    isinstance(following, set)
1874                and all(isinstance(item, str) for item in following)
1875                )
1876            ):
1877                result.append(following)
1878            else:
1879                raise ParseError(f"Invalid coalesced argument: {following}")
1880            where = thisEnd + 1
1881
1882        return (result, (inCommon, isHidden, charges, delay))
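
The four modifier slots are positional (common, hidden, charges, delay), so a caller will typically unpack them by name right away; mirroring the Lexeme.isHidden doctest above:

>>> pf = ParseFormat()
>>> arguments, (common, hidden, charges, delay) = pf.coalesceEffectArgs(
...     ["bounce", Lexeme.isHidden]
... )
>>> arguments
['bounce']
>>> (common, hidden, charges, delay)
(None, True, None, None)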

1884    def parseEffectFromTokens(
1885        self,
1886        tokens: LexedTokens,
1887        start: int = 0,
1888        end: int = -1
1889    ) -> base.Effect:
1890        """
1891        Given a region of a list of lexed tokens specifying an effect,
1892        returns the `Effect` object that those tokens specify.
1893        """
1894        start, end, nTokens = normalizeEnds(tokens, start, end)
1895
1896        # Check for empty list
1897        if nTokens == 0:
1898            raise ParseError(
1899                "Effect must include at least a type."
1900            )
1901
1902        firstPart = tokens[start]
1903
1904        if isinstance(firstPart, Lexeme):
1905            raise ParseError(
1906                f"First part of effect must be an effect type. Got"
1907                f" {firstPart} ({repr(self.formatDict[firstPart])})."
1908            )
1909
1910        firstPart = cast(str, firstPart)
1911
1912        # Get the effect type
1913        fType = self.effectType(firstPart)
1914
1915        if fType is None:
1916            raise ParseError(
1917                f"Unrecognized effect type {firstPart!r}. Check the"
1918                f" EffectType entries in the effect names dictionary."
1919            )
1920
1921        if start + 1 > end:  # No tokens left: set empty args
1922            groupedArgs: List[
1923                Union[
1924                    base.Capability,  # covers 'str' possibility
1925                    Tuple[base.Token, base.TokenCount],
1926                    Tuple[Literal['skill'], base.Skill, base.Level],
1927                    Tuple[base.MechanismSpecifier, base.MechanismState],
1928                    base.DecisionSpecifier,
1929                    List[commands.Command],
1930                    Set[str]
1931                ]
1932            ] = []
1933            modifiers: Tuple[
1934                Optional[bool],
1935                Optional[bool],
1936                Optional[int],
1937                Optional[int]
1938            ] = (None, None, None, None)
1939        else:  # Coalesce remaining tokens if there are any
1940            groupedArgs, modifiers = self.coalesceEffectArgs(
1941                tokens,
1942                start + 1,
1943                end
1944            )
1945
1946        # Set up arguments for base.effect and handle modifiers first
1947        args: Dict[
1948            str,
1949            Union[
1950                None,
1951                base.ContextSpecifier,
1952                base.Capability,
1953                Tuple[base.Token, base.TokenCount],
1954                Tuple[Literal['skill'], base.Skill, base.Level],
1955                Tuple[base.MechanismSpecifier, base.MechanismState],
1956                Tuple[base.MechanismSpecifier, List[base.MechanismState]],
1957                List[base.Capability],
1958                base.AnyDecisionSpecifier,
1959                Tuple[base.AnyDecisionSpecifier, base.FocalPointName],
1960                bool,
1961                int,
1962                base.SaveSlot,
1963                Tuple[base.SaveSlot, Set[str]]
1964            ]
1965        ] = {}
1966        if modifiers[0]:
1967            args['applyTo'] = 'common'
1968        if modifiers[1]:
1969            args['hidden'] = True
1970        else:
1971            args['hidden'] = False
1972        if modifiers[2] is not None:
1973            args['charges'] = modifiers[2]
1974        if modifiers[3] is not None:
1975            args['delay'] = modifiers[3]
1976
1977        # Now handle the main effect-type-based argument
1978        if fType in ("gain", "lose"):
1979            if len(groupedArgs) != 1:
1980                raise ParseError(
1981                    f"'{fType}' effect must have exactly one grouped"
1982                    f" argument (got {len(groupedArgs)}:\n{groupedArgs}"
1983                )
1984            thing = groupedArgs[0]
1985            if isinstance(thing, tuple):
1986                if len(thing) == 2:
1987                    if (
1988                        not isinstance(thing[0], base.Token)
1989                     or not isinstance(thing[1], base.TokenCount)
1990                    ):
1991                        raise ParseError(
1992                            f"'{fType}' effect grouped arg pair must be a"
1993                            f" (token, amount) pair. Got:\n{thing}"
1994                        )
1995                elif len(thing) == 3:
1996                    if (
1997                        thing[0] != 'skill'
1998                     or not isinstance(thing[1], base.Skill)
1999                     or not isinstance(thing[2], base.Level)
2000                    ):
2001                        raise ParseError(
2002                            f"'{fType}' effect grouped arg pair must be a"
2003                            f" (token, amount) pair. Got:\n{thing}"
2004                        )
2005                else:
2006                    raise ParseError(
2007                        f"'{fType}' effect grouped arg tuple must have"
2008                        f" length 2 or 3. Got (length {len(thing)}):\n{thing}"
2009                    )
2010            elif not isinstance(thing, base.Capability):
2011                raise ParseError(
2012                    f"'{fType}' effect grouped arg must be a capability"
2013                    f" or a (token, amount) tuple. Got:\n{thing}"
2014                )
2015            args[fType] = thing
2016            return base.effect(**args)  # type:ignore
2017
2018        elif fType == "set":
2019            if len(groupedArgs) != 1:
2020                raise ParseError(
2021                    f"'{fType}' effect must have exactly one grouped"
2022                    f" argument (got {len(groupedArgs)}:\n{groupedArgs}"
2023                )
2024            setVal = groupedArgs[0]
2025            if not isinstance(
2026                setVal,
2027                tuple
2028            ):
2029                raise ParseError(
2030                    f"'{fType}' effect grouped arg must be a tuple. Got:"
2031                    f"\n{setVal}"
2032                )
2033            if len(setVal) == 2:
2034                setWhat, setTo = setVal
2035                if (
2036                    isinstance(setWhat, base.Token)
2037                and isinstance(setTo, base.TokenCount)
2038                ) or (
2039                    isinstance(setWhat, base.MechanismSpecifier)
2040                and isinstance(setTo, base.MechanismState)
2041                ):
2042                    args[fType] = setVal
2043                    return base.effect(**args)  # type:ignore
2044                else:
2045                    raise ParseError(
2046                        f"Invalid '{fType}' effect grouped args:"
2047                        f"\n{groupedArgs}"
2048                    )
2049            elif len(setVal) == 3:
2050                indicator, whichSkill, setTo = setVal
2051                if (
2052                    indicator == 'skill'
2053                and isinstance(whichSkill, base.Skill)
2054                and isinstance(setTo, base.Level)
2055                ):
2056                    args[fType] = setVal
2057                    return base.effect(**args)  # type:ignore
2058                else:
2059                    raise ParseError(
2060                        f"Invalid '{fType}' effect grouped args (not a"
2061                        f" skill):\n{groupedArgs}"
2062                    )
2063            else:
2064                raise ParseError(
2065                    f"Invalid '{fType}' effect grouped args (wrong"
2066                    f" length tuple):\n{groupedArgs}"
2067                )
2068
2069        elif fType == "toggle":
2070            if len(groupedArgs) == 0:
2071                raise ParseError(
2072                    f"'{fType}' effect must have at least one grouped"
2073                    f" argument. Got:\n{groupedArgs}"
2074                )
2075            if (
2076                isinstance(groupedArgs[0], tuple)
2077            and len(groupedArgs[0]) == 2
2078            and isinstance(groupedArgs[0][0], base.MechanismSpecifier)
2079            and isinstance(groupedArgs[0][1], base.MechanismState)
2080            and all(
2081                    isinstance(a, base.MechanismState)
2082                    for a in groupedArgs[1:]
2083                )
2084            ):  # a mechanism toggle
2085                args[fType] = (
2086                    groupedArgs[0][0],
2087                    cast(
2088                        List[base.MechanismState],
2089                        [groupedArgs[0][1]] + groupedArgs[1:]
2090                    )
2091                )
2092                return base.effect(**args)  # type:ignore
2093            elif all(isinstance(a, base.Capability) for a in groupedArgs):
2094                # a capability toggle
2095                args[fType] = cast(List[base.Capability], groupedArgs)
2096                return base.effect(**args)  # type:ignore
2097            else:
2098                raise ParseError(
2099                    f"Invalid arguments for '{fType}' effect. Got:"
2100                    f"\n{groupedArgs}"
2101                )
2102
2103        elif fType in ("bounce", "deactivate"):
2104            if len(groupedArgs) != 0:
2105                raise ParseError(
2106                    f"'{fType}' effect may not include any"
2107                    f" arguments. Got {len(groupedArgs)}):"
2108                    f"\n{groupedArgs}"
2109                )
2110            args[fType] = True
2111            return base.effect(**args)  # type:ignore
2112
2113        elif fType == "follow":
2114            if len(groupedArgs) != 1:
2115                raise ParseError(
2116                    f"'{fType}' effect must include exactly one"
2117                    f" argument. Got {len(groupedArgs)}):"
2118                    f"\n{groupedArgs}"
2119                )
2120
2121            transition = groupedArgs[0]
2122            if not isinstance(transition, base.Transition):
2123                raise ParseError(
2124                    f"Invalid argument for '{fType}' effect. Needed a"
2125                    f" transition but got:\n{groupedArgs}"
2126                )
2127            args[fType] = transition
2128            return base.effect(**args)  # type:ignore
2129
2130        elif fType == "edit":
2131            if len(groupedArgs) == 0:
2132                raise ParseError(
2133                    "An 'edit' effect requires at least one argument."
2134                )
2135            for i, arg in enumerate(groupedArgs):
2136                if not isinstance(arg, list):
2137                    raise ParseError(
2138                        f"'edit' effect argument {i} is not a sub-list:"
2139                        f"\n  {arg!r}"
2140                        f"\nAmong arguments:"
2141                        f"\n  {groupedArgs}"
2142                    )
2143                for j, cmd in enumerate(arg):
2144                    if not isinstance(cmd, tuple):
2145                        raise ParseError(
2146                            f"'edit' effect argument {i} contains"
2147                            f" non-tuple part {j}:"
2148                            f"\n  {cmd!r}"
2149                            f"\nAmong arguments:"
2150                            f"\n  {groupedArgs}"
2151                        )
2152
2153            args[fType] = groupedArgs  # type:ignore
2154            return base.effect(**args)  # type:ignore
2155
2156        elif fType == "goto":
2157            if len(groupedArgs) not in (1, 2):
2158                raise ParseError(
2159                    f"A 'goto' effect must include either one or two"
2160                    f" grouped arguments. Got {len(groupedArgs)}:"
2161                    f"\n{groupedArgs}"
2162                )
2163
2164            first = groupedArgs[0]
2165            if not isinstance(
2166                first,
2167                (base.DecisionName, base.DecisionSpecifier)
2168            ):
2169                raise ParseError(
2170                    f"'{fType}' effect must first specify a destination"
2171                    f" decision. Got:\n{groupedArgs}"
2172                )
2173
2174            # Check if it's really a decision ID
2175            dSpec: base.AnyDecisionSpecifier
2176            if isinstance(first, base.DecisionName):
2177                try:
2178                    dSpec = int(first)
2179                except ValueError:
2180                    dSpec = first
2181            else:
2182                dSpec = first
2183
2184            if len(groupedArgs) == 2:
2185                second = groupedArgs[1]
2186                if not isinstance(second, base.FocalPointName):
2187                    raise ParseError(
2188                        f"'{fType}' effect must have a focal point name"
2189                        f" if it has a second part. Got:\n{groupedArgs}"
2190                    )
2191                args[fType] = (dSpec, second)
2192            else:
2193                args[fType] = dSpec
2194
2195            return base.effect(**args)  # type:ignore
2196
2197        elif fType == "save":
2198            if len(groupedArgs) not in (0, 1):
2199                raise ParseError(
2200                    f"'{fType}' effect must include exactly zero or one"
2201                    f" argument(s). Got {len(groupedArgs)}):"
2202                    f"\n{groupedArgs}"
2203                )
2204
2205            if len(groupedArgs) == 1:
2206                slot = groupedArgs[0]
2207            else:
2208                slot = base.DEFAULT_SAVE_SLOT
2209            if not isinstance(slot, base.SaveSlot):
2210                raise ParseError(
2211                    f"Invalid argument for '{fType}' effect. Needed a"
2212                    f" save slot but got:\n{groupedArgs}"
2213                )
2214            args[fType] = slot
2215            return base.effect(**args)  # type:ignore
2216
2217        else:
2218            raise ParseError(f"Invalid effect type: '{fType}'.")

2220    def parseEffect(self, effectStr: str) -> base.Effect:
2221        """
2222        Works like `parseEffectFromTokens` but starts with a raw string.
2223        For example:
2224
2225        >>> pf = ParseFormat()
2226        >>> pf.parseEffect("gain jump") == base.effect(gain='jump')
2227        True
2228        >>> pf.parseEffect("set door:open") == base.effect(
2229        ...     set=(
2230        ...         base.MechanismSpecifier(None, None, None, 'door'),
2231        ...         'open'
2232        ...     )
2233        ... )
2234        True
2235        >>> pf.parseEffect("set coins*10") == base.effect(set=('coins', 10))
2236        True
2237        >>> pf.parseEffect("set agility^3") == base.effect(
2238        ...     set=('skill', 'agility', 3)
2239        ... )
2240        True
2241        """
2242        return self.parseEffectFromTokens(self.lex(effectStr))
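
Modifier markers are handled too; based on the unparseEffect doctests below, the default-format delay marker (',2') should parse back into a delay argument:

>>> pf = ParseFormat()
>>> pf.parseEffect("deactivate ,2") == base.effect(deactivate=True, delay=2)
True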

2244    def unparseEffect(self, effect: base.Effect) -> str:
2245        """
2246        The opposite of `parseEffect`; turns an effect back into a
2247        string representation.
2248
2249        For example:
2250
2251        >>> pf = ParseFormat()
2252        >>> e = {
2253        ...     "type": "gain",
2254        ...     "applyTo": "active",
2255        ...     "value": "flight",
2256        ...     "delay": None,
2257        ...     "charges": None,
2258        ...     "hidden": False
2259        ... }
2260        >>> pf.unparseEffect(e)
2261        'gain flight'
2262        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2263        True
2264        >>> s = 'gain flight'
2265        >>> pf.unparseEffect(pf.parseEffect(s)) == s
2266        True
2267        >>> s2 = '  gain\\nflight'
2268        >>> pf.unparseEffect(pf.parseEffect(s2)) == s
2269        True
2270        >>> e = {
2271        ...     "type": "gain",
2272        ...     "applyTo": "active",
2273        ...     "value": ("gold", 5),
2274        ...     "delay": 1,
2275        ...     "charges": 2,
2276        ...     "hidden": False
2277        ... }
2278        >>> pf.unparseEffect(e)
2279        'gain gold*5 ,1 =2'
2280        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2281        True
2282        >>> e = {
2283        ...     "type": "set",
2284        ...     "applyTo": "active",
2285        ...     "value": (
2286        ...         base.MechanismSpecifier(None, None, None, "gears"),
2287        ...         "on"
2288        ...     ),
2289        ...     "delay": None,
2290        ...     "charges": 1,
2291        ...     "hidden": False
2292        ... }
2293        >>> pf.unparseEffect(e)
2294        'set gears:on =1'
2295        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2296        True
2297        >>> e = {
2298        ...     "type": "toggle",
2299        ...     "applyTo": "active",
2300        ...     "value": ["red", "blue"],
2301        ...     "delay": None,
2302        ...     "charges": None,
2303        ...     "hidden": False
2304        ... }
2305        >>> pf.unparseEffect(e)
2306        'toggle red blue'
2307        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2308        True
2309        >>> e = {
2310        ...     "type": "toggle",
2311        ...     "applyTo": "active",
2312        ...     "value": (
2313        ...         base.MechanismSpecifier(None, None, None, "switch"),
2314        ...         ["on", "off"]
2315        ...     ),
2316        ...     "delay": None,
2317        ...     "charges": None,
2318        ...     "hidden": False
2319        ... }
2320        >>> pf.unparseEffect(e)
2321        'toggle switch:on off'
2322        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2323        True
2324        >>> e = {
2325        ...     "type": "deactivate",
2326        ...     "applyTo": "active",
2327        ...     "value": None,
2328        ...     "delay": 2,
2329        ...     "charges": None,
2330        ...     "hidden": False
2331        ... }
2332        >>> pf.unparseEffect(e)
2333        'deactivate ,2'
2334        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2335        True
2336        >>> e = {
2337        ...     "type": "goto",
2338        ...     "applyTo": "common",
2339        ...     "value": 3,
2340        ...     "delay": None,
2341        ...     "charges": None,
2342        ...     "hidden": False
2343        ... }
2344        >>> pf.unparseEffect(e)
2345        'goto 3 +c'
2346        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2347        True
2348        >>> e = {
2349        ...     "type": "goto",
2350        ...     "applyTo": "common",
2351        ...     "value": 3,
2352        ...     "delay": None,
2353        ...     "charges": None,
2354        ...     "hidden": True
2355        ... }
2356        >>> pf.unparseEffect(e)
2357        'goto 3 +c +h'
2358        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2359        True
2360        >>> e = {
2361        ...     "type": "goto",
2362        ...     "applyTo": "active",
2363        ...     "value": 'home',
2364        ...     "delay": None,
2365        ...     "charges": None,
2366        ...     "hidden": False
2367        ... }
2368        >>> pf.unparseEffect(e)
2369        'goto home'
2370        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2371        True
2372        >>> e = base.effect(edit=[
2373        ...     [
2374        ...         commands.command('val', '5'),
2375        ...         commands.command('empty', 'list'),
2376        ...         commands.command('append', '$_')
2377        ...     ],
2378        ...     [
2379        ...         commands.command('val', '11'),
2380        ...         commands.command('assign', 'var', '$_'),
2381        ...         commands.command('op', '+', '$var', '$var')
2382        ...     ],
2383        ... ])
2384        >>> pf.unparseEffect(e)
2385        'edit {\\n  val 5;\\n  empty list;\\n  append $_;\\n}\
2386 {\\n  val 11;\\n  assign var $_;\\n  op + $var $var;\\n}'
2387        >>> pf.parseEffect(pf.unparseEffect(e)) == e
2388        True
2389        """
2390        result: List[str] = []
2391
2392        # Reverse the effect type into a marker
2393        eType = effect['type']
2394        for key, val in self.effectNames.items():
2395            if val == eType:
2396                if len(result) != 0:
2397                    raise ParseError(
2398                        f"Effect map contains multiple matching entries"
2399                        f"for effect type '{effect['type']}':"
2400                        f" '{result[0]}' and '{key}'"
2401                    )
2402                result.append(key)
2403                # Don't break, so we can verify the match is unique
2404
2405        eVal = effect['value']
2406        if eType in ('gain', 'lose'):
2407            eVal = cast(Union[base.Capability, Tuple[base.Token, int]], eVal)
2408            if isinstance(eVal, str):  # a capability
2409                result.append(eVal)
2410            else:  # a token
2411                result.append(
2412                    eVal[0]
2413                  + self.formatDict[Lexeme.tokenCount]
2414                  + str(eVal[1])
2415                )
2416        elif eType == 'set':
2417            eVal = cast(
2418                # TODO: Add skill level setting here & elsewhere
2419                Union[
2420                    Tuple[base.Token, base.TokenCount],
2421                    Tuple[base.MechanismSpecifier, base.MechanismState]
2422                ],
2423                eVal
2424            )
2425            if len(eVal) != 2:
2426                raise ValueError(
2427                    f"'set' effect has non-length-2 value:"
2428                    f"\n  {repr(effect)}"
2429                )
2430            if isinstance(eVal[1], int):  # a token count
2431                result.append(eVal[0])
2432                result.append(self.formatDict[Lexeme.tokenCount])
2433                result.append(str(eVal[1]))
2434            else:  # a mechanism
2435                if isinstance(eVal[0], base.MechanismSpecifier):
2436                    mSpec = self.unparseMechanismSpecifier(eVal[0])
2437                else:
2438                    print(f"eval[0] is: {type(eVal[0])} : {eVal[0]!r}")
2439                    assert isinstance(eVal[0], base.MechanismName)
2440                    mSpec = eVal[0]
2441                result.append(
2442                    mSpec
2443                  + self.formatDict[Lexeme.mechanismSeparator]
2444                  + eVal[1]
2445                )
2446        elif eType == 'toggle':
2447            if isinstance(eVal, tuple):  # mechanism states
2448                tSpec, states = cast(
2449                    Tuple[
2450                        base.AnyMechanismSpecifier,
2451                        List[base.MechanismState]
2452                    ],
2453                    eVal
2454                )
2455                firstState = states[0]
2456                restStates = states[1:]
2457                if isinstance(tSpec, base.MechanismSpecifier):
2458                    mStr = self.unparseMechanismSpecifier(tSpec)
2459                else:
2460                    mStr = str(tSpec)
2461                result.append(
2462                    mStr
2463                  + self.formatDict[Lexeme.mechanismSeparator]
2464                  + firstState
2465                )
2466                result.extend(restStates)
2467            else:  # capabilities
2468                assert isinstance(eVal, list)
2469                eVal = cast(List[base.Capability], eVal)
2470                result.extend(eVal)
2471        elif eType in ('deactivate', 'bounce'):
2472            if eVal is not None:
2473                raise ValueError(
2474                    f"'{eType}' effect has non-None value:"
2475                    f"\n  {repr(effect)}"
2476                )
2477        elif eType == 'follow':
2478            eVal = cast(base.Token, eVal)
2479            result.append(eVal)
2480        elif eType == 'edit':
2481            eVal = cast(List[List[commands.Command]], eVal)
2482            if len(eVal) == 0:
2483                result.append('{}')
2484            else:
2485                for cmdList in eVal:
2486                    result.append(
2487                        self.unparseCommandList(cmdList)
2488                    )
2489        elif eType == 'goto':
2490            if isinstance(eVal, base.DecisionSpecifier):
2491                result.append(self.unparseDecisionSpecifier(eVal))
2492            elif isinstance(eVal, (base.DecisionID, base.DecisionName)):
2493                result.append(str(eVal))
2494            elif (
2495                isinstance(eVal, tuple)
2496            and len(eVal) == 2
2497            and isinstance(eVal[1], base.FocalPointName)
2498            ):
2499                if isinstance(eVal[0], base.DecisionSpecifier):
2500                    result.append(self.unparseDecisionSpecifier(eVal[0]))
2501                else:
2502                    result.append(str(eVal[0]))
2503                result.append(eVal[1])
2504            else:
2505                raise ValueError(
2506                    f"'{eType}' effect has invalid value {eVal}"
2507                )
2508        elif eType == 'save':
2509            # It's just a string naming the save slot
2510            result.append(eVal)
2511        else:
2512            raise ValueError(
2513                f"Unrecognized effect type '{eType}' in effect:"
2514                f"\n  {repr(effect)}"
2515            )
2516
2517        # Add modifier strings
2518        if effect['applyTo'] == 'common':
2519            result.append(self.formatDict[Lexeme.inCommon])
2520
2521        if effect['hidden']:
2522            result.append(self.formatDict[Lexeme.isHidden])
2523
2524        dVal = effect['delay']
2525        if dVal is not None:
2526            result.append(
2527                self.formatDict[Lexeme.sepOrDelay] + str(dVal)
2528            )
2529
2530        cVal = effect['charges']
2531        if cVal is not None:
2532            result.append(
2533                self.formatDict[Lexeme.effectCharges] + str(cVal)
2534            )
2535
2536        joined = ''
2537        before = False
2538        for r in result:
2539            if (
2540                r.startswith(' ')
2541             or r.startswith('\n')
2542             or r.endswith(' ')
2543             or r.endswith('\n')
2544            ):
2545                joined += r
2546                before = False
2547            else:
2548                joined += (' ' if before else '') + r
2549                before = True
2550        return joined
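
Effect types whose value is a plain string should survive the round trip as well; assuming base.Transition is a string alias (as the 'follow' branch above treats it), a hypothetical transition name gives:

>>> pf = ParseFormat()
>>> pf.unparseEffect(base.effect(follow='downstream'))
'follow downstream'
>>> pf.parseEffect('follow downstream') == base.effect(follow='downstream')
True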

2552    def parseDecisionSpecifierFromTokens(
2553        self,
2554        tokens: LexedTokens,
2555        start: int = 0
2556    ) -> Tuple[Union[base.DecisionSpecifier, int], int]:
2557        """
2558        Parses a decision specifier starting at the specified position
2559        in the given tokens list. No ending position is specified, but
2560        instead this function returns a tuple containing the parsed
2561        `base.DecisionSpecifier` along with an index in the tokens list
2562        where the end of the specifier was found.
2563
2564        For example:
2565
2566        >>> pf = ParseFormat()
2567        >>> pf.parseDecisionSpecifierFromTokens(['m'])
2568        (DecisionSpecifier(domain=None, zone=None, name='m'), 0)
2569        >>> pf.parseDecisionSpecifierFromTokens(['12'])  # ID specifier
2570        (12, 0)
2571        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'])
2572        (DecisionSpecifier(domain=None, zone=None, name='a'), 0)
2573        >>> pf.parseDecisionSpecifierFromTokens(['a', 'm'], 1)
2574        (DecisionSpecifier(domain=None, zone=None, name='m'), 1)
2575        >>> pf.parseDecisionSpecifierFromTokens(
2576        ...     ['a', Lexeme.domainSeparator, 'm']
2577        ... )
2578        (DecisionSpecifier(domain='a', zone=None, name='m'), 2)
2579        >>> pf.parseDecisionSpecifierFromTokens(
2580        ...     ['a', Lexeme.zoneSeparator, 'm']
2581        ... )
2582        (DecisionSpecifier(domain=None, zone='a', name='m'), 2)
2583        >>> pf.parseDecisionSpecifierFromTokens(
2584        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
2585        ... )
2586        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2587        >>> pf.parseDecisionSpecifierFromTokens(
2588        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
2589        ... )
2590        (DecisionSpecifier(domain='a', zone='b', name='m'), 4)
2591        >>> pf.parseDecisionSpecifierFromTokens(
2592        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
2593        ... )
2594        (DecisionSpecifier(domain=None, zone='a', name='b'), 2)
2595        >>> pf.parseDecisionSpecifierFromTokens(  # ID-style name w/ zone
2596        ...     ['a', Lexeme.zoneSeparator, '5'],
2597        ... )
2598        Traceback (most recent call last):
2599        ...
2600        exploration.base.InvalidDecisionSpecifierError...
2601        >>> pf.parseDecisionSpecifierFromTokens(
2602        ...     ['d', Lexeme.domainSeparator, '123']
2603        ... )
2604        Traceback (most recent call last):
2605        ...
2606        exploration.base.InvalidDecisionSpecifierError...
2607        >>> pf.parseDecisionSpecifierFromTokens(
2608        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2609        ...     1
2610        ... )
2611        Traceback (most recent call last):
2612        ...
2613        exploration.parsing.ParseError...
2614        >>> pf.parseDecisionSpecifierFromTokens(
2615        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
2616        ...     2
2617        ... )
2618        (DecisionSpecifier(domain='b', zone=None, name='m'), 4)
2619        >>> pf.parseDecisionSpecifierFromTokens(
2620        ...     [
2621        ...         'a',
2622        ...         Lexeme.domainSeparator,
2623        ...         'b',
2624        ...         Lexeme.zoneSeparator,
2625        ...         'c',
2626        ...         Lexeme.zoneSeparator,
2627        ...         'm'
2628        ...     ]
2629        ... )
2630        (DecisionSpecifier(domain='a', zone='b', name='c'), 4)
2631        >>> pf.parseDecisionSpecifierFromTokens(
2632        ...     [
2633        ...         'a',
2634        ...         Lexeme.domainSeparator,
2635        ...         'b',
2636        ...         Lexeme.zoneSeparator,
2637        ...         'c',
2638        ...         Lexeme.zoneSeparator,
2639        ...         'm'
2640        ...     ],
2641        ...     2
2642        ... )
2643        (DecisionSpecifier(domain=None, zone='b', name='c'), 4)
2644        >>> pf.parseDecisionSpecifierFromTokens(
2645        ...     [
2646        ...         'a',
2647        ...         Lexeme.domainSeparator,
2648        ...         'b',
2649        ...         Lexeme.zoneSeparator,
2650        ...         'c',
2651        ...         Lexeme.zoneSeparator,
2652        ...         'm'
2653        ...     ],
2654        ...     4
2655        ... )
2656        (DecisionSpecifier(domain=None, zone='c', name='m'), 6)
2657        >>> pf.parseDecisionSpecifierFromTokens(
2658        ...     [
2659        ...         'set',
2660        ...         'main',
2661        ...         Lexeme.domainSeparator,
2662        ...         'zone',
2663        ...         Lexeme.zoneSeparator,
2664        ...         'compass',
2665        ...         'north',
2666        ...         'bounce',
2667        ...     ],
2668        ...     1
2669        ... )
2670        (DecisionSpecifier(domain='main', zone='zone', name='compass'), 5)
2671        """
2672        # Check bounds & normalize start index
2673        nTokens = len(tokens)
2674        if start < -nTokens:
2675            raise IndexError(
2676                f"Invalid start index {start} for {nTokens} tokens (too"
2677                f" negative)."
2678            )
2679        elif start >= nTokens:
2680            raise IndexError(
2681                f"Invalid start index {start} for {nTokens} tokens (too"
2682                f" big)."
2683            )
2684        elif start < 0:
2685            start = nTokens + start
2686
2687        assert (start < nTokens)
2688
2689        first = tokens[start]
2690        if not isinstance(first, str):
2691            raise ParseError(
2692                f"Invalid decision specifier (must start with a name or"
2693                f" id; got: {first} = {self.formatDict[first]})."
2694            )
2695
2696        ds = base.DecisionSpecifier(None, None, first)
2697        result = (base.idOrDecisionSpecifier(ds), start)
2698
2699        domain = None
2700        zoneOrDecision = None
2701
2702        if start + 1 >= nTokens:  # at end of tokens
2703            return result
2704
2705        firstSep = tokens[start + 1]
2706        if firstSep == Lexeme.domainSeparator:
2707            domain = first
2708        elif firstSep == Lexeme.zoneSeparator:
2709            zoneOrDecision = first
2710        else:
2711            return result
2712
2713        if start + 2 >= nTokens:
2714            return result
2715
2716        second = tokens[start + 2]
2717        if isinstance(second, Lexeme):
2718            return result
2719
2720        ds = base.DecisionSpecifier(domain, zoneOrDecision, second)
2721        result = (base.idOrDecisionSpecifier(ds), start + 2)
2722
2723        if start + 3 >= nTokens:
2724            return result
2725
2726        secondSep = tokens[start + 3]
2727        if start + 4 >= nTokens:
2728            return result
2729
2730        third = tokens[start + 4]
2731        if secondSep == Lexeme.zoneSeparator:
2732            if zoneOrDecision is not None:  # two in a row
2733                return result
2734            else:
2735                if not isinstance(third, base.DecisionName):
2736                    return result
2737                else:
2738                    zoneOrDecision = second
2739        else:
2740            return result
2741
2742        if isinstance(third, Lexeme):
2743            return result
2744
2745        ds = base.DecisionSpecifier(domain, zoneOrDecision, third)
2746        return (base.idOrDecisionSpecifier(ds), start + 4)

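Because the parsed end index is returned along with the result, a caller can walk a longer token list by resuming one token past each specifier. A minimal sketch (the helper eachDecisionSpecifier is illustrative, not part of this module, and assumes the tokens form back-to-back specifiers with no stray separators between them):

    def eachDecisionSpecifier(pf, tokens):
        # Yield each decision specifier (or decision ID) in `tokens`,
        # resuming one token past the end index each parse reports.
        i = 0
        while i < len(tokens):
            spec, end = pf.parseDecisionSpecifierFromTokens(tokens, i)
            yield spec
            i = end + 1
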
def parseDecisionSpecifier(self, specString: str) -> Union[int, exploration.base.DecisionSpecifier]:
2748    def parseDecisionSpecifier(
2749        self,
2750        specString: str
2751    ) -> Union[base.DecisionID, base.DecisionSpecifier]:
2752        """
2753        Parses a full `DecisionSpecifier` from a single string. Can
2754        parse integer decision IDs in string form, and returns a
2755        `DecisionID` in that case, otherwise returns a
2756        `DecisionSpecifier`. Assumes that all int-convertible strings
2757        are decision IDs, so it cannot deal with feature names which are
2758        just numbers.
2759
2760        For example:
2761
2762        >>> pf = ParseFormat()
2763        >>> pf.parseDecisionSpecifier('example')
2764        DecisionSpecifier(domain=None, zone=None, name='example')
2765        >>> pf.parseDecisionSpecifier('outer::example')
2766        DecisionSpecifier(domain=None, zone='outer', name='example')
2767        >>> pf.parseDecisionSpecifier('domain//region::feature')
2768        DecisionSpecifier(domain='domain', zone='region', name='feature')
2769        >>> pf.parseDecisionSpecifier('123')
2770        123
2771        >>> pf.parseDecisionSpecifier('region::domain//feature')
2772        Traceback (most recent call last):
2773        ...
2774        exploration.base.InvalidDecisionSpecifierError...
2775        >>> pf.parseDecisionSpecifier('domain1//domain2//feature')
2776        Traceback (most recent call last):
2777        ...
2778        exploration.base.InvalidDecisionSpecifierError...
2779        >>> pf.parseDecisionSpecifier('domain//123')
2780        Traceback (most recent call last):
2781        ...
2782        exploration.base.InvalidDecisionSpecifierError...
2783        >>> pf.parseDecisionSpecifier('region::123')
2784        Traceback (most recent call last):
2785        ...
2786        exploration.base.InvalidDecisionSpecifierError...
2787        """
2788        try:
2789            return int(specString)
2790        except ValueError:
2791            tokens = self.lex(specString)
2792            result, end = self.parseDecisionSpecifierFromTokens(tokens)
2793            if end != len(tokens) - 1:
2794                raise base.InvalidDecisionSpecifierError(
2795                    f"Junk after end of decision specifier:"
2796                    f"\n{tokens[end + 1:]}"
2797                )
2798            return result

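Since the result is either a base.DecisionID (an int) or a base.DecisionSpecifier, callers that need the individual fields should branch on the type first. A short sketch:

    pf = ParseFormat()
    spec = pf.parseDecisionSpecifier('outer::example')
    if isinstance(spec, int):
        # A base.DecisionID; there are no name/zone/domain fields.
        decisionID = spec
    else:
        # A base.DecisionSpecifier tuple.
        name, zone, domain = spec.name, spec.zone, spec.domain
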
def parseFeatureSpecifierFromTokens( self, tokens: List[Union[Lexeme, str]], start: int = 0, limit: int = -1) -> Tuple[exploration.base.FeatureSpecifier, int]:
2800    def parseFeatureSpecifierFromTokens(
2801        self,
2802        tokens: LexedTokens,
2803        start: int = 0,
2804        limit: int = -1
2805    ) -> Tuple[base.FeatureSpecifier, int]:
2806        """
2807        Parses a `FeatureSpecifier` starting from the specified part of
2808        a tokens list. Returns a tuple containing the feature specifier
2809        and the index in the tokens list where the specifier ends.
2810
2811        Can parse integer feature IDs in string form, as well as nested
2812        feature specifiers and plain feature specifiers. Assumes that
2813        all int-convertible strings are feature IDs, so it cannot deal
2814        with feature names which are just numbers.
2815
2816        For example:
2817
2818        >>> pf = ParseFormat()
2819        >>> pf.parseFeatureSpecifierFromTokens(['example'])
2820        (FeatureSpecifier(domain=None, within=[], feature='example',\
2821 part=None), 0)
2822        >>> pf.parseFeatureSpecifierFromTokens(['example1', 'example2'], 1)
2823        (FeatureSpecifier(domain=None, within=[], feature='example2',\
2824 part=None), 1)
2825        >>> pf.parseFeatureSpecifierFromTokens(
2826        ...     [
2827        ...         'domain',
2828        ...         Lexeme.domainSeparator,
2829        ...         'region',
2830        ...         Lexeme.zoneSeparator,
2831        ...         'feature',
2832        ...         Lexeme.partSeparator,
2833        ...         'part'
2834        ...     ]
2835        ... )
2836        (FeatureSpecifier(domain='domain', within=['region'],\
2837 feature='feature', part='part'), 6)
2838        >>> pf.parseFeatureSpecifierFromTokens(
2839        ...     [
2840        ...         'outerRegion',
2841        ...         Lexeme.zoneSeparator,
2842        ...         'midRegion',
2843        ...         Lexeme.zoneSeparator,
2844        ...         'innerRegion',
2845        ...         Lexeme.zoneSeparator,
2846        ...         'feature'
2847        ...     ]
2848        ... )
2849        (FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
2850 'innerRegion'], feature='feature', part=None), 6)
2851        >>> pf.parseFeatureSpecifierFromTokens(
2852        ...     [
2853        ...         'outerRegion',
2854        ...         Lexeme.zoneSeparator,
2855        ...         'midRegion',
2856        ...         Lexeme.zoneSeparator,
2857        ...         'innerRegion',
2858        ...         Lexeme.zoneSeparator,
2859        ...         'feature'
2860        ...     ],
2861        ...     1
2862        ... )
2863        Traceback (most recent call last):
2864        ...
2865        exploration.parsing.InvalidFeatureSpecifierError...
2866        >>> pf.parseFeatureSpecifierFromTokens(
2867        ...     [
2868        ...         'outerRegion',
2869        ...         Lexeme.zoneSeparator,
2870        ...         'midRegion',
2871        ...         Lexeme.zoneSeparator,
2872        ...         'innerRegion',
2873        ...         Lexeme.zoneSeparator,
2874        ...         'feature'
2875        ...     ],
2876        ...     2
2877        ... )
2878        (FeatureSpecifier(domain=None, within=['midRegion', 'innerRegion'],\
2879 feature='feature', part=None), 6)
2880        >>> pf.parseFeatureSpecifierFromTokens(
2881        ...     [
2882        ...         'outerRegion',
2883        ...         Lexeme.zoneSeparator,
2884        ...         'feature',
2885        ...         Lexeme.domainSeparator,
2886        ...         'after',
2887        ...     ]
2888        ... )
2889        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2890 feature='feature', part=None), 2)
2891        >>> pf.parseFeatureSpecifierFromTokens(
2892        ...     [
2893        ...         'outerRegion',
2894        ...         Lexeme.zoneSeparator,
2895        ...         'feature',
2896        ...         Lexeme.domainSeparator,
2897        ...         'after',
2898        ...     ],
2899        ...     2
2900        ... )
2901        (FeatureSpecifier(domain='feature', within=[], feature='after',\
2902 part=None), 4)
2903        >>> # Including a limit:
2904        >>> pf.parseFeatureSpecifierFromTokens(
2905        ...     [
2906        ...         'outerRegion',
2907        ...         Lexeme.zoneSeparator,
2908        ...         'midRegion',
2909        ...         Lexeme.zoneSeparator,
2910        ...         'feature',
2911        ...     ],
2912        ...     0,
2913        ...     2
2914        ... )
2915        (FeatureSpecifier(domain=None, within=['outerRegion'],\
2916 feature='midRegion', part=None), 2)
2917        >>> pf.parseFeatureSpecifierFromTokens(
2918        ...     [
2919        ...         'outerRegion',
2920        ...         Lexeme.zoneSeparator,
2921        ...         'midRegion',
2922        ...         Lexeme.zoneSeparator,
2923        ...         'feature',
2924        ...     ],
2925        ...     0,
2926        ...     0
2927        ... )
2928        (FeatureSpecifier(domain=None, within=[], feature='outerRegion',\
2929 part=None), 0)
2930        >>> pf.parseFeatureSpecifierFromTokens(
2931        ...     [
2932        ...         'region',
2933        ...         Lexeme.zoneSeparator,
2934        ...         Lexeme.zoneSeparator,
2935        ...         'feature',
2936        ...     ]
2937        ... )
2938        (FeatureSpecifier(domain=None, within=[], feature='region',\
2939 part=None), 0)
2940        """
2941        start, limit, nTokens = normalizeEnds(tokens, start, limit)
2942
2943        if nTokens == 0:
2944            raise InvalidFeatureSpecifierError(
2945                "Can't parse a feature specifier from 0 tokens."
2946            )
2947        first = tokens[start]
2948        if isinstance(first, Lexeme):
2949            raise InvalidFeatureSpecifierError(
2950                f"Feature specifier can't begin with a special token."
2951                f" Got:\n{tokens[start:limit + 1]}"
2952            )
2953
2954        if nTokens in (1, 2):
2955            # 2 tokens isn't enough for a second part
2956            fs = base.FeatureSpecifier(
2957                domain=None,
2958                within=[],
2959                feature=first,
2960                part=None
2961            )
2962            return (base.normalizeFeatureSpecifier(fs), start)
2963
2964        firstSep = tokens[start + 1]
2965        secondPart = tokens[start + 2]
2966
2967        if (
2968            firstSep not in (
2969                Lexeme.domainSeparator,
2970                Lexeme.zoneSeparator,
2971                Lexeme.partSeparator
2972            )
2973         or not isinstance(secondPart, str)
2974        ):
2975            # Following tokens won't work out
2976            fs = base.FeatureSpecifier(
2977                domain=None,
2978                within=[],
2979                feature=first,
2980                part=None
2981            )
2982            return (base.normalizeFeatureSpecifier(fs), start)
2983
2984        if firstSep == Lexeme.domainSeparator:
2985            if start + 2 > limit:
2986                return (
2987                    base.FeatureSpecifier(
2988                        domain=first,
2989                        within=[],
2990                        feature=secondPart,
2991                        part=None
2992                    ),
2993                    start + 2
2994                )
2995            else:
2996                rest, restEnd = self.parseFeatureSpecifierFromTokens(
2997                    tokens,
2998                    start + 2,
2999                    limit
3000                )
3001                if rest.domain is not None:  # two domainSeparators in a row
3002                    fs = base.FeatureSpecifier(
3003                        domain=first,
3004                        within=[],
3005                        feature=rest.domain,
3006                        part=None
3007                    )
3008                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3009                else:
3010                    fs = base.FeatureSpecifier(
3011                        domain=first,
3012                        within=rest.within,
3013                        feature=rest.feature,
3014                        part=rest.part
3015                    )
3016                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3017
3018        elif firstSep == Lexeme.zoneSeparator:
3019            if start + 2 > limit:
3020                fs = base.FeatureSpecifier(
3021                    domain=None,
3022                    within=[first],
3023                    feature=secondPart,
3024                    part=None
3025                )
3026                return (base.normalizeFeatureSpecifier(fs), start + 2)
3027            else:
3028                rest, restEnd = self.parseFeatureSpecifierFromTokens(
3029                    tokens,
3030                    start + 2,
3031                    limit
3032                )
3033                if rest.domain is not None:  # domain sep after zone sep
3034                    fs = base.FeatureSpecifier(
3035                        domain=None,
3036                        within=[first],
3037                        feature=rest.domain,
3038                        part=None
3039                    )
3040                    return (base.normalizeFeatureSpecifier(fs), start + 2)
3041                else:
3042                    within = [first]
3043                    within.extend(rest.within)
3044                    fs = base.FeatureSpecifier(
3045                        domain=None,
3046                        within=within,
3047                        feature=rest.feature,
3048                        part=rest.part
3049                    )
3050                    return (base.normalizeFeatureSpecifier(fs), restEnd)
3051
3052        else:  # must be partSeparator
3053            fs = base.FeatureSpecifier(
3054                domain=None,
3055                within=[],
3056                feature=first,
3057                part=secondPart
3058            )
3059            return (base.normalizeFeatureSpecifier(fs), start + 2)

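The limit argument bounds how far the parse may extend, which helps when the specifier is embedded in a longer token list with a known end. In this sketch only the first three tokens are considered, so the part separator after them is never consumed:

    pf = ParseFormat()
    tokens = ['zone', Lexeme.zoneSeparator, 'feature',
              Lexeme.partSeparator, 'part']
    fs, fsEnd = pf.parseFeatureSpecifierFromTokens(tokens, 0, 2)
    # fs == FeatureSpecifier(domain=None, within=['zone'],
    #                        feature='feature', part=None) and fsEnd == 2
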
def parseFeatureSpecifier(self, specString: str) -> exploration.base.FeatureSpecifier:
3061    def parseFeatureSpecifier(self, specString: str) -> base.FeatureSpecifier:
3062        """
3063        Parses a full `FeatureSpecifier` from a single string. See
3064        `parseFeatureSpecifierFromTokens`.
3065
3066        >>> pf = ParseFormat()
3067        >>> pf.parseFeatureSpecifier('example')
3068        FeatureSpecifier(domain=None, within=[], feature='example', part=None)
3069        >>> pf.parseFeatureSpecifier('outer::example')
3070        FeatureSpecifier(domain=None, within=['outer'], feature='example',\
3071 part=None)
3072        >>> pf.parseFeatureSpecifier('example%%middle')
3073        FeatureSpecifier(domain=None, within=[], feature='example',\
3074 part='middle')
3075        >>> pf.parseFeatureSpecifier('domain//region::feature%%part')
3076        FeatureSpecifier(domain='domain', within=['region'],\
3077 feature='feature', part='part')
3078        >>> pf.parseFeatureSpecifier(
3079        ...     'outerRegion::midRegion::innerRegion::feature'
3080        ... )
3081        FeatureSpecifier(domain=None, within=['outerRegion', 'midRegion',\
3082 'innerRegion'], feature='feature', part=None)
3083        >>> pf.parseFeatureSpecifier('region::domain//feature')
3084        Traceback (most recent call last):
3085        ...
3086        exploration.parsing.InvalidFeatureSpecifierError...
3087        >>> pf.parseFeatureSpecifier('feature%%part1%%part2')
3088        Traceback (most recent call last):
3089        ...
3090        exploration.parsing.InvalidFeatureSpecifierError...
3091        >>> pf.parseFeatureSpecifier('domain1//domain2//feature')
3092        Traceback (most recent call last):
3093        ...
3094        exploration.parsing.InvalidFeatureSpecifierError...
3095        >>> # TODO: Issue warnings for these...
3096        >>> pf.parseFeatureSpecifier('domain//123')  # domain discarded
3097        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3098        >>> pf.parseFeatureSpecifier('region::123')  # zone discarded
3099        FeatureSpecifier(domain=None, within=[], feature=123, part=None)
3100        >>> pf.parseFeatureSpecifier('123%%part')
3101        FeatureSpecifier(domain=None, within=[], feature=123, part='part')
3102        """
3103        tokens = self.lex(specString)
3104        result, rEnd = self.parseFeatureSpecifierFromTokens(tokens)
3105        if rEnd != len(tokens) - 1:
3106            raise InvalidFeatureSpecifierError(
3107                f"Feature specifier has extra stuff at end:"
3108                f" {tokens[rEnd + 1:]}"
3109            )
3110        else:
3111            return result

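Because the whole string must be consumed, trailing tokens raise InvalidFeatureSpecifierError rather than being silently dropped. Callers handling untrusted input can guard the call, as in this sketch (userInput stands for an arbitrary string):

    try:
        fs = pf.parseFeatureSpecifier(userInput)
    except InvalidFeatureSpecifierError:
        fs = None  # not a complete, well-formed feature specifier
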
def normalizeFeatureSpecifier( self, spec: Union[int, str, exploration.base.FeatureSpecifier]) -> exploration.base.FeatureSpecifier:
3113    def normalizeFeatureSpecifier(
3114        self,
3115        spec: base.AnyFeatureSpecifier
3116    ) -> base.FeatureSpecifier:
3117        """
3118        Normalizes any kind of feature specifier into an official
3119        `FeatureSpecifier` tuple.
3120
3121        For example:
3122
3123        >>> pf = ParseFormat()
3124        >>> pf.normalizeFeatureSpecifier('town')
3125        FeatureSpecifier(domain=None, within=[], feature='town', part=None)
3126        >>> pf.normalizeFeatureSpecifier(5)
3127        FeatureSpecifier(domain=None, within=[], feature=5, part=None)
3128        >>> pf.parseFeatureSpecifierFromTokens(
3129        ...     [
3130        ...         'domain',
3131        ...         Lexeme.domainSeparator,
3132        ...         'region',
3133        ...         Lexeme.zoneSeparator,
3134        ...         'feature',
3135        ...         Lexeme.partSeparator,
3136        ...         'part'
3137        ...     ]
3138        ... )
3139        (FeatureSpecifier(domain='domain', within=['region'],\
3140 feature='feature', part='part'), 6)
3141        >>> pf.normalizeFeatureSpecifier('dom//one::two::three%%middle')
3142        FeatureSpecifier(domain='dom', within=['one', 'two'],\
3143 feature='three', part='middle')
3144        >>> pf.normalizeFeatureSpecifier(
3145        ...   base.FeatureSpecifier(None, ['region'], 'place', None)
3146        ... )
3147        FeatureSpecifier(domain=None, within=['region'], feature='place',\
3148 part=None)
3149        >>> fs = base.FeatureSpecifier(None, [], 'place', None)
3150        >>> ns = pf.normalizeFeatureSpecifier(fs)
3151        >>> ns is fs  # Doesn't create unnecessary clones
3152        True
3153        """
3154        if isinstance(spec, base.FeatureSpecifier):
3155            return spec
3156        elif isinstance(spec, base.FeatureID):
3157            return base.FeatureSpecifier(None, [], spec, None)
3158        elif isinstance(spec, str):
3159            return self.parseFeatureSpecifier(spec)
3160        else:
3161            raise TypeError(f"Invalid feature specifier type: '{type(spec)}'")

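This is the natural way for an API to accept any base.AnyFeatureSpecifier (an ID, a string, or an existing tuple) and normalize once at the boundary. A minimal sketch (featureDomain is a hypothetical example, not part of this module):

    def featureDomain(pf, spec):
        # Accept an ID, a string, or a FeatureSpecifier; read one field.
        fs = pf.normalizeFeatureSpecifier(spec)
        return fs.domain
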
def unparseChallenge(self, challenge: exploration.base.Challenge) -> str:
3163    def unparseChallenge(self, challenge: base.Challenge) -> str:
3164        """
3165        Turns a `base.Challenge` into a string that can be turned back
3166        into an equivalent challenge by `parseChallenge`. For example:
3167
3168        >>> pf = ParseFormat()
3169        >>> c = base.challenge(
3170        ...     skills=base.BestSkill('brains', 'brawn'),
3171        ...     level=2,
3172        ...     success=[base.effect(set=('switch', 'on'))],
3173        ...     failure=[
3174        ...         base.effect(deactivate=True, delay=1),
3175        ...         base.effect(bounce=True)
3176        ...     ],
3177        ...     outcome=True
3178        ... )
3179        >>> r = pf.unparseChallenge(c)
3180        >>> r
3181        '<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}'
3182        >>> pf.parseChallenge(r) == c
3183        True
3184        >>> c2 = base.challenge(
3185        ...     skills=base.CombinedSkill(
3186        ...         -2,
3187        ...         base.ConditionalSkill(
3188        ...             base.ReqCapability('tough'),
3189        ...             base.BestSkill(1),
3190        ...             base.BestSkill(-1)
3191        ...         )
3192        ...     ),
3193        ...     level=-2,
3194        ...     success=[base.effect(gain='orb')],
3195        ...     failure=[],
3196        ...     outcome=None
3197        ... )
3198        >>> r2 = pf.unparseChallenge(c2)
3199        >>> r2
3200        '<-2>sum(-2, if(tough, best(1), best(-1))){gain orb}{}'
3201        >>> # TODO: let this parse through without BestSkills...
3202        >>> pf.parseChallenge(r2) == c2
3203        True
3204        """
3205        lt = self.formatDict[Lexeme.angleLeft]
3206        gt = self.formatDict[Lexeme.angleRight]
3207        result = (
3208            lt + str(challenge['level']) + gt
3209          + challenge['skills'].unparse()
3210        )
3211        if challenge['outcome'] is True:
3212            result += gt
3213        result += self.unparseConsequence(challenge['success'])
3214        if challenge['outcome'] is False:
3215            result += gt
3216        result += self.unparseConsequence(challenge['failure'])
3217        return result

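As the code above shows, the extra angle bracket after the skill combination marks a known outcome: it precedes the success consequence when outcome is True, precedes the failure consequence when it is False, and is absent when the outcome is None. A round-trip check in the same spirit as the doctests:

    c = base.challenge(
        skills=base.BestSkill('luck'),
        level=1,
        success=[base.effect(gain='key')],
        failure=[],
        outcome=None
    )
    assert pf.parseChallenge(pf.unparseChallenge(c)) == c
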
def unparseCondition(self, condition: exploration.base.Condition) -> str:
3219    def unparseCondition(self, condition: base.Condition) -> str:
3220        """
3221        Given a `base.Condition` returns a string that would result in
3222        that condition if given to `parseCondition`. For example:
3223
3224        >>> pf = ParseFormat()
3225        >>> c = base.condition(
3226        ...     condition=base.ReqAny([
3227        ...         base.ReqCapability('brawny'),
3228        ...         base.ReqNot(base.ReqTokens('weights', 3))
3229        ...     ]),
3230        ...     consequence=[base.effect(gain='power')]
3231        ... )
3232        >>> r = pf.unparseCondition(c)
3233        >>> r
3234        '??((brawny|!(weights*3))){gain power}{}'
3235        >>> pf.parseCondition(r) == c
3236        True
3237        """
3238        return (
3239            self.formatDict[Lexeme.doubleQuestionmark]
3240          + self.formatDict[Lexeme.openParen]
3241          + condition['condition'].unparse()
3242          + self.formatDict[Lexeme.closeParen]
3243          + self.unparseConsequence(condition['consequence'])
3244          + self.unparseConsequence(condition['alternative'])
3245        )

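Note the doubled parentheses in the example output: the outer pair is added here via Lexeme.openParen/Lexeme.closeParen, while the inner pair comes from ReqAny's own unparse. A condition with no alternative ends in an empty {}, as in this sketch (assuming ReqCapability unparses to the bare capability name):

    c = base.condition(
        condition=base.ReqCapability('jump'),
        consequence=[base.effect(gain='boots')]
    )
    s = pf.unparseCondition(c)
    # Expected shape: '??(jump){gain boots}{}'
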
def unparseConsequence( self, consequence: List[Union[exploration.base.Challenge, exploration.base.Effect, exploration.base.Condition]]) -> str:
3247    def unparseConsequence(self, consequence: base.Consequence) -> str:
3248        """
3249        Given a `base.Consequence`, returns a string encoding of it,
3250        using the same format that `parseConsequence` will parse. Uses
3251        function-call-like syntax and curly braces to denote different
3252        sub-consequences. See also `SkillCombination.unparse` and
3253        `Requirement.unparse`. For example:
3254
3255        >>> pf = ParseFormat()
3256        >>> c = [base.effect(gain='one'), base.effect(lose='one')]
3257        >>> pf.unparseConsequence(c)
3258        '{gain one; lose one}'
3259        >>> c = [
3260        ...     base.challenge(
3261        ...         skills=base.BestSkill('brains', 'brawn'),
3262        ...         level=2,
3263        ...         success=[base.effect(set=('switch', 'on'))],
3264        ...         failure=[
3265        ...             base.effect(deactivate=True, delay=1),
3266        ...             base.effect(bounce=True)
3267        ...         ],
3268        ...         outcome=True
3269        ...     )
3270        ... ]
3271        >>> pf.unparseConsequence(c)
3272        '{<2>best(brains, brawn)>{set switch:on}{deactivate ,1; bounce}}'
3273        >>> c[0]['outcome'] = False
3274        >>> pf.unparseConsequence(c)
3275        '{<2>best(brains, brawn){set switch:on}>{deactivate ,1; bounce}}'
3276        >>> c[0]['outcome'] = None
3277        >>> pf.unparseConsequence(c)
3278        '{<2>best(brains, brawn){set switch:on}{deactivate ,1; bounce}}'
3279        >>> c = [
3280        ...     base.condition(
3281        ...         condition=base.ReqAny([
3282        ...             base.ReqCapability('brawny'),
3283        ...             base.ReqNot(base.ReqTokens('weights', 3))
3284        ...         ]),
3285        ...         consequence=[
3286        ...             base.challenge(
3287        ...                 skills=base.CombinedSkill('brains', 'brawn'),
3288        ...                 level=3,
3289        ...                 success=[base.effect(goto='home')],
3290        ...                 failure=[base.effect(bounce=True)],
3291        ...                 outcome=None
3292        ...             )
3293        ...         ]  # no alternative -> empty list
3294        ...     )
3295        ... ]
3296        >>> pf.unparseConsequence(c)
3297        '{??((brawny|!(weights*3))){\
3298<3>sum(brains, brawn){goto home}{bounce}}{}}'
3299        >>> c = [base.effect(gain='if(power){gain "mimic"}')]
3300        >>> # TODO: Make this work!
3301        >>> # pf.unparseConsequence(c)
3302
3303        '{gain "if(power){gain \\\\"mimic\\\\"}"}'
3304        """
3305        result = self.formatDict[Lexeme.openCurly]
3306        for item in consequence:
3307            if 'skills' in item:  # a Challenge
3308                item = cast(base.Challenge, item)
3309                result += self.unparseChallenge(item)
3310
3311            elif 'value' in item:  # an Effect
3312                item = cast(base.Effect, item)
3313                result += self.unparseEffect(item)
3314
3315            elif 'condition' in item:  # a Condition
3316                item = cast(base.Condition, item)
3317                result += self.unparseCondition(item)
3318
3319            else:  # bad dict
3320                raise TypeError(
3321                    f"Invalid consequence: items in the list must be"
3322                    f" Effects, Challenges, or Conditions (got a dictionary"
3323                    f" without 'skills', 'value', or 'condition' keys)."
3324                    f"\nGot item: {repr(item)}"
3325                )
3326            result += '; '
3327
3328        if result.endswith('; '):
3329            result = result[:-2]
3330
3331        return result + self.formatDict[Lexeme.closeCurly]

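The dispatch above relies on each consequence item being a TypedDict with one distinguishing key: 'skills' for a base.Challenge, 'value' for a base.Effect, and 'condition' for a base.Condition. Code that needs to classify items can mirror the same test; a sketch:

    def consequenceItemKind(item):
        # Mirror the key-based dispatch used by unparseConsequence.
        if 'skills' in item:
            return 'challenge'
        elif 'value' in item:
            return 'effect'
        elif 'condition' in item:
            return 'condition'
        raise TypeError(f"Not a consequence item: {item!r}")
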
def parseMechanismSpecifierFromTokens( self, tokens: List[Union[Lexeme, str]], start: int = 0) -> Tuple[exploration.base.MechanismSpecifier, int]:
3333    def parseMechanismSpecifierFromTokens(
3334        self,
3335        tokens: LexedTokens,
3336        start: int = 0
3337    ) -> Tuple[base.MechanismSpecifier, int]:
3338        """
3339        Parses a mechanism specifier starting at the specified position
3340        in the given tokens list. No ending position is specified, but
3341        instead this function returns a tuple containing the parsed
3342        `base.MechanismSpecifier` along with an index in the tokens list
3343        where the end of the specifier was found.
3344
3345        For example:
3346
3347        >>> pf = ParseFormat()
3348        >>> pf.parseMechanismSpecifierFromTokens(['m'])
3349        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3350 name='m'), 0)
3351        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'])
3352        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3353 name='a'), 0)
3354        >>> pf.parseMechanismSpecifierFromTokens(['a', 'm'], 1)
3355        (MechanismSpecifier(domain=None, zone=None, decision=None,\
3356 name='m'), 1)
3357        >>> pf.parseMechanismSpecifierFromTokens(
3358        ...     ['a', Lexeme.domainSeparator, 'm']
3359        ... )
3360        (MechanismSpecifier(domain='a', zone=None, decision=None,\
3361 name='m'), 2)
3362        >>> pf.parseMechanismSpecifierFromTokens(
3363        ...     ['a', Lexeme.zoneSeparator, 'm']
3364        ... )
3365        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3366 name='m'), 2)
3367        >>> pf.parseMechanismSpecifierFromTokens(
3368        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.zoneSeparator, 'm']
3369        ... )
3370        (MechanismSpecifier(domain=None, zone='a', decision='b',\
3371 name='m'), 4)
3372        >>> pf.parseMechanismSpecifierFromTokens(
3373        ...     ['a', Lexeme.domainSeparator, 'b', Lexeme.zoneSeparator, 'm']
3374        ... )
3375        (MechanismSpecifier(domain='a', zone=None, decision='b',\
3376 name='m'), 4)
3377        >>> pf.parseMechanismSpecifierFromTokens(
3378        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm']
3379        ... )
3380        (MechanismSpecifier(domain=None, zone=None, decision='a',\
3381 name='b'), 2)
3382        >>> pf.parseMechanismSpecifierFromTokens(
3383        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3384        ...     1
3385        ... )
3386        Traceback (most recent call last):
3387        ...
3388        exploration.parsing.ParseError...
3389        >>> pf.parseMechanismSpecifierFromTokens(
3390        ...     ['a', Lexeme.zoneSeparator, 'b', Lexeme.domainSeparator, 'm'],
3391        ...     2
3392        ... )
3393        (MechanismSpecifier(domain='b', zone=None, decision=None,\
3394 name='m'), 4)
3395        >>> pf.parseMechanismSpecifierFromTokens(
3396        ...     [
3397        ...         'a',
3398        ...         Lexeme.domainSeparator,
3399        ...         'b',
3400        ...         Lexeme.zoneSeparator,
3401        ...         'c',
3402        ...         Lexeme.zoneSeparator,
3403        ...         'm'
3404        ...     ]
3405        ... )
3406        (MechanismSpecifier(domain='a', zone='b', decision='c', name='m'), 6)
3407        >>> pf.parseMechanismSpecifierFromTokens(
3408        ...     [
3409        ...         'a',
3410        ...         Lexeme.domainSeparator,
3411        ...         'b',
3412        ...         Lexeme.zoneSeparator,
3413        ...         'c',
3414        ...         Lexeme.zoneSeparator,
3415        ...         'm'
3416        ...     ],
3417        ...     2
3418        ... )
3419        (MechanismSpecifier(domain=None, zone='b', decision='c',\
3420 name='m'), 6)
3421        >>> pf.parseMechanismSpecifierFromTokens(
3422        ...     [
3423        ...         'a',
3424        ...         Lexeme.domainSeparator,
3425        ...         'b',
3426        ...         Lexeme.zoneSeparator,
3427        ...         'c',
3428        ...         Lexeme.zoneSeparator,
3429        ...         'm'
3430        ...     ],
3431        ...     4
3432        ... )
3433        (MechanismSpecifier(domain=None, zone=None, decision='c',\
3434 name='m'), 6)
3435        >>> pf.parseMechanismSpecifierFromTokens(
3436        ...     [
3437        ...         'roomB',
3438        ...         Lexeme.zoneSeparator,
3439        ...         'switch',
3440        ...         Lexeme.mechanismSeparator,
3441        ...         'on'
3442        ...     ]
3443        ... )
3444        (MechanismSpecifier(domain=None, zone=None, decision='roomB',\
3445 name='switch'), 2)
3446        """
3447        start, tEnd, nLeft = normalizeEnds(tokens, start, -1)
3448
3449        try:
3450            dSpec, dEnd = self.parseDecisionSpecifierFromTokens(
3451                tokens,
3452                start
3453            )
3454        except ParseError:
3455            raise ParseError(
3456                "Failed to parse mechanism specifier: couldn't parse"
3457                " initial mechanism name."
3458            )
3459
3460        if isinstance(dSpec, int):
3461            raise ParseError(
3462                f"Invalid mechanism specifier: cannot use a decision ID"
3463                f" as the decision part. Got: {tokens[start:]}"
3464            )
3465            # TODO: Allow that?
3466
3467        mDomain = dSpec.domain
3468        if dEnd == tEnd or dEnd == tEnd - 1:
3469            return (
3470                base.MechanismSpecifier(
3471                    domain=mDomain,
3472                    zone=None,
3473                    decision=dSpec.zone,
3474                    name=dSpec.name
3475                ),
3476                dEnd
3477            )
3478
3479        sep = tokens[dEnd + 1]
3480        after = tokens[dEnd + 2]
3481
3482        if sep == Lexeme.zoneSeparator:
3483            if isinstance(after, Lexeme):
3484                return (
3485                    base.MechanismSpecifier(
3486                        domain=mDomain,
3487                        zone=None,
3488                        decision=dSpec.zone,
3489                        name=dSpec.name
3490                    ),
3491                    dEnd
3492                )
3493            else:
3494                return (
3495                    base.MechanismSpecifier(
3496                        domain=mDomain,
3497                        zone=dSpec.zone,
3498                        decision=dSpec.name,
3499                        name=after
3500                    ),
3501                    dEnd + 2
3502                )
3503        else:
3504            return (
3505                base.MechanismSpecifier(
3506                    domain=mDomain,
3507                    zone=None,
3508                    decision=dSpec.zone,
3509                    name=dSpec.name
3510                ),
3511                dEnd
3512            )

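Since a mechanism name is often followed by a mechanismSeparator and a state (as in the last doctest above), the returned end index lets a caller pick up the state afterwards. A sketch:

    tokens = ['roomB', Lexeme.zoneSeparator, 'switch',
              Lexeme.mechanismSeparator, 'on']
    mSpec, mEnd = pf.parseMechanismSpecifierFromTokens(tokens)
    # mSpec.name == 'switch' and mEnd == 2, per the doctest above
    if (
        mEnd + 2 < len(tokens)
        and tokens[mEnd + 1] == Lexeme.mechanismSeparator
    ):
        state = tokens[mEnd + 2]  # 'on'
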
def groupReqTokens( self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> List[Union[Lexeme, str, List[Union[Lexeme, str, ForwardRef('GroupedTokens')]]]]:
3514    def groupReqTokens(
3515        self,
3516        tokens: LexedTokens,
3517        start: int = 0,
3518        end: int = -1
3519    ) -> GroupedTokens:
3520        """
3521        Groups tokens for a requirement, stripping out all parentheses
3522        while replacing parenthesized expressions with sub-lists of tokens.
3523
3524        For example:
3525
3526        >>> pf = ParseFormat()
3527        >>> pf.groupReqTokens(['jump'])
3528        ['jump']
3529        >>> pf.groupReqTokens([Lexeme.openParen, 'jump'])
3530        Traceback (most recent call last):
3531        ...
3532        exploration.parsing.ParseError...
3533        >>> pf.groupReqTokens([Lexeme.closeParen, 'jump'])
3534        Traceback (most recent call last):
3535        ...
3536        exploration.parsing.ParseError...
3537        >>> pf.groupReqTokens(['jump', Lexeme.closeParen])
3538        Traceback (most recent call last):
3539        ...
3540        exploration.parsing.ParseError...
3541        >>> pf.groupReqTokens([Lexeme.openParen, 'jump', Lexeme.closeParen])
3542        [['jump']]
3543        >>> pf.groupReqTokens(
3544        ...     [
3545        ...         Lexeme.openParen,
3546        ...         'jump',
3547        ...         Lexeme.orBar,
3548        ...         'climb',
3549        ...         Lexeme.closeParen,
3550        ...         Lexeme.ampersand,
3551        ...         'crawl',
3552        ...     ]
3553        ... )
3554        [['jump', <Lexeme.orBar: ...>, 'climb'], <Lexeme.ampersand: ...>,\
3555 'crawl']
3556        """
3557        start, end, nTokens = normalizeEnds(tokens, start, end)
3558        if nTokens == 0:
3559            raise ParseError("Ran out of tokens.")
3560
3561        resultsStack: List[GroupedTokens] = [[]]
3562        here = start
3563        while here <= end:
3564            token = tokens[here]
3565            here += 1
3566            if token == Lexeme.closeParen:
3567                if len(resultsStack) == 1:
3568                    raise ParseError(
3569                        f"Too many closing parens at index {here - 1}"
3570                        f" in:\n{tokens[start:end + 1]}"
3571                    )
3572                else:
3573                    closed = resultsStack.pop()
3574                    resultsStack[-1].append(closed)
3575            elif token == Lexeme.openParen:
3576                resultsStack.append([])
3577            else:
3578                resultsStack[-1].append(token)
3579        if len(resultsStack) != 1:
3580            raise ParseError(
3581                f"Mismatched parentheses in tokens:"
3582                f"\n{tokens[start:end + 1]}"
3583            )
3584        return resultsStack[0]

Groups tokens for a requirement, stripping out all parentheses but replacing parenthesized expressions with sub-lists of tokens.

For example:

>>> pf = ParseFormat()
>>> pf.groupReqTokens(['jump'])
['jump']
>>> pf.groupReqTokens([Lexeme.openParen, 'jump'])
Traceback (most recent call last):
...
ParseError...
>>> pf.groupReqTokens([Lexeme.closeParen, 'jump'])
Traceback (most recent call last):
...
ParseError...
>>> pf.groupReqTokens(['jump', Lexeme.closeParen])
Traceback (most recent call last):
...
ParseError...
>>> pf.groupReqTokens([Lexeme.openParen, 'jump', Lexeme.closeParen])
[['jump']]
>>> pf.groupReqTokens(
...     [
...         Lexeme.openParen,
...         'jump',
...         Lexeme.orBar,
...         'climb',
...         Lexeme.closeParen,
...         Lexeme.ampersand,
...         'crawl',
...     ]
... )
[['jump', <Lexeme.orBar: ...>, 'climb'], <Lexeme.ampersand: ...>, 'crawl']
def groupReqTokensByPrecedence( self, tokenGroups: List[Union[Lexeme, str, List[Union[Lexeme, str, ForwardRef('GroupedTokens')]]]]) -> List[Union[Lexeme, exploration.base.Requirement, List[Union[Lexeme, exploration.base.Requirement, ForwardRef('GroupedRequirementParts')]]]]:
3586    def groupReqTokensByPrecedence(
3587        self,
3588        tokenGroups: GroupedTokens
3589    ) -> GroupedRequirementParts:
3590        """
3591        Re-groups requirement tokens that have been grouped using
3592        `groupReqTokens` according to operator precedence, effectively
3593        creating an equivalent result which would have been obtained by
3594        `groupReqTokens` if all possible non-redundant explicit
3595        parentheses had been included.
3596
3597        Also turns each leaf part into a `Requirement`.
3598
3599        TODO: Make this actually reasonably efficient T_T
3600
3601        Examples:
3602
3603        >>> pf = ParseFormat()
3604        >>> r = pf.parseRequirement('capability&roomB::switch:on')
3605        >>> pf.groupReqTokensByPrecedence(
3606        ...     [
3607        ...         ['jump', Lexeme.orBar, 'climb'],
3608        ...         Lexeme.ampersand,
3609        ...         Lexeme.notMarker,
3610        ...         'coin',
3611        ...         Lexeme.tokenCount,
3612        ...         '3'
3613        ...     ]
3614        ... )
3615        [\
3616[\
3617[[ReqCapability('jump'), <Lexeme.orBar: ...>, ReqCapability('climb')]],\
3618 <Lexeme.ampersand: ...>,\
3619 [<Lexeme.notMarker: ...>, ReqTokens('coin', 3)]\
3620]\
3621]
3622        """
3623        subgrouped: List[Union[Lexeme, str, GroupedRequirementParts]] = []
3624        # First recursively group all parenthesized expressions
3625        for i, item in enumerate(tokenGroups):
3626            if isinstance(item, list):
3627                subgrouped.append(self.groupReqTokensByPrecedence(item))
3628            else:
3629                subgrouped.append(item)
3630
3631        # Now process all leaf requirements
3632        leavesConverted: GroupedRequirementParts = []
3633        i = 0
3634        while i < len(subgrouped):
3635            gItem = subgrouped[i]
3636
3637            if isinstance(gItem, list):
3638                leavesConverted.append(gItem)
3639            elif isinstance(gItem, Lexeme):
3640                leavesConverted.append(gItem)
3641            elif i == len(subgrouped) - 1:
3642                if isinstance(gItem, Lexeme):  # unreachable: Lexemes handled above
3643                    raise ParseError(
3644                        f"Lexeme at end of requirement. Grouped tokens:"
3645                        f"\n{tokenGroups}"
3646                    )
3647                else:
3648                    assert isinstance(gItem, str)
3649                    if gItem == 'X':
3650                        leavesConverted.append(base.ReqImpossible())
3651                    elif gItem == 'O':
3652                        leavesConverted.append(base.ReqNothing())
3653                    else:
3654                        leavesConverted.append(base.ReqCapability(gItem))
3655            else:
3656                assert isinstance(gItem, str)
3657                try:
3658                    # TODO: Avoid list copy here...
3659                    couldBeMechanismSpecifier: LexedTokens = []
3660                    for ii in range(i, len(subgrouped)):
3661                        lexemeOrStr = subgrouped[ii]
3662                        if isinstance(lexemeOrStr, (Lexeme, str)):
3663                            couldBeMechanismSpecifier.append(lexemeOrStr)
3664                        else:
3665                            break
3666                    mSpec, mEnd = self.parseMechanismSpecifierFromTokens(
3667                        couldBeMechanismSpecifier
3668                    )
3669                    mEnd += i
3670                    if (
3671                        mEnd >= len(subgrouped) - 2
3672                     or subgrouped[mEnd + 1] != Lexeme.mechanismSeparator
3673                    ):
3674                        raise ParseError("Not a mechanism requirement.")
3675
3676                    mState = subgrouped[mEnd + 2]
3677                    if not isinstance(mState, base.MechanismState):
3678                        raise ParseError("Not a mechanism requirement.")
3679                    leavesConverted.append(base.ReqMechanism(mSpec, mState))
3680                    i = mEnd + 2  # + 1 will happen automatically below
3681                except ParseError:
3682                    following = subgrouped[i + 1]
3683                    if following in (
3684                        Lexeme.tokenCount,
3685                        Lexeme.mechanismSeparator,
3686                        Lexeme.wigglyLine,
3687                        Lexeme.skillLevel
3688                    ):
3689                        if (
3690                            i == len(subgrouped) - 2
3691                         or isinstance(subgrouped[i + 2], Lexeme)
3692                        ):
3693                            if following == Lexeme.wigglyLine:
3694                                # Default tag value is 1
3695                                leavesConverted.append(base.ReqTag(gItem, 1))
3696                                i += 1  # another +1 automatic below
3697                            else:
3698                                raise ParseError(
3699                                    f"Lexeme at end of requirement. Grouped"
3700                                    f" tokens:\n{tokenGroups}"
3701                                )
3702                        else:
3703                            afterwards = subgrouped[i + 2]
3704                            if not isinstance(afterwards, str):
3705                                raise ParseError(
3706                                    f"Lexeme after token/mechanism/tag/skill"
3707                                    f" separator at index {i}."
3708                                    f" Grouped tokens:\n{tokenGroups}"
3709                                )
3710                            i += 2  # another +1 automatic below
3711                            if following == Lexeme.tokenCount:
3712                                try:
3713                                    tCount = int(afterwards)
3714                                except ValueError:
3715                                    raise ParseError(
3716                                        f"Token count could not be"
3717                                        f" parsed as an integer:"
3718                                        f" {afterwards!r}. Grouped"
3719                                        f" tokens:\n{tokenGroups}"
3720                                    )
3721                                leavesConverted.append(
3722                                    base.ReqTokens(gItem, tCount)
3723                                )
3724                            elif following == Lexeme.mechanismSeparator:
3725                                leavesConverted.append(
3726                                    base.ReqMechanism(gItem, afterwards)
3727                                )
3728                            elif following == Lexeme.wigglyLine:
3729                                tVal = self.parseTagValue(afterwards)
3730                                leavesConverted.append(
3731                                    base.ReqTag(gItem, tVal)
3732                                )
3733                            else:
3734                                assert following == Lexeme.skillLevel
3735                                try:
3736                                    sLevel = int(afterwards)
3737                                except ValueError:
3738                                    raise ParseError(
3739                                        f"Skill level could not be"
3740                                        f" parsed as an integer:"
3741                                        f" {afterwards!r}. Grouped"
3742                                        f" tokens:\n{tokenGroups}"
3743                                    )
3744                                leavesConverted.append(
3745                                    base.ReqLevel(gItem, sLevel)
3746                                )
3747                    else:
3748                        if gItem == 'X':
3749                            leavesConverted.append(base.ReqImpossible())
3750                        elif gItem == 'O':
3751                            leavesConverted.append(base.ReqNothing())
3752                        else:
3753                            leavesConverted.append(
3754                                base.ReqCapability(gItem)
3755                            )
3756
3757            # Finally, increment our index:
3758            i += 1
3759
3760        # Now group all NOT operators
3761        i = 0
3762        notsGrouped: GroupedRequirementParts = []
3763        while i < len(leavesConverted):
3764            leafItem = leavesConverted[i]
3765            group = []
3766            while leafItem == Lexeme.notMarker:
3767                group.append(leafItem)
3768                i += 1
3769                if i >= len(leavesConverted):
3770                    raise ParseError(
3771                        f"NOT at end of tokens:\n{leavesConverted}"
3772                    )
3773                leafItem = leavesConverted[i]
3774            if group == []:
3775                notsGrouped.append(leafItem)
3776                i += 1
3777            else:
3778                group.append(leafItem)
3779                i += 1
3780                notsGrouped.append(group)
3781
3782        # Next group all AND operators
3783        i = 0
3784        andsGrouped: GroupedRequirementParts = []
3785        while i < len(notsGrouped):
3786            notGroupItem = notsGrouped[i]
3787            if notGroupItem == Lexeme.ampersand:
3788                if i == len(notsGrouped) - 1:
3789                    raise ParseError(
3790                        f"AND at end of group in tokens:"
3791                        f"\n{tokenGroups}"
3792                        f"\nWhich had been grouped into:"
3793                        f"\n{notsGrouped}"
3794                    )
3795                itemAfter = notsGrouped[i + 1]
3796                if isinstance(itemAfter, Lexeme):
3797                    raise ParseError(
3798                        f"Lexeme after AND in group in tokens:"
3799                        f"\n{tokenGroups}"
3800                        f"\nWhich had been grouped into:"
3801                        f"\n{notsGrouped}"
3802                    )
3803                assert isinstance(itemAfter, (base.Requirement, list))
3804                prev = andsGrouped[-1]
3805                if (
3806                    isinstance(prev, list)
3807                and len(prev) > 2
3808                and prev[1] == Lexeme.ampersand
3809                ):
3810                    prev.extend(notsGrouped[i:i + 2])
3811                    i += 1  # with an extra +1 below
3812                else:
3813                    andsGrouped.append(
3814                        [andsGrouped.pop()] + notsGrouped[i:i + 2]
3815                    )
3816                    i += 1 # extra +1 below
3817            else:
3818                andsGrouped.append(notGroupItem)
3819            i += 1
3820
3821        # Finally check that we only have OR operators left over
3822        i = 0
3823        finalResult: GroupedRequirementParts = []
3824        while i < len(andsGrouped):
3825            andGroupItem = andsGrouped[i]
3826            if andGroupItem == Lexeme.orBar:
3827                if i == len(andsGrouped) - 1:
3828                    raise ParseError(
3829                        f"OR at end of group in tokens:"
3830                        f"\n{tokenGroups}"
3831                        f"\nWhich had been grouped into:"
3832                        f"\n{andsGrouped}"
3833                    )
3834                itemAfter = andsGrouped[i + 1]
3835                if isinstance(itemAfter, Lexeme):
3836                    raise ParseError(
3837                        f"Lexeme after OR in group in tokens:"
3838                        f"\n{tokenGroups}"
3839                        f"\nWhich had been grouped into:"
3840                        f"\n{andsGrouped}"
3841                    )
3842                assert isinstance(itemAfter, (base.Requirement, list))
3843                prev = finalResult[-1]
3844                if (
3845                    isinstance(prev, list)
3846                and len(prev) > 2
3847                and prev[1] == Lexeme.orBar
3848                ):
3849                    prev.extend(andsGrouped[i:i + 2])
3850                    i += 1  # with an extra +1 below
3851                else:
3852                    finalResult.append(
3853                        [finalResult.pop()] + andsGrouped[i:i + 2]
3854                    )
3855                    i += 1 # extra +1 below
3856            elif isinstance(andGroupItem, Lexeme):
3857                raise ParseError(
3858                    f"Leftover lexeme when grouping ORs at index {i}"
3859                    f" in grouped tokens:\n{andsGrouped}"
3860                    f"\nOriginal tokens were:\n{tokenGroups}"
3861                )
3862            else:
3863                finalResult.append(andGroupItem)
3864            i += 1
3865
3866        return finalResult

Re-groups requirement tokens that have been grouped using groupReqTokens according to operator precedence, effectively creating an equivalent result which would have been obtained by groupReqTokens if all possible non-redundant explicit parentheses had been included.

Also turns each leaf part into a Requirement.

TODO: Make this actually reasonably efficient T_T

Examples:

>>> pf = ParseFormat()
>>> r = pf.parseRequirement('capability&roomB::switch:on')
>>> pf.groupReqTokensByPrecedence(
...     [
...         ['jump', Lexeme.orBar, 'climb'],
...         Lexeme.ampersand,
...         Lexeme.notMarker,
...         'coin',
...         Lexeme.tokenCount,
...         '3'
...     ]
... )
[[[[ReqCapability('jump'), <Lexeme.orBar: ...>, ReqCapability('climb')]], <Lexeme.ampersand: ...>, [<Lexeme.notMarker: ...>, ReqTokens('coin', 3)]]]
def parseRequirementFromRegroupedTokens( self, reqGroups: List[Union[Lexeme, exploration.base.Requirement, List[Union[Lexeme, exploration.base.Requirement, ForwardRef('GroupedRequirementParts')]]]]) -> exploration.base.Requirement:
3868    def parseRequirementFromRegroupedTokens(
3869        self,
3870        reqGroups: GroupedRequirementParts
3871    ) -> base.Requirement:
3872        """
3873        Recursive parser that works once tokens have been turned into
3874        requirements at the leaves and grouped by operator precedence
3875        otherwise (see `groupReqTokensByPrecedence`).
3876
3877        TODO: Simplify by just doing this while grouping... ?
3878        """
3879        if len(reqGroups) == 0:
3880            raise ParseError("Ran out of tokens.")
3881
3882        elif len(reqGroups) == 1:
3883            only = reqGroups[0]
3884            if isinstance(only, list):
3885                return self.parseRequirementFromRegroupedTokens(only)
3886            elif isinstance(only, base.Requirement):
3887                return only
3888            else:
3889                raise ParseError(f"Invalid singleton group:\n{only}")
3890        elif reqGroups[0] == Lexeme.notMarker:
3891            if (
3892                not all(x == Lexeme.notMarker for x in reqGroups[:-1])
3893             or not isinstance(reqGroups[-1], (list, base.Requirement))
3894            ):
3895                raise ParseError(f"Invalid negation group:\n{reqGroups}")
3896            result = reqGroups[-1]
3897            if isinstance(result, list):
3898                result = self.parseRequirementFromRegroupedTokens(result)
3899            assert isinstance(result, base.Requirement)
3900            for i in range(len(reqGroups) - 1):
3901                result = base.ReqNot(result)
3902            return result
3903        elif len(reqGroups) % 2 == 0:
3904            raise ParseError(f"Even-length non-negation group:\n{reqGroups}")
3905        else:
3906            if (
3907                reqGroups[1] not in (Lexeme.ampersand, Lexeme.orBar)
3908             or not all(
3909                    reqGroups[i] == reqGroups[1]
3910                    for i in range(1, len(reqGroups), 2)
3911                )
3912            ):
3913                raise ParseError(
3914                    f"Inconsistent operator(s) in group:\n{reqGroups}"
3915                )
3916            op = reqGroups[1]
3917            operands = [
3918                (
3919                    self.parseRequirementFromRegroupedTokens(x)
3920                    if isinstance(x, list)
3921                    else x
3922                )
3923                for x in reqGroups[::2]
3924            ]
3925            if not all(isinstance(x, base.Requirement) for x in operands):
3926                raise ParseError(
3927                    f"Item not reducible to Requirement in operator group:"
3928                    f"\n{reqGroups}"
3929                )
3930            reqSequence = cast(Sequence[base.Requirement], operands)
3931            if op == Lexeme.ampersand:
3932                return base.ReqAll(reqSequence).flatten()
3933            else:
3934                assert op == Lexeme.orBar
3935                return base.ReqAny(reqSequence).flatten()

Recursive parser that works once tokens have been turned into requirements at the leaves and grouped by operator precedence otherwise (see groupReqTokensByPrecedence).

TODO: Simplify by just doing this while grouping... ?
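
The docstring above has no example of its own, so here is a minimal sketch of the expected inputs (leaves already converted to Requirements, as groupReqTokensByPrecedence produces them). The outputs shown assume that ReqAll(...).flatten() leaves a two-element group intact, which matches the results shown for parseRequirementFromGroupedTokens below:

>>> pf = ParseFormat()
>>> pf.parseRequirementFromRegroupedTokens(
...     [base.ReqCapability('one'), Lexeme.ampersand, base.ReqCapability('two')]
... )
ReqAll([ReqCapability('one'), ReqCapability('two')])
>>> pf.parseRequirementFromRegroupedTokens(
...     [Lexeme.notMarker, base.ReqCapability('one')]
... )
ReqNot(ReqCapability('one'))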

def parseRequirementFromGroupedTokens( self, tokenGroups: List[Union[Lexeme, str, List[Union[Lexeme, str, ForwardRef('GroupedTokens')]]]]) -> exploration.base.Requirement:
3937    def parseRequirementFromGroupedTokens(
3938        self,
3939        tokenGroups: GroupedTokens
3940    ) -> base.Requirement:
3941        """
3942        Parses a `base.Requirement` from a pre-grouped tokens list (see
3943        `groupReqTokens`). Uses the 'orBar', 'ampersand', 'notMarker',
3944        'tokenCount', and 'mechanismSeparator' `Lexeme`s to provide
3945        'or', 'and', and 'not' operators along with distinguishing
3946        between capabilities, tokens, and mechanisms.
3947
3948        Precedence ordering is not, then and, then or, but you are
3949        encouraged to use parentheses for explicit grouping (the
3950        'openParen' and 'closeParen' `Lexeme`s, although these must be
3951        handled by `groupReqTokens` so this function won't see them
3952        directly).
3953
3954        You can also use 'X' (without quotes) for a never-satisfied
3955        requirement, and 'O' (without quotes) for an always-satisfied
3956        requirement.
3957
3958        Note that when '!' is applied to a token requirement it flips
3959        the sense of the integer from 'must have at least this many' to
3960        'must have strictly less than this many'.
3961
3962        Raises a `ParseError` if the grouped tokens it is given cannot
3963        be parsed as a `Requirement`.
3964
3965        Examples:
3966
3967        >>> pf = ParseFormat()
3968        >>> pf.parseRequirementFromGroupedTokens(['capability'])
3969        ReqCapability('capability')
3970        >>> pf.parseRequirementFromGroupedTokens(
3971        ...     ['token', Lexeme.tokenCount, '3']
3972        ... )
3973        ReqTokens('token', 3)
3974        >>> pf.parseRequirementFromGroupedTokens(
3975        ...     ['mechanism', Lexeme.mechanismSeparator, 'state']
3976        ... )
3977        ReqMechanism('mechanism', 'state')
3978        >>> pf.parseRequirementFromGroupedTokens(
3979        ...     ['capability', Lexeme.orBar, 'token',
3980        ...      Lexeme.tokenCount, '3']
3981        ... )
3982        ReqAny([ReqCapability('capability'), ReqTokens('token', 3)])
3983        >>> pf.parseRequirementFromGroupedTokens(
3984        ...     ['one', Lexeme.ampersand, 'two', Lexeme.orBar, 'three']
3985        ... )
3986        ReqAny([ReqAll([ReqCapability('one'), ReqCapability('two')]),\
3987 ReqCapability('three')])
3988        >>> pf.parseRequirementFromGroupedTokens(
3989        ...     [
3990        ...         'one',
3991        ...         Lexeme.ampersand,
3992        ...         [
3993        ...              'two',
3994        ...              Lexeme.orBar,
3995        ...              'three'
3996        ...         ]
3997        ...     ]
3998        ... )
3999        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
4000 ReqCapability('three')])])
4001        >>> pf.parseRequirementFromTokens(['X'])
4002        ReqImpossible()
4003        >>> pf.parseRequirementFromTokens(['O'])
4004        ReqNothing()
4005        >>> pf.parseRequirementFromTokens(
4006        ...     [Lexeme.openParen, 'O', Lexeme.closeParen]
4007        ... )
4008        ReqNothing()
4009        """
4010        if len(tokenGroups) == 0:
4011            raise ParseError("Ran out of tokens.")
4012
4013        reGrouped = self.groupReqTokensByPrecedence(tokenGroups)
4014
4015        return self.parseRequirementFromRegroupedTokens(reGrouped)

Parses a base.Requirement from a pre-grouped tokens list (see groupReqTokens). Uses the 'orBar', 'ampersand', 'notMarker', 'tokenCount', and 'mechanismSeparator' Lexemes to provide 'or', 'and', and 'not' operators along with distinguishing between capabilities, tokens, and mechanisms.

Precedence ordering is not, then and, then or, but you are encouraged to use parentheses for explicit grouping (the 'openParen' and 'closeParen' Lexemes, although these must be handled by groupReqTokens so this function won't see them directly).

You can also use 'X' (without quotes) for a never-satisfied requirement, and 'O' (without quotes) for an always-satisfied requirement.

Note that when '!' is applied to a token requirement it flips the sense of the integer from 'must have at least this many' to 'must have strictly less than this many'.

Raises a ParseError if the grouped tokens it is given cannot be parsed as a Requirement.

Examples:

>>> pf = ParseFormat()
>>> pf.parseRequirementFromGroupedTokens(['capability'])
ReqCapability('capability')
>>> pf.parseRequirementFromGroupedTokens(
...     ['token', Lexeme.tokenCount, '3']
... )
ReqTokens('token', 3)
>>> pf.parseRequirementFromGroupedTokens(
...     ['mechanism', Lexeme.mechanismSeparator, 'state']
... )
ReqMechanism('mechanism', 'state')
>>> pf.parseRequirementFromGroupedTokens(
...     ['capability', Lexeme.orBar, 'token',
...      Lexeme.tokenCount, '3']
... )
ReqAny([ReqCapability('capability'), ReqTokens('token', 3)])
>>> pf.parseRequirementFromGroupedTokens(
...     ['one', Lexeme.ampersand, 'two', Lexeme.orBar, 'three']
... )
ReqAny([ReqAll([ReqCapability('one'), ReqCapability('two')]), ReqCapability('three')])
>>> pf.parseRequirementFromGroupedTokens(
...     [
...         'one',
...         Lexeme.ampersand,
...         [
...              'two',
...              Lexeme.orBar,
...              'three'
...         ]
...     ]
... )
ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'), ReqCapability('three')])])
>>> pf.parseRequirementFromTokens(['X'])
ReqImpossible()
>>> pf.parseRequirementFromTokens(['O'])
ReqNothing()
>>> pf.parseRequirementFromTokens(
...     [Lexeme.openParen, 'O', Lexeme.closeParen]
... )
ReqNothing()
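
One case the examples above do not show is negating a token requirement (the '!'-flips-the-count behavior noted earlier); a sketch consistent with the '! coin * 3' example under parseRequirement below:

>>> pf = ParseFormat()
>>> pf.parseRequirementFromGroupedTokens(
...     [Lexeme.notMarker, 'token', Lexeme.tokenCount, '3']
... )
ReqNot(ReqTokens('token', 3))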
def parseRequirementFromTokens( self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> exploration.base.Requirement:
4017    def parseRequirementFromTokens(
4018        self,
4019        tokens: LexedTokens,
4020        start: int = 0,
4021        end: int = -1
4022    ) -> base.Requirement:
4023        """
4024        Parses a requirement from `LexedTokens` by grouping them first
4025        and then using `parseRequirementFromGroupedTokens`.
4026
4027        For example:
4028
4029        >>> pf = ParseFormat()
4030        >>> pf.parseRequirementFromTokens(
4031        ...     [
4032        ...         'one',
4033        ...         Lexeme.ampersand,
4034        ...         Lexeme.openParen,
4035        ...         'two',
4036        ...         Lexeme.orBar,
4037        ...         'three',
4038        ...         Lexeme.closeParen
4039        ...     ]
4040        ... )
4041        ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'),\
4042 ReqCapability('three')])])
4043        """
4044        grouped = self.groupReqTokens(tokens, start, end)
4045        return self.parseRequirementFromGroupedTokens(grouped)

Parses a requirement from LexedTokens by grouping them first and then using parseRequirementFromGroupedTokens.

For example:

>>> pf = ParseFormat()
>>> pf.parseRequirementFromTokens(
...     [
...         'one',
...         Lexeme.ampersand,
...         Lexeme.openParen,
...         'two',
...         Lexeme.orBar,
...         'three',
...         Lexeme.closeParen
...     ]
... )
ReqAll([ReqCapability('one'), ReqAny([ReqCapability('two'), ReqCapability('three')])])
def parseRequirement(self, encoded: str) -> exploration.base.Requirement:
4047    def parseRequirement(self, encoded: str) -> base.Requirement:
4048        """
4049        Parses a `base.Requirement` from a string by calling `lex` and
4050        then feeding it into `ParseFormat.parseRequirementFromTokens`.
4051        As stated in `parseRequirementFromTokens`, the precedence
4052        binding order is NOT, then AND, then OR.
4053
4054        For example:
4055
4056        >>> pf = ParseFormat()
4057        >>> pf.parseRequirement('! coin * 3')
4058        ReqNot(ReqTokens('coin', 3))
4059        >>> pf.parseRequirement(
4060        ...     '  oneWord | "two words"|"three  words words" '
4061        ... )
4062        ReqAny([ReqCapability('oneWord'), ReqCapability('"two words"'),\
4063 ReqCapability('"three  words words"')])
4064        >>> pf.parseRequirement('words-with-dashes')
4065        ReqCapability('words-with-dashes')
4066        >>> r = pf.parseRequirement('capability&roomB::switch:on')
4067        >>> r
4068        ReqAll([ReqCapability('capability'),\
4069 ReqMechanism(MechanismSpecifier(domain=None, zone=None, decision='roomB',\
4070 name='switch'), 'on')])
4071        >>> r.unparse()
4072        '(capability&roomB::switch:on)'
4073        >>> pf.parseRequirement('!!!one')
4074        ReqNot(ReqNot(ReqNot(ReqCapability('one'))))
4075        >>> pf.parseRequirement('domain//zone::where::mechanism:state')
4076        ReqMechanism(MechanismSpecifier(domain='domain', zone='zone',\
4077 decision='where', name='mechanism'), 'state')
4078        >>> pf.parseRequirement('domain//mechanism:state')
4079        ReqMechanism(MechanismSpecifier(domain='domain', zone=None,\
4080 decision=None, name='mechanism'), 'state')
4081        >>> pf.parseRequirement('where::mechanism:state')
4082        ReqMechanism(MechanismSpecifier(domain=None, zone=None,\
4083 decision='where', name='mechanism'), 'state')
4084        >>> pf.parseRequirement('zone::where::mechanism:state')
4085        ReqMechanism(MechanismSpecifier(domain=None, zone='zone',\
4086 decision='where', name='mechanism'), 'state')
4087        >>> pf.parseRequirement('tag~')
4088        ReqTag('tag', 1)
4089        >>> pf.parseRequirement('tag~&tag2~')
4090        ReqAll([ReqTag('tag', 1), ReqTag('tag2', 1)])
4091        >>> pf.parseRequirement('tag~value|tag~3|tag~3.5|skill^3')
4092        ReqAny([ReqTag('tag', 'value'), ReqTag('tag', 3),\
4093 ReqTag('tag', 3.5), ReqLevel('skill', 3)])
4094        >>> pf.parseRequirement('tag~True|tag~False|tag~None')
4095        ReqAny([ReqTag('tag', True), ReqTag('tag', False), ReqTag('tag', None)])
4096
4097        Precedence examples:
4098
4099        >>> pf.parseRequirement('A|B&C')
4100        ReqAny([ReqCapability('A'), ReqAll([ReqCapability('B'),\
4101 ReqCapability('C')])])
4102        >>> pf.parseRequirement('A&B|C')
4103        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4104 ReqCapability('C')])
4105        >>> pf.parseRequirement('(A&B)|C')
4106        ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4107 ReqCapability('C')])
4108        >>> pf.parseRequirement('(A&B|C)&D')
4109        ReqAll([ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]),\
4110 ReqCapability('C')]), ReqCapability('D')])
4111
4112        Error examples:
4113
4114        >>> pf.parseRequirement('one ! Word')
4115        Traceback (most recent call last):
4116        ...
4117        exploration.parsing.ParseError...
4118        >>> pf.parseRequirement('a|')
4119        Traceback (most recent call last):
4120        ...
4121        exploration.parsing.ParseError...
4122        >>> pf.parseRequirement('b!')
4123        Traceback (most recent call last):
4124        ...
4125        exploration.parsing.ParseError...
4126        >>> pf.parseRequirement('*emph*')
4127        Traceback (most recent call last):
4128        ...
4129        exploration.parsing.ParseError...
4130        >>> pf.parseRequirement('one&&two')
4131        Traceback (most recent call last):
4132        ...
4133        exploration.parsing.ParseError...
4134        >>> pf.parseRequirement('one!|two')
4135        Traceback (most recent call last):
4136        ...
4137        exploration.parsing.ParseError...
4138        >>> pf.parseRequirement('one*two')
4139        Traceback (most recent call last):
4140        ...
4141        exploration.parsing.ParseError...
4142        >>> pf.parseRequirement('one*')
4143        Traceback (most recent call last):
4144        ...
4145        exploration.parsing.ParseError...
4146        >>> pf.parseRequirement('()')
4147        Traceback (most recent call last):
4148        ...
4149        exploration.parsing.ParseError...
4150        >>> pf.parseRequirement('(one)*3')
4151        Traceback (most recent call last):
4152        ...
4153        exploration.parsing.ParseError...
4154        >>> pf.parseRequirement('a:')
4155        Traceback (most recent call last):
4156        ...
4157        exploration.parsing.ParseError...
4158        >>> pf.parseRequirement('a:b:c')
4159        Traceback (most recent call last):
4160        ...
4161        exploration.parsing.ParseError...
4162        >>> pf.parseRequirement('where::capability')
4163        Traceback (most recent call last):
4164        ...
4165        exploration.parsing.ParseError...
4166        """
4167        return self.parseRequirementFromTokens(
4168            lex(encoded, self.reverseFormat)
4169        )

Parses a base.Requirement from a string by calling lex and then feeding it into ParseFormat.parseRequirementFromTokens. As stated in parseRequirementFromTokens, the precedence binding order is NOT, then AND, then OR.

For example:

>>> pf = ParseFormat()
>>> pf.parseRequirement('! coin * 3')
ReqNot(ReqTokens('coin', 3))
>>> pf.parseRequirement(
...     '  oneWord | "two words"|"three  words words" '
... )
ReqAny([ReqCapability('oneWord'), ReqCapability('"two words"'), ReqCapability('"three  words words"')])
>>> pf.parseRequirement('words-with-dashes')
ReqCapability('words-with-dashes')
>>> r = pf.parseRequirement('capability&roomB::switch:on')
>>> r
ReqAll([ReqCapability('capability'), ReqMechanism(MechanismSpecifier(domain=None, zone=None, decision='roomB', name='switch'), 'on')])
>>> r.unparse()
'(capability&roomB::switch:on)'
>>> pf.parseRequirement('!!!one')
ReqNot(ReqNot(ReqNot(ReqCapability('one'))))
>>> pf.parseRequirement('domain//zone::where::mechanism:state')
ReqMechanism(MechanismSpecifier(domain='domain', zone='zone', decision='where', name='mechanism'), 'state')
>>> pf.parseRequirement('domain//mechanism:state')
ReqMechanism(MechanismSpecifier(domain='domain', zone=None, decision=None, name='mechanism'), 'state')
>>> pf.parseRequirement('where::mechanism:state')
ReqMechanism(MechanismSpecifier(domain=None, zone=None, decision='where', name='mechanism'), 'state')
>>> pf.parseRequirement('zone::where::mechanism:state')
ReqMechanism(MechanismSpecifier(domain=None, zone='zone', decision='where', name='mechanism'), 'state')
>>> pf.parseRequirement('tag~')
ReqTag('tag', 1)
>>> pf.parseRequirement('tag~&tag2~')
ReqAll([ReqTag('tag', 1), ReqTag('tag2', 1)])
>>> pf.parseRequirement('tag~value|tag~3|tag~3.5|skill^3')
ReqAny([ReqTag('tag', 'value'), ReqTag('tag', 3), ReqTag('tag', 3.5), ReqLevel('skill', 3)])
>>> pf.parseRequirement('tag~True|tag~False|tag~None')
ReqAny([ReqTag('tag', True), ReqTag('tag', False), ReqTag('tag', None)])

Precedence examples:

>>> pf.parseRequirement('A|B&C')
ReqAny([ReqCapability('A'), ReqAll([ReqCapability('B'), ReqCapability('C')])])
>>> pf.parseRequirement('A&B|C')
ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]), ReqCapability('C')])
>>> pf.parseRequirement('(A&B)|C')
ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]), ReqCapability('C')])
>>> pf.parseRequirement('(A&B|C)&D')
ReqAll([ReqAny([ReqAll([ReqCapability('A'), ReqCapability('B')]), ReqCapability('C')]), ReqCapability('D')])

Error examples:

>>> pf.parseRequirement('one ! Word')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('a|')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('b!')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('*emph*')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('one&&two')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('one!|two')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('one*two')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('one*')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('()')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('(one)*3')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('a:')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('a:b:c')
Traceback (most recent call last):
...
ParseError...
>>> pf.parseRequirement('where::capability')
Traceback (most recent call last):
...
ParseError...
def parseSkillCombinationFromTokens( self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> Union[str, exploration.base.SkillCombination]:
4171    def parseSkillCombinationFromTokens(
4172        self,
4173        tokens: LexedTokens,
4174        start: int = 0,
4175        end: int = -1
4176    ) -> Union[base.Skill, base.SkillCombination]:
4177        """
4178        Parses a skill combination from the specified range within the
4179        given tokens list. If just a single string token is selected, it
4180        will be returned as a `base.BestSkill` with just that skill
4181        inside.
4182
4183        For example:
4184
4185        >>> pf = ParseFormat()
4186        >>> pf.parseSkillCombinationFromTokens(['climbing'])
4187        BestSkill('climbing')
4188        >>> tokens = [
4189        ...     'best',
4190        ...     Lexeme.openParen,
4191        ...     'brains',
4192        ...     Lexeme.sepOrDelay,
4193        ...     'brawn',
4194        ...     Lexeme.closeParen,
4195        ... ]
4196        >>> pf.parseSkillCombinationFromTokens(tokens)
4197        BestSkill('brains', 'brawn')
4198        >>> tokens[2] = '3'  # not a lexeme so it's a string
4199        >>> pf.parseSkillCombinationFromTokens(tokens)
4200        BestSkill(3, 'brawn')
4201        >>> tokens = [
4202        ...     Lexeme.wigglyLine,
4203        ...     Lexeme.wigglyLine,
4204        ...     'yes',
4205        ... ]
4206        >>> pf.parseSkillCombinationFromTokens(tokens)
4207        InverseSkill(InverseSkill('yes'))
4208        """
4209        start, end, nTokens = normalizeEnds(tokens, start, end)
4210
4211        first = tokens[start]
4212        if nTokens == 1:
4213            if isinstance(first, base.Skill):
4214                try:
4215                    level = int(first)
4216                    return base.BestSkill(level)
4217                except ValueError:
4218                    return base.BestSkill(first)
4219            else:
4220                raise ParseError(
4221                    f"Invalid SkillCombination:\n{tokens[start:end + 1]}"
4222                )
4223
4224        if first == Lexeme.wigglyLine:
4225            inv = self.parseSkillCombinationFromTokens(
4226                tokens,
4227                start + 1,
4228                end
4229            )
4230            if isinstance(inv, base.BestSkill) and len(inv.skills) == 1:
4231                return base.InverseSkill(inv.skills[0])
4232            else:
4233                return base.InverseSkill(inv)
4234
4235        second = tokens[start + 1]
4236        if second != Lexeme.openParen:
4237            raise ParseError(
4238                f"Invalid SkillCombination (missing paren):"
4239                f"\n{tokens[start:end + 1]}"
4240            )
4241
4242        parenEnd = self.matchingBrace(
4243            tokens,
4244            start + 1,
4245            Lexeme.openParen,
4246            Lexeme.closeParen
4247        )
4248        if parenEnd != end:
4249            raise ParseError(
4250                f"Extra junk after SkillCombination:"
4251                f"\n{tokens[parenEnd + 1:end + 1]}"
4252            )
4253
4254        if first == 'if':
4255            parts = list(
4256                findSeparatedParts(
4257                    tokens,
4258                    Lexeme.sepOrDelay,
4259                    start + 2,
4260                    end - 1,
4261                    Lexeme.openParen,
4262                    Lexeme.closeParen
4263                )
4264            )
4265            if len(parts) != 3:
4266                raise ParseError(
4267                    f"Wrong number of parts for ConditionalSkill (needs"
4268                    f" 3, got {len(parts)}):"
4269                    f"\n{tokens[start + 2:end]}"
4270                )
4271            reqStart, reqEnd = parts[0]
4272            ifStart, ifEnd = parts[1]
4273            elseStart, elseEnd = parts[2]
4274            return base.ConditionalSkill(
4275                self.parseRequirementFromTokens(tokens, reqStart, reqEnd),
4276                self.parseSkillCombinationFromTokens(tokens, ifStart, ifEnd),
4277                self.parseSkillCombinationFromTokens(
4278                    tokens,
4279                    elseStart,
4280                    elseEnd
4281                ),
4282            )
4283        elif first in ('sum', 'best', 'worst'):
4284            make: type[base.SkillCombination]
4285            if first == 'sum':
4286                make = base.CombinedSkill
4287            elif first == 'best':
4288                make = base.BestSkill
4289            else:
4290                make = base.WorstSkill
4291
4292            subs = []
4293            for partStart, partEnd in findSeparatedParts(
4294                tokens,
4295                Lexeme.sepOrDelay,
4296                start + 2,
4297                end - 1,
4298                Lexeme.openParen,
4299                Lexeme.closeParen
4300            ):
4301                sub = self.parseSkillCombinationFromTokens(
4302                    tokens,
4303                    partStart,
4304                    partEnd
4305                )
4306                if (
4307                    isinstance(sub, base.BestSkill)
4308                and len(sub.skills) == 1
4309                ):
4310                    subs.append(sub.skills[0])
4311                else:
4312                    subs.append(sub)
4313
4314            return make(*subs)
4315        else:
4316            raise ParseError(
4317                f"Invalid SkillCombination:\n{tokens[start:end + 1]}"
4318            )

Parses a skill combination from the specified range within the given tokens list. If just a single string token is selected, it will be returned as a base.BestSkill with just that skill inside.

For example:

>>> pf = ParseFormat()
>>> pf.parseSkillCombinationFromTokens(['climbing'])
BestSkill('climbing')
>>> tokens = [
...     'best',
...     Lexeme.openParen,
...     'brains',
...     Lexeme.sepOrDelay,
...     'brawn',
...     Lexeme.closeParen,
... ]
>>> pf.parseSkillCombinationFromTokens(tokens)
BestSkill('brains', 'brawn')
>>> tokens[2] = '3'  # not a lexeme so it's a string
>>> pf.parseSkillCombinationFromTokens(tokens)
BestSkill(3, 'brawn')
>>> tokens = [
...     Lexeme.wigglyLine,
...     Lexeme.wigglyLine,
...     'yes',
... ]
>>> pf.parseSkillCombinationFromTokens(tokens)
InverseSkill(InverseSkill('yes'))
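
The 'sum' and 'worst' keywords (handled by the same branch of the source as 'best') are not exercised above; a hedged sketch, using equality checks since the exact repr of CombinedSkill and WorstSkill is not shown elsewhere in these docs:

>>> pf = ParseFormat()
>>> pf.parseSkillCombinationFromTokens(
...     ['sum', Lexeme.openParen, 'brains', Lexeme.sepOrDelay,
...      'brawn', Lexeme.closeParen]
... ) == base.CombinedSkill('brains', 'brawn')
True
>>> pf.parseSkillCombinationFromTokens(
...     ['worst', Lexeme.openParen, 'brains', Lexeme.sepOrDelay,
...      'brawn', Lexeme.closeParen]
... ) == base.WorstSkill('brains', 'brawn')
True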
def parseSkillCombination(self, encoded: str) -> exploration.base.SkillCombination:
4320    def parseSkillCombination(
4321        self,
4322        encoded: str
4323    ) -> base.SkillCombination:
4324        """
4325        Parses a `SkillCombination` from a string. Calls `lex` and then
4326        `parseSkillCombinationFromTokens`.
4327        """
4328        result = self.parseSkillCombinationFromTokens(
4329            lex(encoded, self.reverseFormat)
4330        )
4331        if not isinstance(result, base.SkillCombination):
4332            return base.BestSkill(result)
4333        else:
4334            return result

Parses a SkillCombination from a string. Calls lex and then parseSkillCombinationFromTokens.
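
No example is given here, so here is a minimal sketch based on the token-level examples above (the 'if(...)' form is inferred from the conditional branch of parseSkillCombinationFromTokens, so treat it as an assumption rather than documented syntax):

>>> pf = ParseFormat()
>>> pf.parseSkillCombination('best(brains, brawn)')
BestSkill('brains', 'brawn')
>>> sc = pf.parseSkillCombination('if(fire, brains, brawn)')
>>> isinstance(sc, base.ConditionalSkill)
True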

def parseConditionFromTokens( self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> exploration.base.Condition:
4336    def parseConditionFromTokens(
4337        self,
4338        tokens: LexedTokens,
4339        start: int = 0,
4340        end: int = -1
4341    ) -> base.Condition:
4342        """
4343        Parses a `base.Condition` from a lexed tokens list. For example:
4344
4345        >>> pf = ParseFormat()
4346        >>> tokens = [
4347        ...     Lexeme.doubleQuestionmark,
4348        ...     Lexeme.openParen,
4349        ...     "fire",
4350        ...     Lexeme.ampersand,
4351        ...     "water",
4352        ...     Lexeme.closeParen,
4353        ...     Lexeme.openCurly,
4354        ...     "gain",
4355        ...     "wind",
4356        ...     Lexeme.closeCurly,
4357        ...     Lexeme.openCurly,
4358        ...     Lexeme.closeCurly,
4359        ... ]
4360        >>> pf.parseConditionFromTokens(tokens) == base.condition(
4361        ...     condition=base.ReqAll([
4362        ...         base.ReqCapability('fire'),
4363        ...         base.ReqCapability('water')
4364        ...     ]),
4365        ...     consequence=[base.effect(gain='wind')]
4366        ... )
4367        True
4368        """
4369        start, end, nTokens = normalizeEnds(tokens, start, end)
4370        if nTokens < 8:
4371            raise ParseError(
4372                f"A Condition requires at least 8 tokens (got {nTokens})."
4373            )
4374        if tokens[start] != Lexeme.doubleQuestionmark:
4375            raise ParseError(
4376                f"A Condition must start with"
4377                f" {repr(self.formatDict[Lexeme.doubleQuestionmark])}"
4378            )
4379        try:
4380            consequenceStart = tokens.index(Lexeme.openCurly, start)
4381        except ValueError:
4382            raise ParseError("A condition must include a consequence block.")
4383        consequenceEnd = self.matchingBrace(tokens, consequenceStart)
4384        altStart = consequenceEnd + 1
4385        altEnd = self.matchingBrace(tokens, altStart)
4386
4387        if altEnd != end:
4388            raise ParseError(
4389                f"Junk after condition:\n{tokens[altEnd + 1: end + 1]}"
4390            )
4391
4392        return base.condition(
4393            condition=self.parseRequirementFromTokens(
4394                tokens,
4395                start + 1,
4396                consequenceStart - 1
4397            ),
4398            consequence=self.parseConsequenceFromTokens(
4399                tokens,
4400                consequenceStart,
4401                consequenceEnd
4402            ),
4403            alternative=self.parseConsequenceFromTokens(
4404                tokens,
4405                altStart,
4406                altEnd
4407            )
4408        )

Parses a base.Condition from a lexed tokens list. For example:

>>> pf = ParseFormat()
>>> tokens = [
...     Lexeme.doubleQuestionmark,
...     Lexeme.openParen,
...     "fire",
...     Lexeme.ampersand,
...     "water",
...     Lexeme.closeParen,
...     Lexeme.openCurly,
...     "gain",
...     "wind",
...     Lexeme.closeCurly,
...     Lexeme.openCurly,
...     Lexeme.closeCurly,
... ]
>>> pf.parseConditionFromTokens(tokens) == base.condition(
...     condition=base.ReqAll([
...         base.ReqCapability('fire'),
...         base.ReqCapability('water')
...     ]),
...     consequence=[base.effect(gain='wind')]
... )
True
def parseCondition(self, encoded: str) -> exploration.base.Condition:
4410    def parseCondition(
4411        self,
4412        encoded: str
4413    ) -> base.Condition:
4414        """
4415        Lexes the given string and then calls `parseConditionFromTokens`
4416        to return a `base.Condition`.
4417        """
4418        return self.parseConditionFromTokens(
4419            lex(encoded, self.reverseFormat)
4420        )

Lexes the given string and then calls parseConditionFromTokens to return a base.Condition.
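
A usage sketch mirroring the token-level example above (the markup assumes the default format):

>>> pf = ParseFormat()
>>> pf.parseCondition('??(fire&water){gain wind}{}') == base.condition(
...     condition=base.ReqAll([
...         base.ReqCapability('fire'),
...         base.ReqCapability('water')
...     ]),
...     consequence=[base.effect(gain='wind')]
... )
True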

def parseChallengeFromTokens( self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> exploration.base.Challenge:
4422    def parseChallengeFromTokens(
4423        self,
4424        tokens: LexedTokens,
4425        start: int = 0,
4426        end: int = -1
4427    ) -> base.Challenge:
4428        """
4429        Parses a `base.Challenge` from a lexed tokens list.
4430
4431        For example:
4432
4433        >>> pf = ParseFormat()
4434        >>> tokens = [
4435        ...     Lexeme.angleLeft,
4436        ...     '2',
4437        ...     Lexeme.angleRight,
4438        ...     'best',
4439        ...     Lexeme.openParen,
4440        ...     "chess",
4441        ...     Lexeme.sepOrDelay,
4442        ...     "checkers",
4443        ...     Lexeme.closeParen,
4444        ...     Lexeme.openCurly,
4445        ...     "gain",
4446        ...     "coin",
4447        ...     Lexeme.tokenCount,
4448        ...     "5",
4449        ...     Lexeme.closeCurly,
4450        ...     Lexeme.angleRight,
4451        ...     Lexeme.openCurly,
4452        ...     "lose",
4453        ...     "coin",
4454        ...     Lexeme.tokenCount,
4455        ...     "5",
4456        ...     Lexeme.closeCurly,
4457        ... ]
4458        >>> c = pf.parseChallengeFromTokens(tokens)
4459        >>> c['skills'] == base.BestSkill('chess', 'checkers')
4460        True
4461        >>> c['level']
4462        2
4463        >>> c['success'] == [base.effect(gain=('coin', 5))]
4464        True
4465        >>> c['failure'] == [base.effect(lose=('coin', 5))]
4466        True
4467        >>> c['outcome']
4468        False
4469        >>> c == base.challenge(
4470        ...     skills=base.BestSkill('chess', 'checkers'),
4471        ...     level=2,
4472        ...     success=[base.effect(gain=('coin', 5))],
4473        ...     failure=[base.effect(lose=('coin', 5))],
4474        ...     outcome=False
4475        ... )
4476        True
4477        >>> t2 = ['hi'] + tokens + ['bye']  # parsing only part of the list
4478        >>> c == pf.parseChallengeFromTokens(t2, 1, -2)
4479        True
4480        """
4481        start, end, nTokens = normalizeEnds(tokens, start, end)
4482        if nTokens < 8:
4483            raise ParseError(
4484                f"Not enough tokens for a challenge: {nTokens}"
4485            )
4486        if tokens[start] != Lexeme.angleLeft:
4487            raise ParseError(
4488                f"Challenge must start with"
4489                f" {repr(self.formatDict[Lexeme.angleLeft])}"
4490            )
4491        levelStr = tokens[start + 1]
4492        if isinstance(levelStr, Lexeme):
4493            raise ParseError(
4494                f"Challenge must start with a level in angle brackets"
4495                f" (got {repr(self.formatDict[levelStr])})."
4496            )
4497        if tokens[start + 2] != Lexeme.angleRight:
4498            raise ParseError(
4499                f"Challenge must include"
4500                f" {repr(self.formatDict[Lexeme.angleRight])} after"
4501                f" the level."
4502            )
4503        try:
4504            level = int(levelStr)
4505        except ValueError:
4506            raise ParseError(
4507                f"Challenge level must be an integer (got"
4508                f" {repr(tokens[start + 1])})."
4509            )
4510        try:
4511            successStart = tokens.index(Lexeme.openCurly, start)
4512            skillsEnd = successStart - 1
4513        except ValueError:
4514            raise ParseError("A challenge must include a consequence block.")
4515
4516        outcome: Optional[bool] = None
4517        if tokens[skillsEnd] == Lexeme.angleRight:
4518            skillsEnd -= 1
4519            outcome = True
4520        successEnd = self.matchingBrace(tokens, successStart)
4521        failStart = successEnd + 1
4522        if tokens[failStart] == Lexeme.angleRight:
4523            failStart += 1
4524            if outcome is not None:
4525                raise ParseError(
4526                    "Cannot indicate both success and failure as"
4527                    " outcomes in a challenge."
4528                )
4529            outcome = False
4530        failEnd = self.matchingBrace(tokens, failStart)
4531
4532        if failEnd != end:
4533            raise ParseError(
4534                f"Junk after challenge:\n{tokens[failEnd + 1:end + 1]}"
4535            )
4536
4537        skills = self.parseSkillCombinationFromTokens(
4538            tokens,
4539            start + 3,
4540            skillsEnd
4541        )
4542        if isinstance(skills, base.Skill):
4543            skills = base.BestSkill(skills)
4544
4545        return base.challenge(
4546            level=level,
4547            outcome=outcome,
4548            skills=skills,
4549            success=self.parseConsequenceFromTokens(
4550                tokens[successStart:successEnd + 1]
4551            ),
4552            failure=self.parseConsequenceFromTokens(
4553                tokens[failStart:failEnd + 1]
4554            )
4555        )

Parses a base.Challenge from a lexed tokens list.

For example:

>>> pf = ParseFormat()
>>> tokens = [
...     Lexeme.angleLeft,
...     '2',
...     Lexeme.angleRight,
...     'best',
...     Lexeme.openParen,
...     "chess",
...     Lexeme.sepOrDelay,
...     "checkers",
...     Lexeme.closeParen,
...     Lexeme.openCurly,
...     "gain",
...     "coin",
...     Lexeme.tokenCount,
...     "5",
...     Lexeme.closeCurly,
...     Lexeme.angleRight,
...     Lexeme.openCurly,
...     "lose",
...     "coin",
...     Lexeme.tokenCount,
...     "5",
...     Lexeme.closeCurly,
... ]
>>> c = pf.parseChallengeFromTokens(tokens)
>>> c['skills'] == base.BestSkill('chess', 'checkers')
True
>>> c['level']
2
>>> c['success'] == [base.effect(gain=('coin', 5))]
True
>>> c['failure'] == [base.effect(lose=('coin', 5))]
True
>>> c['outcome']
False
>>> c == base.challenge(
...     skills=base.BestSkill('chess', 'checkers'),
...     level=2,
...     success=[base.effect(gain=('coin', 5))],
...     failure=[base.effect(lose=('coin', 5))],
...     outcome=False
... )
True
>>> t2 = ['hi'] + tokens + ['bye']  # parsing only part of the list
>>> c == pf.parseChallengeFromTokens(t2, 1, -2)
True
def parseChallenge(self, encoded: str) -> exploration.base.Challenge:
4557    def parseChallenge(
4558        self,
4559        encoded: str
4560    ) -> base.Challenge:
4561        """
4562        Lexes the given string and then calls `parseChallengeFromTokens`
4563        to return a `base.Challenge`.
4564        """
4565        return self.parseChallengeFromTokens(
4566            lex(encoded, self.reverseFormat)
4567        )

Lexes the given string and then calls parseChallengeFromTokens to return a base.Challenge.
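
A usage sketch mirroring the token-level example above (the markup assumes the default format):

>>> pf = ParseFormat()
>>> c = pf.parseChallenge(
...     '<2>best(chess, checkers){gain coin*5}>{lose coin*5}'
... )
>>> c == base.challenge(
...     skills=base.BestSkill('chess', 'checkers'),
...     level=2,
...     success=[base.effect(gain=('coin', 5))],
...     failure=[base.effect(lose=('coin', 5))],
...     outcome=False
... )
True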

def parseConsequenceFromTokens( self, tokens: List[Union[Lexeme, str]], start: int = 0, end: int = -1) -> List[Union[exploration.base.Challenge, exploration.base.Effect, exploration.base.Condition]]:
4569    def parseConsequenceFromTokens(
4570        self,
4571        tokens: LexedTokens,
4572        start: int = 0,
4573        end: int = -1
4574    ) -> base.Consequence:
4575        """
4576        Parses a consequence from a lexed token list. If start and/or end
4577        are specified, only processes the part of the list between those
4578        two indices (inclusive). Use `lex` to turn a string into a
4579        `LexedTokens` list (or use `ParseFormat.parseConsequence` which
4580        does that for you).
4581
4582        An example:
4583
4584        >>> pf = ParseFormat()
4585        >>> tokens = [
4586        ...     Lexeme.openCurly,
4587        ...     'gain',
4588        ...     'power',
4589        ...     Lexeme.closeCurly
4590        ... ]
4591        >>> c = pf.parseConsequenceFromTokens(tokens)
4592        >>> c == [base.effect(gain='power')]
4593        True
4594        >>> tokens.append('hi')
4595        >>> c == pf.parseConsequenceFromTokens(tokens, end=-2)
4596        True
4597        >>> c == pf.parseConsequenceFromTokens(tokens, end=3)
4598        True
4599        """
4600        start, end, nTokens = normalizeEnds(tokens, start, end)
4601
4602        if nTokens < 2:
4603            raise ParseError("Consequence must have at least two tokens.")
4604
4605        if tokens[start] != Lexeme.openCurly:
4606            raise ParseError(
4607                f"Consequence must start with an open curly brace:"
4608                f" {repr(self.formatDict[Lexeme.openCurly])}."
4609            )
4610
4611        if tokens[end] != Lexeme.closeCurly:
4612            raise ParseError(
4613                f"Consequence must end with a closing curly brace:"
4614                f" {repr(self.formatDict[Lexeme.closeCurly])}."
4615            )
4616
4617        if nTokens == 2:
4618            return []
4619
4620        result: base.Consequence = []
4621        for partStart, partEnd in findSeparatedParts(
4622            tokens,
4623            Lexeme.consequenceSeparator,
4624            start + 1,
4625            end - 1,
4626            Lexeme.openCurly,
4627            Lexeme.closeCurly
4628        ):
4629            if partEnd - partStart < 0:
4630                raise ParseError("Empty consequence part.")
4631            if tokens[partStart] == Lexeme.angleLeft:  # a challenge
4632                result.append(
4633                    self.parseChallengeFromTokens(
4634                        tokens,
4635                        partStart,
4636                        partEnd
4637                    )
4638                )
4639            elif tokens[partStart] == Lexeme.doubleQuestionmark:  # condition
4640                result.append(
4641                    self.parseConditionFromTokens(
4642                        tokens,
4643                        partStart,
4644                        partEnd
4645                    )
4646                )
4647            else:  # Must be an effect
4648                result.append(
4649                    self.parseEffectFromTokens(
4650                        tokens,
4651                        partStart,
4652                        partEnd
4653                    )
4654                )
4655
4656        return result

4658    def parseConsequence(self, encoded: str) -> base.Consequence:
4659        """
4660        Parses a consequence from a string. Uses `lex` and
4661        `ParseFormat.parseConsequenceFromTokens`. For example:
4662
4663        >>> pf = ParseFormat()
4664        >>> c = pf.parseConsequence(
4665        ...   '{gain power}'
4666        ... )
4667        >>> c == [base.effect(gain='power')]
4668        True
4669        >>> pf.unparseConsequence(c)
4670        '{gain power}'
4671        >>> c = pf.parseConsequence(
4672        ...     '{\\n'
4673        ...     '    ??(brawny|!weights*3){\\n'
4674        ...     '        <3>sum(brains, brawn){goto home}>{bounce}\\n'
4675        ...     '    }{};\\n'
4676        ...     '    lose coin*1\\n'
4677        ...     '}'
4678        ... )
4679        >>> len(c)
4680        2
4681        >>> c[0]['condition'] == base.ReqAny([
4682        ...     base.ReqCapability('brawny'),
4683        ...     base.ReqNot(base.ReqTokens('weights', 3))
4684        ... ])
4685        True
4686        >>> len(c[0]['consequence'])
4687        1
4688        >>> len(c[0]['alternative'])
4689        0
4690        >>> cons = c[0]['consequence'][0]
4691        >>> cons['skills'] == base.CombinedSkill('brains', 'brawn')
4692        True
4693        >>> cons['level']
4694        3
4695        >>> len(cons['success'])
4696        1
4697        >>> len(cons['failure'])
4698        1
4699        >>> cons['success'][0] == base.effect(goto='home')
4700        True
4701        >>> cons['failure'][0] == base.effect(bounce=True)
4702        True
4703        >>> cons['outcome'] = False
4704        >>> c[0] == base.condition(
4705        ...     condition=base.ReqAny([
4706        ...         base.ReqCapability('brawny'),
4707        ...         base.ReqNot(base.ReqTokens('weights', 3))
4708        ...     ]),
4709        ...     consequence=[
4710        ...         base.challenge(
4711        ...             skills=base.CombinedSkill('brains', 'brawn'),
4712        ...             level=3,
4713        ...             success=[base.effect(goto='home')],
4714        ...             failure=[base.effect(bounce=True)],
4715        ...             outcome=False
4716        ...         )
4717        ...     ]
4718        ... )
4719        True
4720        >>> c[1] == base.effect(lose=('coin', 1))
4721        True
4722        """
4723        return self.parseConsequenceFromTokens(
4724            lex(encoded, self.reverseFormat)
4725        )

4732class ParsedDotGraph(TypedDict):
4733    """
4734    Represents a parsed `graphviz` dot-format graph consisting of nodes,
4735    edges, and subgraphs, with attributes attached to nodes and/or
4736    edges. An intermediate format during conversion to a full
4737    `DecisionGraph`. Includes the following slots:
4738
4739    - `'nodes'`: A list of tuples each holding a node ID followed by a
4740        list of name/value attribute pairs.
4741    - `'edges'`: A list of tuples each holding a from-ID, a to-ID,
4742        and then a list of name/value attribute pairs.
4743    - `'attrs'`: A list of tuples each holding a name/value attribute
4744        pair for graph-level attributes.
4745    - `'subgraphs'`: A list of subgraphs (each a tuple with a subgraph
4746        name and then another dictionary in the same format as this
4747        one).
4748    """
4749    nodes: List[Tuple[int, List[Tuple[str, str]]]]
4750    edges: List[Tuple[int, int, List[Tuple[str, str]]]]
4751    attrs: List[Tuple[str, str]]
4752    subgraphs: List[Tuple[str, 'ParsedDotGraph']]
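
A minimal literal of this shape, mirroring the structure that
`parseDotGraphContents` (below) produces; shown here for illustration:

    example: ParsedDotGraph = {
        'nodes': [(1, [('attr', 'value')])],         # one node, one attribute
        'edges': [(1, 2, [('fullLabel', 'to_B')])],  # one attributed edge
        'attrs': [('graph attr', '1')],              # graph-level attributes
        'subgraphs': [],                             # no nested subgraphs
    }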

4755def parseSimpleDotAttrs(fragment: str) -> List[Tuple[str, str]]:
4756    """
4757    Given a string fragment that starts with '[' and ends with ']',
4758    parses a simple attribute list in `graphviz` dot format from that
4759    fragment, returning a list of name/value attribute tuples. Raises a
4760    `DotParseError` if the fragment doesn't have the right format.
4761
4762    Examples:
4763
4764    >>> parseSimpleDotAttrs('[ name=value ]')
4765    [('name', 'value')]
4766    >>> parseSimpleDotAttrs('[ a=b c=d e=f ]')
4767    [('a', 'b'), ('c', 'd'), ('e', 'f')]
4768    >>> parseSimpleDotAttrs('[ a=b "c d"="e f" ]')
4769    [('a', 'b'), ('c d', 'e f')]
4770    >>> parseSimpleDotAttrs('[a=b "c d"="e f"]')
4771    [('a', 'b'), ('c d', 'e f')]
4772    >>> parseSimpleDotAttrs('[ a=b "c d"="e f"')
4773    Traceback (most recent call last):
4774    ...
4775    exploration.parsing.DotParseError...
4776    >>> parseSimpleDotAttrs('a=b "c d"="e f" ]')
4777    Traceback (most recent call last):
4778    ...
4779    exploration.parsing.DotParseError...
4780    >>> parseSimpleDotAttrs('[ a b=c ]')
4781    Traceback (most recent call last):
4782    ...
4783    exploration.parsing.DotParseError...
4784    >>> parseSimpleDotAttrs('[ a=b c ]')
4785    Traceback (most recent call last):
4786    ...
4787    exploration.parsing.DotParseError...
4788    >>> parseSimpleDotAttrs('[ name="value" ]')
4789    [('name', 'value')]
4790    >>> parseSimpleDotAttrs('[ name="\\\\"value\\\\"" ]')
4791    [('name', '"value"')]
4792    """
4793    if not fragment.startswith('[') or not fragment.endswith(']'):
4794        raise DotParseError(
4795            f"Simple attrs fragment missing delimiters:"
4796            f"\n  {repr(fragment)}"
4797        )
4798    result = []
4799    rest = fragment[1:-1].strip()
4800    while rest:
4801        # Get possibly-quoted attribute name:
4802        if rest.startswith('"'):
4803            try:
4804                aName, rest = utils.unquoted(rest)
4805            except ValueError:
4806                raise DotParseError(
4807                    f"Malformed quoted attribute name in"
4808                    f" fragment:\n  {repr(fragment)}"
4809                )
4810            rest = rest.lstrip()
4811            if not rest.startswith('='):
4812                raise DotParseError(
4813                    f"Missing '=' in attribute block in"
4814                    f" fragment:\n  {repr(fragment)}"
4815                )
4816            rest = rest[1:].lstrip()
4817        else:
4818            try:
4819                eqInd = rest.index('=')
4820            except ValueError:
4821                raise DotParseError(
4822                    f"Missing '=' in attribute block in"
4823                    f" fragment:\n  {repr(fragment)}"
4824                )
4825            aName = rest[:eqInd]
4826            if ' ' in aName:
4827                raise DotParseError(
4828                    f"Malformed unquoted attribute name"
4829                    f" {repr(aName)} in fragment:"
4830                    f"\n  {repr(fragment)}"
4831                )
4832            rest = rest[eqInd + 1:].lstrip()
4833
4834        # Get possibly-quoted attribute value:
4835        if rest.startswith('"'):
4836            try:
4837                aVal, rest = utils.unquoted(rest)
4838            except ValueError:
4839                raise DotParseError(
4840                    f"Malformed quoted attribute value in"
4841                    f" fragment:\n  {repr(fragment)}"
4842                )
4843            rest = rest.lstrip()
4844        else:
4845            try:
4846                spInd = rest.index(' ')
4847            except ValueError:
4848                spInd = len(rest)
4849            aVal = rest[:spInd]
4850            rest = rest[spInd:].lstrip()
4851
4852        # Append this attribute pair and continue parsing
4853        result.append((aName, aVal))
4854
4855    return result

4858def parseDotNode(
4859    nodeLine: str
4860) -> Tuple[int, Union[bool, List[Tuple[str, str]]]]:
4861    """
4862    Given a line of text from a `graphviz` dot-format graph
4863    (possibly ending in an '[' to indicate attributes to follow, or
4864    possibly including a '[ ... ]' block with attributes in-line),
4865    parses it as a node declaration, returning the ID of the node,
4866    along with a boolean indicating whether attributes follow or
4867    not. If an inline attribute block is present, the second member
4868    of the tuple will be a list of attribute name/value pairs. In
4869    that case, all attribute names and values must either be quoted
4870    or not include spaces.
4871    Examples:
4872
4873    >>> parseDotNode('1')
4874    (1, False)
4875    >>> parseDotNode(' 1 [ ')
4876    (1, True)
4877    >>> parseDotNode(' 1 [ a=b "c d"="e f" ] ')
4878    (1, [('a', 'b'), ('c d', 'e f')])
4879    >>> parseDotNode(' 3 [ name="A = \\\\"grate:open\\\\"" ]')
4880    (3, [('name', 'A = "grate:open"')])
4881    >>> parseDotNode('  "1"[')
4882    (1, True)
4883    >>> parseDotNode('  100[')
4884    (100, True)
4885    >>> parseDotNode('  1 2')
4886    Traceback (most recent call last):
4887    ...
4888    exploration.parsing.DotParseError...
4889    >>> parseDotNode('  1 [ 2')
4890    Traceback (most recent call last):
4891    ...
4892    exploration.parsing.DotParseError...
4893    >>> parseDotNode('  1 2')
4894    Traceback (most recent call last):
4895    ...
4896    exploration.parsing.DotParseError...
4897    >>> parseDotNode('  1 [ junk not=attrs ]')
4898    Traceback (most recent call last):
4899    ...
4900    exploration.parsing.DotParseError...
4901    >>> parseDotNode('  \\n')
4902    Traceback (most recent call last):
4903    ...
4904    exploration.parsing.DotParseError...
4905    """
4906    stripped = nodeLine.strip()
4907    if len(stripped) == 0:
4908        raise DotParseError(
4909            f"Empty node in dot graph on line:\n  {repr(nodeLine)}"
4910        )
4911    hasAttrs: Union[bool, List[Tuple[str, str]]] = False
4912    if stripped.startswith('"'):
4913        nodeName, rest = utils.unquoted(stripped)
4914        rest = rest.strip()
4915        if rest == '[':
4916            hasAttrs = True
4917        elif rest.startswith('[') and rest.endswith(']'):
4918            hasAttrs = parseSimpleDotAttrs(rest)
4919        elif rest:
4920            raise DotParseError(
4921                f"Extra junk {repr(rest)} after node on line:"
4922                f"\n {repr(nodeLine)}"
4923            )
4924
4925    else:
4926        if stripped.endswith('['):
4927            hasAttrs = True
4928            stripped = stripped[:-1].rstrip()
4929        elif stripped.endswith(']'):
4930            try:
4931                # TODO: Why did this used to be rindex? Was that
4932                # important in some case? (That doesn't work since the
4933                # value may contain a quoted open bracket).
4934                attrStart = stripped.index('[')
4935            except ValueError:
4936                raise DotParseError(
4937                    f"Unmatched ']' on line:\n  {repr(nodeLine)}"
4938                )
4939            hasAttrs = parseSimpleDotAttrs(
4940                stripped[attrStart:]
4941            )
4942            stripped = stripped[:attrStart].rstrip()
4943
4944        if ' ' in stripped:
4945            raise DotParseError(
4946                f"Unquoted multi-word node on line:\n  {repr(nodeLine)}"
4947            )
4948        else:
4949            nodeName = stripped
4950
4951    try:
4952        nodeID = int(nodeName)
4953    except ValueError:
4954        raise DotParseError(
4955            f"Node name {repr(nodeName)} is not an integer on"
4956            f" line:\n {repr(nodeLine)}"
4957        )
4958
4959    return (nodeID, hasAttrs)

4962def parseDotAttr(attrLine: str) -> Tuple[str, str]:
4963    """
4964    Given a line of text from a `graphviz` dot-format graph, parses
4965    it as an attribute (maybe-quoted-attr-name =
4966    maybe-quoted-attr-value). Returns the (maybe-unquoted) attr-name
4967    and the (maybe-unquoted) attr-value as a pair of strings. Raises
4968    a `DotParseError` if the line cannot be parsed as an attribute.
4969    Examples:
4970
4971    >>> parseDotAttr("a=b")
4972    ('a', 'b')
4973    >>> parseDotAttr("  a = b ")
4974    ('a', 'b')
4975    >>> parseDotAttr('"a" = "b"')
4976    ('a', 'b')
4977    >>> parseDotAttr('"a" -> "b"')
4978    Traceback (most recent call last):
4979    ...
4980    exploration.parsing.DotParseError...
4981    >>> parseDotAttr('"a" = "b" c')
4982    Traceback (most recent call last):
4983    ...
4984    exploration.parsing.DotParseError...
4985    >>> parseDotAttr('a')
4986    Traceback (most recent call last):
4987    ...
4988    exploration.parsing.DotParseError...
4989    >>> parseDotAttr('')
4990    Traceback (most recent call last):
4991    ...
4992    exploration.parsing.DotParseError...
4993    >>> parseDotAttr('0 [ name="A" ]')
4994    Traceback (most recent call last):
4995    ...
4996    exploration.parsing.DotParseError...
4997    """
4998    stripped = attrLine.lstrip()
4999    if len(stripped) == 0:
5000        raise DotParseError(
5001            f"Empty attribute in dot graph on line:\n  {repr(attrLine)}"
5002        )
5003    if stripped.endswith(']') or stripped.endswith('['):
5004        raise DotParseError(
5005            f"Node attribute ends in '[' or ']' on line:"
5006            f"\n  {repr(attrLine)}"
5007        )
5008    if stripped.startswith('"'):
5009        try:
5010            attrName, rest = utils.unquoted(stripped)
5011        except ValueError:
5012            raise DotParseError(
5013                f"Unmatched quotes in line:\n  {repr(attrLine)}"
5014            )
5015        rest = rest.lstrip()
5016        if len(rest) == 0 or rest[0] != '=':
5017            raise DotParseError(
5018                f"No equals sign following attribute name on"
5019                f" line:\n  {repr(attrLine)}"
5020            )
5021        rest = rest[1:].lstrip()
5022    else:
5023        try:
5024            eqInd = stripped.index('=')
5025        except ValueError:
5026            raise DotParseError(
5027                f"No equals sign in attribute line:"
5028                f"\n  {repr(attrLine)}"
5029            )
5030        attrName = stripped[:eqInd].rstrip()
5031        rest = stripped[eqInd + 1:].lstrip()
5032
5033    if rest[0] == '"':
5034        try:
5035            attrVal, rest = utils.unquoted(rest)
5036        except ValueError:
5037            raise DotParseError(
5038                f"Unmatched quotes in line:\n  {repr(attrLine)}"
5039            )
5040        if rest.strip():
5041            raise DotParseError(
5042                f"Junk after attribute on line:"
5043                f"\n  {repr(attrLine)}"
5044            )
5045    else:
5046        attrVal = rest.rstrip()
5047
5048    return attrName, attrVal

5051def parseDotEdge(edgeLine: str) -> Tuple[int, int, bool]:
5052    """
5053    Given a line of text from a `graphviz` dot-format graph, parses
5054    it as an edge (fromID -> toID). Returns a tuple containing the
5055    from ID, the to ID, and a boolean indicating whether attributes
5056    follow the edge on subsequent lines (true if the line ends with
5057    '['). Raises a `DotParseError` if the line cannot be parsed as
5058    an edge pair. Examples:
5059
5060    >>> parseDotEdge("1 -> 2")
5061    (1, 2, False)
5062    >>> parseDotEdge("  1 -> 2 ")
5063    (1, 2, False)
5064    >>> parseDotEdge('"1" -> "2"')
5065    (1, 2, False)
5066    >>> parseDotEdge('"1" -> "2" [')
5067    (1, 2, True)
5068    >>> parseDotEdge("a -> b")
5069    Traceback (most recent call last):
5070    ...
5071    exploration.parsing.DotParseError...
5072    >>> parseDotEdge('"1" = "1"')
5073    Traceback (most recent call last):
5074    ...
5075    exploration.parsing.DotParseError...
5076    >>> parseDotEdge('"1" -> "2" c')
5077    Traceback (most recent call last):
5078    ...
5079    exploration.parsing.DotParseError...
5080    >>> parseDotEdge('1')
5081    Traceback (most recent call last):
5082    ...
5083    exploration.parsing.DotParseError...
5084    >>> parseDotEdge('')
5085    Traceback (most recent call last):
5086    ...
5087    exploration.parsing.DotParseError...
5088    """
5089    stripped = edgeLine.lstrip()
5090    if len(stripped) == 0:
5091        raise DotParseError(
5092            f"Empty edge in dot graph on line:\n  {repr(edgeLine)}"
5093        )
5094    if stripped.startswith('"'):
5095        try:
5096            fromStr, rest = utils.unquoted(stripped)
5097        except ValueError:
5098            raise DotParseError(
5099                f"Unmatched quotes in line:\n  {repr(edgeLine)}"
5100            )
5101        rest = rest.lstrip()
5102        if rest[:2] != '->':
5103            raise DotParseError(
5104                f"No arrow sign following source name on"
5105                f" line:\n  {repr(edgeLine)}"
5106            )
5107        rest = rest[2:].lstrip()
5108    else:
5109        try:
5110            arrowInd = stripped.index('->')
5111        except ValueError:
5112            raise DotParseError(
5113                f"No arrow in edge line:"
5114                f"\n  {repr(edgeLine)}"
5115            )
5116        fromStr = stripped[:arrowInd].rstrip()
5117        rest = stripped[arrowInd + 2:].lstrip()
5118        if ' ' in fromStr:
5119            raise DotParseError(
5120                f"Unquoted multi-word edge source on line:"
5121                f"\n  {repr(edgeLine)}"
5122            )
5123
5124    hasAttrs = False
5125    if rest[0] == '"':
5126        try:
5127            toStr, rest = utils.unquoted(rest)
5128        except ValueError:
5129            raise DotParseError(
5130                f"Unmatched quotes in line:\n  {repr(edgeLine)}"
5131            )
5132        stripped = rest.strip()
5133        if stripped == '[':
5134            hasAttrs = True
5135        elif stripped:
5136            raise DotParseError(
5137                f"Junk after edge on line:"
5138                f"\n  {repr(edgeLine)}"
5139            )
5140    else:
5141        toStr = rest.rstrip()
5142        if toStr.endswith('['):
5143            toStr = toStr[:-1].rstrip()
5144            hasAttrs = True
5145        if ' ' in toStr:
5146            raise DotParseError(
5147                f"Unquoted multi-word edge destination on line:"
5148                f"\n  {repr(edgeLine)}"
5149            )
5150
5151    try:
5152        fromID = int(fromStr)
5153    except ValueError:
5154        raise DotParseError(
5155            f"Invalid 'from' ID: {repr(fromStr)} on line:"
5156            f"\n  {repr(edgeLine)}"
5157        )
5158
5159    try:
5160        toID = int(toStr)
5161    except ValueError:
5162        raise DotParseError(
5163            f"Invalid 'to' ID: {repr(toStr)} on line:"
5164            f"\n  {repr(edgeLine)}"
5165        )
5166
5167    return (fromID, toID, hasAttrs)

5170def parseDotAttrList(
5171    lines: List[str]
5172) -> Tuple[List[Tuple[str, str]], List[str]]:
5173    """
5174    Given a list of lines of text from a `graphviz` dot-format
5175    graph which starts with an attribute line, parses multiple
5176    attribute lines until a line containing just ']' is found.
5177    Returns a list of the parsed name/value attribute pair tuples,
5178    along with a list of remaining unparsed strings (not counting
5179    the closing ']' line). Raises a `DotParseError` if it finds a
5180    non-attribute line or if it fails to find a closing ']' line.
5181    Examples:
5182
5183    >>> parseDotAttrList([
5184    ...     'a=b\\n',
5185    ...     'c=d\\n',
5186    ...     ']\\n',
5187    ... ])
5188    ([('a', 'b'), ('c', 'd')], [])
5189    >>> parseDotAttrList([
5190    ...     'a=b',
5191    ...     'c=d',
5192    ...     '  ]',
5193    ...     'more',
5194    ...     'lines',
5195    ... ])
5196    ([('a', 'b'), ('c', 'd')], ['more', 'lines'])
5197    >>> parseDotAttrList([
5198    ...     'a=b',
5199    ...     'c=d',
5200    ... ])
5201    Traceback (most recent call last):
5202    ...
5203    exploration.parsing.DotParseError...
5204    """
5205    index = 0
5206    found = []
5207    while index < len(lines):
5208        thisLine = lines[index]
5209        try:
5210            found.append(parseDotAttr(thisLine))
5211        except DotParseError:
5212            if thisLine.strip() == ']':
5213                return (found, lines[index + 1:])
5214            else:
5215                raise DotParseError(
5216                    f"Could not parse attribute from line:"
5217                    f"\n  {repr(thisLine)}"
5218                    f"\nAttributes block starts on line:"
5219                    f"\n  {repr(lines[0])}"
5220                )
5221        index += 1
5222
5223    raise DotParseError(
5224        f"No list terminator (']') for attributes starting on line:"
5225        f"\n  {repr(lines[0])}"
5226    )
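
These line-level parsers compose: when `parseDotEdge` (or
`parseDotNode`) reports that attributes follow, the subsequent lines
are handed to `parseDotAttrList`, which returns the parsed pairs plus
the unconsumed remainder. A minimal sketch of that hand-off, mirroring
the logic in `parseDotGraphContents` below:

    lines = ['1 -> 2 [', '  label=up', ']']
    fromID, toID, hasAttrs = parseDotEdge(lines[0])  # (1, 2, True)
    if hasAttrs:
        attrs, rest = parseDotAttrList(lines[1:])
    else:
        attrs, rest = [], lines[1:]
    # Now attrs == [('label', 'up')] and rest == []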

5229def parseDotSubgraphStart(line: str) -> str:
5230    """
5231    Parses the start of a subgraph from a line of a graph file. The
5232    line must start with the word 'subgraph' and then have a name,
5233    followed by a '{' at the end of the line. Raises a
5234    `DotParseError` if this format doesn't match. Examples:
5235
5236    >>> parseDotSubgraphStart('subgraph A {')
5237    'A'
5238    >>> parseDotSubgraphStart('subgraph A B {')
5239    Traceback (most recent call last):
5240    ...
5241    exploration.parsing.DotParseError...
5242    >>> parseDotSubgraphStart('subgraph "A B" {')
5243    'A B'
5244    >>> parseDotSubgraphStart('subgraph A')
5245    Traceback (most recent call last):
5246    ...
5247    exploration.parsing.DotParseError...
5248    """
5249    stripped = line.strip()
5250    if len(stripped) == 0:
5251        raise DotParseError(
5252            f"Empty line where subgraph was expected:"
5253            f"\n  {repr(line)}"
5254        )
5255
5256    if not stripped.startswith('subgraph '):
5257        raise DotParseError(
5258            f"Subgraph doesn't start with 'subgraph' on line:"
5259            f"\n  {repr(line)}"
5260        )
5261
5262    stripped = stripped[9:]
5263    if stripped.startswith('"'):
5264        try:
5265            name, rest = utils.unquoted(stripped)
5266        except ValueError:
5267            raise DotParseError(
5268                f"Malformed quotes on subgraph line:\n {repr(line)}"
5269            )
5270        if rest.strip() != '{':
5271            raise DotParseError(
5272                f"Junk or missing '{{' on subgraph line:\n {repr(line)}"
5273            )
5274    else:
5275        parts = stripped.split()
5276        if len(parts) != 2 or parts[1] != '{':
5277            raise DotParseError(
5278                f"Junk or missing '{{' on subgraph line:\n {repr(line)}"
5279            )
5280        name, _ = parts
5281
5282    return name

5285def parseDotGraphContents(
5286    lines: List[str]
5287) -> Tuple[ParsedDotGraph, List[str]]:
5288    """
5289    Given a list of lines from a `graphviz` dot-format string,
5290    parses the list as the contents of a graph (or subgraph),
5291    stopping when it reaches a line that just contains '}'. Raises a
5292    `DotParseError` if it cannot do so or if the terminator is
5293    missing. Returns a tuple containing the parsed graph data (see
5294    `ParsedDotGraph`) and the list of remaining lines after the
5295    terminator. Recursively parses subgraphs. Example:
5296
5297    >>> bits = parseDotGraphContents([
5298    ...     '"graph attr"=1',
5299    ...     '1 [',
5300    ...     '  attr=value',
5301    ...     ']',
5302    ...     '1 -> 2 [',
5303    ...     '  fullLabel="to_B"',
5304    ...     '  quality=number',
5305    ...     ']',
5306    ...     'subgraph name {',
5307    ...     '  300',
5308    ...     '  400',
5309    ...     '  300 -> 400 [',
5310    ...     '    fullLabel=forward',
5311    ...     '  ]',
5312    ...     '}',
5313    ...     '}',
5314    ... ])
5315    >>> len(bits)
5316    2
5317    >>> g = bits[0]
5318    >>> bits[1]
5319    []
5320    >>> sorted(g.keys())
5321    ['attrs', 'edges', 'nodes', 'subgraphs']
5322    >>> g['nodes']
5323    [(1, [('attr', 'value')])]
5324    >>> g['edges']
5325    [(1, 2, [('fullLabel', 'to_B'), ('quality', 'number')])]
5326    >>> g['attrs']
5327    [('graph attr', '1')]
5328    >>> sgs = g['subgraphs']
5329    >>> len(sgs)
5330    1
5331    >>> len(sgs[0])
5332    2
5333    >>> sgs[0][0]
5334    'name'
5335    >>> sg = sgs[0][1]
5336    >>> sorted(sg.keys())
5337    ['attrs', 'edges', 'nodes', 'subgraphs']
5338    >>> sg["nodes"]
5339    [(300, []), (400, [])]
5340    >>> sg["edges"]
5341    [(300, 400, [('fullLabel', 'forward')])]
5342    >>> sg["attrs"]
5343    []
5344    >>> sg["subgraphs"]
5345    []
5346    """
5347    result: ParsedDotGraph = {
5348        'nodes': [],
5349        'edges': [],
5350        'attrs': [],
5351        'subgraphs': [],
5352    }
5353    index = 0
5354    remainder = None
5355    # Consider each line:
5356    while index < len(lines):
5357        # Grab line and pre-increment index
5358        thisLine = lines[index]
5359        index += 1
5360
5361        # Check for } first because it could be parsed as a node
5362        stripped = thisLine.strip()
5363        if stripped == '}':
5364            remainder = lines[index:]
5365            break
5366        elif stripped == '':  # ignore blank lines
5367            continue
5368
5369        # Cascading parsing attempts, since the possibilities are
5370        # mostly mutually exclusive.
5371        # TODO: Node/attr confusion with = in a node name?
5372        try:
5373            attrName, attrVal = parseDotAttr(thisLine)
5374            result['attrs'].append((attrName, attrVal))
5375        except DotParseError:
5376            try:
5377                fromNode, toNode, hasEAttrs = parseDotEdge(
5378                    thisLine
5379                )
5380                if hasEAttrs:
5381                    attrs, rest = parseDotAttrList(
5382                        lines[index:]
5383                    )
5384                    # Restart to process rest
5385                    lines = rest
5386                    index = 0
5387                else:
5388                    attrs = []
5389                result['edges'].append((fromNode, toNode, attrs))
5390            except DotParseError:
5391                try:
5392                    nodeName, hasNAttrs = parseDotNode(
5393                        thisLine
5394                    )
5395                    if hasNAttrs is True:
5396                        attrs, rest = parseDotAttrList(
5397                            lines[index:]
5398                        )
5399                        # Restart to process rest
5400                        lines = rest
5401                        index = 0
5402                    elif hasNAttrs:
5403                        attrs = hasNAttrs
5404                    else:
5405                        attrs = []
5406                    result['nodes'].append((nodeName, attrs))
5407                except DotParseError:
5408                    try:
5409                        subName = parseDotSubgraphStart(
5410                            thisLine
5411                        )
5412                        subStuff, rest = \
5413                            parseDotGraphContents(
5414                                lines[index:]
5415                            )
5416                        result['subgraphs'].append((subName, subStuff))
5417                        # Restart to process rest
5418                        lines = rest
5419                        index = 0
5420                    except DotParseError:
5421                        raise DotParseError(
5422                            f"Unrecognizable graph line (possibly"
5423                            f" beginning of unfinished structure):"
5424                            f"\n  {repr(thisLine)}"
5425                        )
5426    if remainder is None:
5427        raise DotParseError(
5428            f"Graph (or subgraph) is missing closing '}}'. Starts"
5429            f" on line:\n  {repr(lines[0])}"
5430        )
5431    else:
5432        return (result, remainder)

5435def parseDot(
5436    dotStr: str,
5437    parseFormat: ParseFormat = ParseFormat()
5438) -> core.DecisionGraph:
5439    """
5440    Converts a `graphviz` dot-format string into a `core.DecisionGraph`.
5441    A custom `ParseFormat` may be specified if desired; the default
5442    `ParseFormat` is used if not. Note that this relies on specific
5443    indentation schemes used by `toDot`, so a hand-edited dot-format
5444    graph will probably not work. A `DotParseError` is raised if the
5445    provided string can't be parsed. Example:
5446
5447    >>> parseDotNode(' 3 [ label="A = \\\\"grate:open\\\\"" ]')
5448    (3, [('label', 'A = "grate:open"')])
5449    >>> sg = '''\
5450    ... subgraph __requirements__ {
5451    ...   3 [ label="A = \\\\"grate:open\\\\"" ]
5452    ...   4 [ label="B = \\\\"!(helmet)\\\\"" ]
5453    ...   5 [ label="C = \\\\"helmet\\\\"" ]
5454    ... }'''
5455    >>> parseDotGraphContents(sg.splitlines()[1:])
5456    ({'nodes': [(3, [('label', 'A = "grate:open"')]),\
5457 (4, [('label', 'B = "!(helmet)"')]), (5, [('label', 'C = "helmet"')])],\
5458 'edges': [], 'attrs': [], 'subgraphs': []}, [])
5459    >>> from . import core
5460    >>> dg = core.DecisionGraph.example('simple')
5461    >>> encoded = toDot(dg)
5462    >>> reconstructed = parseDot(encoded)
5463    >>> for diff in dg.listDifferences(reconstructed):
5464    ...     print(diff)
5465    >>> reconstructed == dg
5466    True
5467    >>> dg = core.DecisionGraph.example('abc')
5468    >>> encoded = toDot(dg)
5469    >>> reconstructed = parseDot(encoded)
5470    >>> for diff in dg.listDifferences(reconstructed):
5471    ...     print(diff)
5472    >>> reconstructed == dg
5473    True
5474    >>> tg = core.DecisionGraph()
5475    >>> tg.addDecision('A')
5476    0
5477    >>> tg.addDecision('B')
5478    1
5479    >>> tg.addTransition('A', 'up', 'B', 'down')
5480    >>> same = parseDot('''
5481    ... digraph {
5482    ...     0 [ name=A label=A ]
5483    ...       0 -> 1 [
5484    ...         label=up
5485    ...         fullLabel=up
5486    ...         reciprocal=down
5487    ...       ]
5488    ...     1 [ name=B label=B ]
5489    ...       1 -> 0 [
5490    ...         label=down
5491    ...         fullLabel=down
5492    ...         reciprocal=up
5493    ...       ]
5494    ... }''')
5495    >>> for diff in tg.listDifferences(same):
5496    ...     print(diff)
5497    >>> same == tg
5498    True
5499    >>> pf = ParseFormat()
5500    >>> tg.setTransitionRequirement('A', 'up', pf.parseRequirement('one|two'))
5501    >>> tg.setConsequence(
5502    ...     'B',
5503    ...     'down',
5504    ...     [base.effect(gain="one")]
5505    ... )
5506    >>> test = parseDot('''
5507    ...   digraph {
5508    ...     0 [ name="A = \\\\"one|two\\\\"" label="A = \\\\"one|two\\\\"" ]
5509    ...   }
5510    ... ''')
5511    >>> list(test.nodes)
5512    [0]
5513    >>> test.nodes[0]['name']
5514    'A = "one|two"'
5515    >>> eff = (
5516    ...   r'"A = \\"[{\\\\\\"type\\\\\\": \\\\\\"gain\\\\\\",'
5517    ...   r' \\\\\\"applyTo\\\\\\": \\\\\\"active\\\\\\",'
5518    ...   r' \\\\\\"value\\\\\\": \\\\\\"one\\\\\\",'
5519    ...   r' \\\\\\"charges\\\\\\": null, \\\\\\"hidden\\\\\\": false,'
5520    ...   r' \\\\\\"delay\\\\\\": null}]\\""'
5521    ... )
5522    >>> utils.unquoted(eff)[1]
5523    ''
5524    >>> test2 = parseDot(
5525    ...     'digraph {\\n 0 [ name=' + eff + ' label=' + eff + ' ]\\n}'
5526    ... )
5527    >>> s = test2.nodes[0]['name']
5528    >>> s[:25]
5529    'A = "[{\\\\"type\\\\": \\\\"gain\\\\"'
5530    >>> s[25:50]
5531    ', \\\\"applyTo\\\\": \\\\"active\\\\"'
5532    >>> s[50:70]
5533    ', \\\\"value\\\\": \\\\"one\\\\"'
5534    >>> s[70:89]
5535    ', \\\\"charges\\\\": null'
5536    >>> s[89:108]
5537    ', \\\\"hidden\\\\": false'
5538    >>> s[108:]
5539    ', \\\\"delay\\\\": null}]"'
5540    >>> ae = s[s.index('=') + 1:].strip()
5541    >>> uq, after = utils.unquoted(ae)
5542    >>> after
5543    ''
5544    >>> fromJSON(uq) == [base.effect(gain="one")]
5545    True
5546    >>> same = parseDot('''
5547    ... digraph {
5548    ...   0 [ name=A label=A ]
5549    ...     0 -> 1 [
5550    ...       label=up
5551    ...       fullLabel=up
5552    ...       reciprocal=down
5553    ...       req=A
5554    ...     ]
5555    ...   1 [ name=B label=B ]
5556    ...     1 -> 0 [
5557    ...       label=down
5558    ...       fullLabel=down
5559    ...       reciprocal=up
5560    ...       consequence=A
5561    ...     ]
5562    ...   subgraph __requirements__ {
5563    ...     2 [ label="A = \\\\"one|two\\\\"" ]
5564    ...   }
5565    ...   subgraph __consequences__ {
5566    ...     3 [ label=''' + eff + ''' ]
5567    ...   }
5568    ... }''')
5569    >>> c = {'tags': {}, 'annotations': [], 'reciprocal': 'up', 'consequence': [{'type': 'gain', 'applyTo': 'active', 'value': 'one', 'delay': None, 'charges': None}]}['consequence']  # noqa
5570
5571    >>> for diff in tg.listDifferences(same):
5572    ...     print(diff)
5573    >>> same == tg
5574    True
5575    """
5576    lines = dotStr.splitlines()
5577    while lines[0].strip() == '':
5578        lines.pop(0)
5579    if lines.pop(0).strip() != "digraph {":
5580        raise DotParseError("Input doesn't begin with 'digraph {'.")
5581
5582    # Create our result
5583    result = core.DecisionGraph()
5584
5585    # Parse to intermediate graph data structure
5586    graphStuff, remaining = parseDotGraphContents(lines)
5587    if remaining:
5588        if len(remaining) <= 4:
5589            junk = '\n  '.join(repr(line) for line in remaining)
5590        else:
5591            junk = '\n  '.join(repr(line) for line in remaining[:4])
5592            junk += '\n  ...'
5593        raise DotParseError(f"Extra junk after graph:\n  {junk}")
5594
5595    # Sort out subgraphs to find legends
5596    zoneSubs = []
5597    reqLegend = None
5598    consequenceLegend = None
5599    mechanismLegend = None
5600    for sub in graphStuff['subgraphs']:
5601        if sub[0] == '__requirements__':
5602            reqLegend = sub[1]
5603        elif sub[0] == '__consequences__':
5604            consequenceLegend = sub[1]
5605        elif sub[0] == '__mechanisms__':
5606            mechanismLegend = sub[1]
5607        else:
5608            zoneSubs.append(sub)
5609
5610    # Build out our mapping from requirement abbreviations to actual
5611    # requirement objects
5612    reqMap: Dict[str, base.Requirement] = {}
5613    if reqLegend is not None:
5614        if reqLegend['edges']:
5615            raise DotParseError(
5616                f"Requirements legend subgraph has edges:"
5617                f"\n  {repr(reqLegend['edges'])}"
5618                f"\n(It should only have nodes.)"
5619            )
5620        if reqLegend['attrs']:
5621            raise DotParseError(
5622                f"Requirements legend subgraph has attributes:"
5623                f"\n  {repr(reqLegend['attrs'])}"
5624                f"\n(It should only have nodes.)"
5625            )
5626        if reqLegend['subgraphs']:
5627            raise DotParseError(
5628                f"Requirements legend subgraph has subgraphs:"
5629                f"\n  {repr(reqLegend['subgraphs'])}"
5630                f"\n(It should only have nodes.)"
5631            )
5632        for node, attrs in reqLegend['nodes']:
5633            if not attrs:
5634                raise DotParseError(
5635                    f"Node in requirements legend missing attributes:"
5636                    f"\n  {repr(attrs)}"
5637                )
5638            if len(attrs) != 1:
5639                raise DotParseError(
5640                    f"Node in requirements legend has multiple"
5641                    f" attributes:\n  {repr(attrs)}"
5642                )
5643            reqStr = attrs[0][1]
5644            try:
5645                eqInd = reqStr.index('=')
5646            except ValueError:
5647                raise DotParseError(
5648                    f"Missing '=' in requirement specifier:"
5649                    f"\n  {repr(reqStr)}"
5650                )
5651            ab = reqStr[:eqInd].rstrip()
5652            encoded = reqStr[eqInd + 1:].lstrip()
5653            try:
5654                encVal, empty = utils.unquoted(encoded)
5655            except ValueError:
5656                raise DotParseError(
5657                    f"Invalid quoted requirement value:"
5658                    f"\n  {repr(encoded)}"
5659                )
5660            if empty.strip():
5661                raise DotParseError(
5662                    f"Extra junk after requirement value:"
5663                    f"\n  {repr(empty)}"
5664                )
5665            try:
5666                req = parseFormat.parseRequirement(encVal)
5667            except ValueError:
5668                raise DotParseError(
5669                    f"Invalid encoded requirement in requirements"
5670                    f" legend:\n  {repr(encVal)}"
5671                )
5672            if ab in reqMap:
5673                raise DotParseError(
5674                    f"Abbreviation '{ab}' was defined multiple"
5675                    f" times in requirements legend."
5676                )
5677            reqMap[ab] = req
5678
5679    # Build out our mapping from consequence abbreviations to actual
5680    # consequence lists
5681    consequenceMap: Dict[str, base.Consequence] = {}
5682    if consequenceLegend is not None:
5683        if consequenceLegend['edges']:
5684            raise DotParseError(
5685                f"Consequences legend subgraph has edges:"
5686                f"\n  {repr(consequenceLegend['edges'])}"
5687                f"\n(It should only have nodes.)"
5688            )
5689        if consequenceLegend['attrs']:
5690            raise DotParseError(
5691                f"Consequences legend subgraph has attributes:"
5692                f"\n  {repr(consequenceLegend['attrs'])}"
5693                f"\n(It should only have nodes.)"
5694            )
5695        if consequenceLegend['subgraphs']:
5696            raise DotParseError(
5697                f"Consequences legend subgraph has subgraphs:"
5698                f"\n  {repr(consequenceLegend['subgraphs'])}"
5699                f"\n(It should only have nodes.)"
5700            )
5701        for node, attrs in consequenceLegend['nodes']:
5702            if not attrs:
5703                raise DotParseError(
5704                    f"Node in consequence legend missing attributes:"
5705                    f"\n  {repr(attrs)}"
5706                )
5707            if len(attrs) != 1:
5708                raise DotParseError(
5709                    f"Node in consequences legend has multiple"
5710                    f" attributes:\n  {repr(attrs)}"
5711                )
5712            consStr = attrs[0][1]
5713            try:
5714                eqInd = consStr.index('=')
5715            except ValueError:
5716                raise DotParseError(
5717                    f"Missing '=' in consequence string:"
5718                    f"\n  {repr(consStr)}"
5719                )
5720            ab = consStr[:eqInd].rstrip()
5721            encoded = consStr[eqInd + 1:].lstrip()
5722            try:
5723                encVal, empty = utils.unquoted(encoded)
5724            except ValueError:
5725                raise DotParseError(
5726                    f"Invalid quoted consequence value:"
5727                    f"\n  {repr(encoded)}"
5728                )
5729            if empty.strip():
5730                raise DotParseError(
5731                    f"Extra junk after consequence value:"
5732                    f"\n  {repr(empty)}"
5733                )
5734            try:
5735                consequences = fromJSON(encVal)
5736            except json.decoder.JSONDecodeError:
5737                raise DotParseError(
5738                    f"Invalid encoded consequence in consequences"
5739                    f" legend:\n  {repr(encVal)}"
5740                )
5741            if ab in consequenceMap:
5742                raise DotParseError(
5743                    f"Abbreviation '{ab}' was defined multiple"
5744                    f" times in consequences legend."
5745                )
5746            consequenceMap[ab] = consequences
5747
5748    # Reconstruct mechanisms
5749    if mechanismLegend is not None:
5750        if mechanismLegend['edges']:
5751            raise DotParseError(
5752                f"Mechanisms legend subgraph has edges:"
5753                f"\n  {repr(mechanismLegend['edges'])}"
5754                f"\n(It should only have nodes.)"
5755            )
5756        if mechanismLegend['attrs']:
5757            raise DotParseError(
5758                f"Mechanisms legend subgraph has attributes:"
5759                f"\n  {repr(mechanismLegend['attrs'])}"
5760                f"\n(It should only have nodes.)"
5761            )
5762        if mechanismLegend['subgraphs']:
5763            raise DotParseError(
5764                f"Mechanisms legend subgraph has subgraphs:"
5765                f"\n  {repr(mechanismLegend['subgraphs'])}"
5766                f"\n(It should only have nodes.)"
5767            )
5768        for node, attrs in mechanismLegend['nodes']:
5769            if not attrs:
5770                raise DotParseError(
5771                    f"Node in mechanisms legend missing attributes:"
5772                    f"\n  {repr(attrs)}"
5773                )
5774            if len(attrs) != 1:
5775                raise DotParseError(
5776                    f"Node in mechanisms legend has multiple"
5777                    f" attributes:\n  {repr(attrs)}"
5778                )
5779            mechStr = attrs[0][1]
5780            try:
5781                atInd = mechStr.index('@')
5782                colonInd = mechStr.index(':')
5783            except ValueError:
5784                raise DotParseError(
5785                    f"Missing '@' or ':' in mechanism string:"
5786                    f"\n  {repr(mechStr)}"
5787                )
5788            if atInd > colonInd:
5789                raise DotParseError(
5790                    f"':' after '@' in mechanism string:"
5791                    f"\n  {repr(mechStr)}"
5792                )
5793            mID: base.MechanismID
5794            where: Optional[base.DecisionID]
5795            mName: base.MechanismName
5796            try:
5797                mID = int(mechStr[:atInd].rstrip())
5798            except ValueError:
5799                raise DotParseError(
5800                    f"Invalid mechanism ID in mechanism string:"
5801                    f"\n  {repr(mechStr)}"
5802                )
5803            try:
5804                whereStr = mechStr[atInd + 1:colonInd].strip()
5805                if whereStr == "None":
5806                    where = None
5807                else:
5808                    where = int(whereStr)
5809            except ValueError:
5810                raise DotParseError(
5811                    f"Invalid mechanism location in mechanism string:"
5812                    f"\n  {repr(mechStr)}"
5813                )
5814            mName, rest = utils.unquoted(mechStr[colonInd + 1:].lstrip())
5815            if rest.strip():
5816                raise DotParseError(
5817                    f"Junk after mechanism name in mechanism string:"
5818                    f"\n  {repr(mechStr)}"
5819                )
5820            result.mechanisms[mID] = (where, mName)
5821            if where is None:
5822                result.globalMechanisms[mName] = mID
5823
5824    # Add zones to the graph based on parent info
5825    # Map from zones to children we should add to them once all
5826    # zones are created:
5827    zoneChildMap: Dict[str, List[str]] = {}
5828    for prefixedName, graphData in zoneSubs:
5829        # Chop off cluster_ or _ prefix:
5830        zoneName = prefixedName[prefixedName.index('_') + 1:]
5831        if graphData['edges']:
5832            raise DotParseError(
5833                f"Zone subgraph for zone {repr(zoneName)} has edges:"
5834                f"\n  {repr(graphData['edges'])}"
5835                f"\n(It should only have nodes and attributes.)"
5836            )
5837        if graphData['subgraphs']:
5838            raise DotParseError(
5839                f"Zone subgraph for zone {repr(zoneName)} has"
5840                f" subgraphs:"
5841                f"\n  {repr(graphData['subgraphs'])}"
5842                f"\n(It should only have nodes and attributes.)"
5843            )
5844        # Note: we ignore nodes as that info is used for
5845        # visualization but is redundant with the zone parent info
5846        # stored in nodes, and it would be tricky to tease apart
5847        # direct vs. indirect relationships from merged info.
5848        parents = None
5849        level = None
5850        for attr, aVal in graphData['attrs']:
5851            if attr == 'parents':
5852                try:
5853                    parents = set(fromJSON(aVal))
5854                except json.decoder.JSONDecodeError:
5855                    raise DotParseError(
5856                        f"Invalid parents JSON in zone subgraph for"
5857                        f" zone '{zoneName}':\n  {repr(aVal)}"
5858                    )
5859            elif attr == 'level':
5860                try:
5861                    level = int(aVal)
5862                except ValueError:
5863                    raise DotParseError(
5864                        f"Invalid level in zone subgraph for"
5865                        f" zone '{zoneName}':\n  {repr(aVal)}"
5866                    )
5867            elif attr == 'label':
5868                pass  # name already extracted from the subgraph name
5869
5870            else:
5871                raise DotParseError(
5872                    f"Unexpected attribute '{attr}' in zone"
5873                    f" subgraph for zone '{zoneName}'"
5874                )
5875        if parents is None:
5876            raise DotParseError(
5877                f"No parents attribute for zone '{zoneName}'."
5878                f" Graph is:\n  {repr(graphData)}"
5879            )
5880        if level is None:
5881            raise DotParseError(
5882                f"No level attribute for zone '{zoneName}'."
5883                f" Graph is:\n  {repr(graphData)}"
5884            )
5885
5886        # Add ourself to our parents in the child map
5887        for parent in parents:
5888            zoneChildMap.setdefault(parent, []).append(zoneName)
5889
5890        # Create this zone
5891        result.createZone(zoneName, level)
5892
5893    # Add zone parent/child relationships
5894    for parent, children in zoneChildMap.items():
5895        for child in children:
5896            result.addZoneToZone(child, parent)
5897
5898    # Add nodes to the graph
5899    for (node, attrs) in graphStuff['nodes']:
5900        name: Optional[str] = None
5901        annotations = []
5902        tags: Dict[base.Tag, base.TagValue] = {}
5903        zones = []
5904        for attr, aVal in attrs:
5905            if attr == 'name':  # it's the name
5906                name = aVal
5907            elif attr == 'label':  # zone + name; redundant
5908                pass
5909            elif attr.startswith('t_'):  # it's a tag
5910                tagName = attr[2:]
5911                try:
5912                    tagAny = fromJSON(aVal)
5913                except json.decoder.JSONDecodeError:
5914                    raise DotParseError(
5915                        f"Error in JSON for tag attr '{attr}' of node"
5916                        f" '{node}'"
5917                    )
5918                if isinstance(tagAny, base.TagValueTypes):
5919                    tagVal: base.TagValue = cast(base.TagValue, tagAny)
5920                else:
5921                    raise DotParseError(
5922                        f"JSON for tag value encodes disallowed tag"
5923                        f" value of type {type(tagAny)}. Value is:"
5924                        f"\n  {repr(tagAny)}"
5925                    )
5926                tags[tagName] = tagVal
5927            elif attr.startswith('z_'):  # it's a zone
5928                zones.append(attr[2:])
5929            elif attr == 'annotations':  # It's the annotations
5930                try:
5931                    annotations = fromJSON(aVal)
5932                except json.decoder.JSONDecodeError:
5933                    raise DotParseError(
5934                        f"Bad JSON in attribute '{attr}' of node"
5935                        f" '{node}'"
5936                    )
5937            else:
5938                raise DotParseError(
5939                    f"Unrecognized node attribute '{attr}' for node"
5940                    f" '{node}'"
5941                )
5942
5943        # TODO: Domains here?
5944        if name is None:
5945            raise DotParseError(f"Node '{node}' does not have a name.")
5946
5947        result.addIdentifiedDecision(
5948            node,
5949            name,
5950            tags=tags,
5951            annotations=annotations
5952        )
5953        for zone in zones:
5954            try:
5955                result.addDecisionToZone(node, zone)
5956            except core.MissingZoneError:
5957                raise DotParseError(
5958                    f"Zone '{zone}' for node {node} does not"
5959                    f" exist."
5960                )
5961
5962    # Add mechanisms to each node:
5963    for (mID, (where, mName)) in result.mechanisms.items():
5964        if where is None:
5965            continue  # global mechanisms were already recorded above
5966        mPool = result.nodes[where].setdefault('mechanisms', {})
5967        if mName in mPool:
5968            raise DotParseError(
5969                f"Multiple mechanisms named {mName!r} at decision {where}.")
5970        mPool[mName] = mID
5971
5972    # Reciprocals to double-check once all edges are added
5973    recipChecks: Dict[
5974        Tuple[base.DecisionID, base.Transition],
5975        base.Transition
5976    ] = {}
5977
5978    # Add each edge
5979    for (source, dest, attrs) in graphStuff['edges']:
5980        annotations = []
5981        tags = {}
5982        label = None
5983        requirements = None
5984        consequence = None
5985        reciprocal = None
5986        for attr, aVal in attrs:
5987            if attr.startswith('t_'):
5988                try:
5989                    tags[attr[2:]] = fromJSON(aVal)
5990                except json.decoder.JSONDecodeError:
5991                    raise DotParseError(
5992                        f"Invalid JSON in edge tag '{attr}' for edge"
5993                        f" from '{source}' to '{dest}':"
5994                        f"\n  {repr(aVal)}"
5995                    )
5996            elif attr == "label":  # We ignore the short-label
5997                pass
5998            elif attr == "fullLabel":  # This is our transition name
5999                label = aVal
6000            elif attr == "reciprocal":
6001                reciprocal = aVal
6002            elif attr == "req":
6003                reqAbbr = aVal
6004                if reqAbbr not in reqMap:
6005                    raise DotParseError(
6006                        f"Edge from '{source}' to '{dest}' has"
6007                        f" requirement abbreviation '{reqAbbr}'"
6008                        f" but that abbreviation was not listed"
6009                        f" in the '__requirements__' subgraph."
6010                    )
6011                requirements = reqMap[reqAbbr]
6012            elif attr == "consequence":
6013                consequenceAbbr = aVal
6014                if consequenceAbbr not in consequenceMap:
6015                    raise DotParseError(
6016                        f"Edge from '{source}' to '{dest}' has"
6017                        f" consequence abbreviation"
6018                        f" '{consequenceAbbr}' but that"
6019                        f" abbreviation was not listed in the"
6020                        f" '__consequences__' subgraph."
6021                    )
6022                consequence = consequenceMap[consequenceAbbr]
6023            elif attr == "annotations":
6024                try:
6025                    annotations = fromJSON(aVal)
6026                except json.decoder.JSONDecodeError:
6027                    raise DotParseError(
6028                        f"Invalid JSON in edge annotations for"
6029                        f" edge from '{source}' to '{dest}':"
6030                        f"\n  {repr(aVal)}"
6031                    )
6032            else:
6033                raise DotParseError(
6034                    f"Unrecognized edge attribute '{attr}' for edge"
6035                    f" from '{source}' to '{dest}'"
6036                )
6037
6038        if label is None:
6039            raise DotParseError(
6040                f"Edge from '{source}' to '{dest}' is missing"
6041                f" a 'fullLabel' attribute."
6042            )
6043
6044        # Add the requested transition
6045        result.addTransition(
6046            source,
6047            label,
6048            dest,
6049            tags=tags,
6050            annotations=annotations,
6051            requires=requirements,  # None works here
6052            consequence=consequence  # None works here
6053        )
6054        # Either we're first or our reciprocal is, so this will only
6055        # trigger for one of the pair
6056        if reciprocal is not None:
6057            recipDest = result.getDestination(dest, reciprocal)
6058            if recipDest is None:
6059                recipChecks[(source, label)] = reciprocal
6060                # we'll get set as a reciprocal when that edge is
6061                # instantiated, we hope, but let's check that later
6062            elif recipDest != source:
6063                raise DotParseError(
6064                    f"Transition '{label}' from '{source}' to"
6065                    f" '{dest}' lists reciprocal '{reciprocal}'"
6066                    f" but that transition from '{dest}' goes to"
6067                    f" '{recipDest}', not '{source}'."
6068                )
6069            else:
6070                # At this point we know the reciprocal edge exists
6071                # and has the appropriate destination (our source).
6072                # No need to check for a pre-existing reciprocal as
6073                # this edge is newly created and cannot already have
6074                # a reciprocal assigned.
6075                result.setReciprocal(source, label, reciprocal)
6076
6077    # Double-check skipped reciprocals
6078    for ((source, transition), reciprocal) in recipChecks.items():
6079        actual = result.getReciprocal(source, transition)
6080        if actual != reciprocal:
6081            raise DotParseError(
6082                f"Transition '{transition}' from '{source}' was"
6083                f" expecting to have reciprocal '{reciprocal}' but"
6084                f" all edges have been processed and its reciprocal"
6085                f" is {repr(actual)}."
6086            )
6087
6088    # Finally get graph-level attribute values
6089    for (name, value) in graphStuff['attrs']:
6090        if name == "unknownCount":
6091            try:
6092                result.unknownCount = int(value)
6093            except ValueError:
6094                raise DotParseError(
6095                    f"Invalid 'unknownCount' value {repr(value)}."
6096                )
6097        elif name == "nextID":
6098            try:
6099                result.nextID = int(value)
6100            except ValueError:
6101                raise DotParseError(
6102                    f"Invalid 'nextID' value:"
6103                    f"\n  {repr(value)}"
6104                )
6105            collisionCourse = [x for x in result if x >= result.nextID]
6106            if len(collisionCourse) > 0:
6107                raise DotParseError(
6108                    f"Next ID {value} is wrong because the graph"
6109                    f" already contains one or more node(s) with"
6110                    f" ID(s) that is/are at least that large:"
6111                    f" {collisionCourse}"
6112                )
6113        elif name == "nextMechanismID":
6114            try:
6115                result.nextMechanismID = int(value)
6116            except ValueError:
6117                raise DotParseError(
6118                    f"Invalid 'nextMechanismID' value:"
6119                    f"\n  {repr(value)}"
6120                )
6121        elif name in (
6122            "equivalences",
6123            "reversionTypes",
6124            "mechanisms",
6125            "globalMechanisms",
6126            "nameLookup"
6127        ):
6128            try:
6129                setattr(result, name, fromJSON(value))
6130            except json.decoder.JSONDecodeError:
6131                raise DotParseError(
6132                    f"Invalid JSON in '{name}' attribute:"
6133                    f"\n  {repr(value)}"
6134                )
6135        else:
6136            raise DotParseError(
6137                f"Graph has unexpected attribute '{name}'."
6138            )
6139
6140    # Final check for mechanism ID value after both mechanism ID and
6141    # mechanisms dictionary have been parsed:
6142    leftBehind = [
6143        x
6144        for x in result.mechanisms
6145        if x >= result.nextMechanismID
6146    ]
6147    if len(leftBehind) > 0:
6148        raise DotParseError(
6149            f"Next mechanism ID {result.nextMechanismID} is wrong"
6150            f" because the graph already contains one or more"
6151            f" mechanism(s) with ID(s) that is/are at least that"
6152            f" large: {leftBehind}"
6153        )
6154
6155    # And we're done!
6156    return result

Converts a graphviz dot-format string into a core.DecisionGraph. A custom ParseFormat may be specified if desired; the default ParseFormat is used if not. Note that this relies on the specific indentation scheme used by toDot, so a hand-edited dot-format graph will probably not work. A DotParseError is raised if the provided string can't be parsed. Example:

>>> parseDotNode(' 3 [ label="A = \\"grate:open\\"" ]')
(3, [('label', 'A = "grate:open"')])
>>> sg = '''\
... subgraph __requirements__ {
...   3 [ label="A = \\"grate:open\\"" ]
...   4 [ label="B = \\"!(helmet)\\"" ]
...   5 [ label="C = \\"helmet\\"" ]
... }'''
>>> parseDotGraphContents(sg.splitlines()[1:])
({'nodes': [(3, [('label', 'A = "grate:open"')]), (4, [('label', 'B = "!(helmet)"')]), (5, [('label', 'C = "helmet"')])], 'edges': [], 'attrs': [], 'subgraphs': []}, [])
>>> from . import core
>>> dg = core.DecisionGraph.example('simple')
>>> encoded = toDot(dg)
>>> reconstructed = parseDot(encoded)
>>> for diff in dg.listDifferences(reconstructed):
...     print(diff)
>>> reconstructed == dg
True
>>> dg = core.DecisionGraph.example('abc')
>>> encoded = toDot(dg)
>>> reconstructed = parseDot(encoded)
>>> for diff in dg.listDifferences(reconstructed):
...     print(diff)
>>> reconstructed == dg
True
>>> tg = core.DecisionGraph()
>>> tg.addDecision('A')
0
>>> tg.addDecision('B')
1
>>> tg.addTransition('A', 'up', 'B', 'down')
>>> same = parseDot('''
... digraph {
...     0 [ name=A label=A ]
...       0 -> 1 [
...         label=up
...         fullLabel=up
...         reciprocal=down
...       ]
...     1 [ name=B label=B ]
...       1 -> 0 [
...         label=down
...         fullLabel=down
...         reciprocal=up
...       ]
... }''')
>>> for diff in tg.listDifferences(same):
...     print(diff)
>>> same == tg
True
>>> pf = ParseFormat()
>>> tg.setTransitionRequirement('A', 'up', pf.parseRequirement('one|two'))
>>> tg.setConsequence(
...     'B',
...     'down',
...     [base.effect(gain="one")]
... )
>>> test = parseDot('''
...   digraph {
...     0 [ name="A = \\"one|two\\"" label="A = \\"one|two\\"" ]
...   }
... ''')
>>> list(test.nodes)
[0]
>>> test.nodes[0]['name']
'A = "one|two"'
>>> eff = (
...   r'"A = \"[{\\\"type\\\": \\\"gain\\\",'
...   r' \\\"applyTo\\\": \\\"active\\\",'
...   r' \\\"value\\\": \\\"one\\\",'
...   r' \\\"charges\\\": null, \\\"hidden\\\": false,'
...   r' \\\"delay\\\": null}]\""'
... )
>>> utils.unquoted(eff)[1]
''
>>> test2 = parseDot(
...     'digraph {\n 0 [ name=' + eff + ' label=' + eff + ' ]\n}'
... )
>>> s = test2.nodes[0]['name']
>>> s[:25]
'A = "[{\\"type\\": \\"gain\\"'
>>> s[25:50]
', \\"applyTo\\": \\"active\\"'
>>> s[50:70]
', \\"value\\": \\"one\\"'
>>> s[70:89]
', \\"charges\\": null'
>>> s[89:108]
', \\"hidden\\": false'
>>> s[108:]
', \\"delay\\": null}]"'
>>> ae = s[s.index('=') + 1:].strip()
>>> uq, after = utils.unquoted(ae)
>>> after
''
>>> fromJSON(uq) == [base.effect(gain="one")]
True
>>> same = parseDot('''
... digraph {
...   0 [ name=A label=A ]
...     0 -> 1 [
...       label=up
...       fullLabel=up
...       reciprocal=down
...       req=A
...     ]
...   1 [ name=B label=B ]
...     1 -> 0 [
...       label=down
...       fullLabel=down
...       reciprocal=up
...       consequence=A
...     ]
...   subgraph __requirements__ {
...     2 [ label="A = \\"one|two\\"" ]
...   }
...   subgraph __consequences__ {
...     3 [ label=''' + eff + ''' ]
...   }
... }''')
>>> c = {'tags': {}, 'annotations': [], 'reciprocal': 'up', 'consequence': [{'type': 'gain', 'applyTo': 'active', 'value': 'one', 'delay': None, 'charges': None}]}['consequence']  # noqa
>>> for diff in tg.listDifferences(same):
...     print(diff)
>>> same == tg
True
def toDot(graph: exploration.core.DecisionGraph, clusterLevels: Union[str, List[int]] = [0]) -> str:
6159def toDot(
6160    graph: core.DecisionGraph,
6161    clusterLevels: Union[str, List[int]] = [0]
6162) -> str:
6163    """
6164    Converts the decision graph into a "dot"-format string suitable
6165    for processing by `graphviz`.
6166
6167    See [the dot language
6168    specification](https://graphviz.org/doc/info/lang.html) for more
6169    detail on the syntax we convert to.
6170
6171    If `clusterLevels` is given, it should be either the string '*',
6172    or a list of integers. '*' means that all zone levels should be
6173    cluster-style subgraphs, while a list of integers specifies that
6174    zones at those levels should be cluster-style subgraphs. This
6175    will prefix the subgraph names with 'cluster_' instead of just
6176    '_'.
6177
6178    TODO: Check edge cases for quotes in capability names, tag names,
6179    transition names, annotations, etc.
6180
6181    TODO: At least colons not allowed in tag names!
6182
6183    TODO: Spaces in decision/transition names? Other special
6184    characters in those names?
6185    """
6186    # Set up result including unknownCount and nextID
6187    result = (
6188        f"digraph {{"
6189        f"\n  unknownCount={graph.unknownCount}"
6190        f"\n  nextID={graph.nextID}"
6191        f"\n  nextMechanismID={graph.nextMechanismID}"
6192        f"\n"
6193    )
6194
6195    # Dictionaries for using letters to substitute for unique
6196    # requirements/consequences found throughout the graph. Keys are
6197    # quoted requirement or consequence reprs, and values are
6198    # abbreviation strings for them.
6199    currentReqKey = utils.nextAbbrKey(None)
6200    currentEffectKey = utils.nextAbbrKey(None)
6201    reqKeys: Dict[str, str] = {}
6202    consequenceKeys: Dict[str, str] = {}
6203
6204    # Add all decision and transition info
6205    decision: base.DecisionID  # TODO: Fix Multidigraph type stubs
6206    for decision in graph.nodes:
6207        nodeInfo = graph.nodes[decision]
6208        tags = nodeInfo.get('tags', {})
6209        annotations = toJSON(nodeInfo.get('annotations', []))
6210        zones = nodeInfo.get('zones', set())
6211        nodeAttrs = f"\n    name={utils.quoted(nodeInfo['name'])}"
6212        immediateZones = [z for z in zones if graph.zoneHierarchyLevel(z) == 0]
6213        if len(immediateZones) > 0:
6214            useZone = sorted(immediateZones)[0]
6215            # TODO: Don't hardcode :: here?
6216            withZone = useZone + "::" + nodeInfo['name']
6217            nodeAttrs += f"\n    label={utils.quoted(withZone)}"
6218        else:
6219            nodeAttrs += f"\n    label={utils.quoted(nodeInfo['name'])}"
6220        for tag, value in tags.items():
6221            rep = utils.quoted(toJSON(value))
6222            nodeAttrs += f"\n    t_{tag}={rep}"
6223        for z in sorted(zones):
6224            nodeAttrs += f"\n    z_{z}=1"
6225        if annotations:
6226            nodeAttrs += '\n    annotations=' + utils.quoted(annotations)
6227
6228        result += f'\n  {decision} [{nodeAttrs}\n  ]'
6229
6230        for (transition, destination) in graph._byEdge[decision].items():
6231            edgeAttrs = (
6232                '\n      label='
6233              + utils.quoted(utils.abbr(transition))
6234            )
6235            edgeAttrs += (
6236                '\n      fullLabel='
6237              + utils.quoted(transition)
6238            )
6239            reciprocal = graph.getReciprocal(decision, transition)
6240            if reciprocal is not None:
6241                edgeAttrs += (
6242                    '\n      reciprocal='
6243                  + utils.quoted(reciprocal)
6244                )
6245            info = graph.edges[
6246                decision,  # type:ignore
6247                destination,
6248                transition
6249            ]
6250            if 'requirement' in info:
6251                # Get string rep for requirement
6252                rep = utils.quoted(info['requirement'].unparse())
6253                # Get assigned abbreviation or assign one
6254                if rep in reqKeys:
6255                    ab = reqKeys[rep]
6256                else:
6257                    ab = currentReqKey
6258                    reqKeys[rep] = ab
6259                    currentReqKey = utils.nextAbbrKey(currentReqKey)
6260                # Add abbreviation as edge attribute
6261                edgeAttrs += f'\n      req={ab}'
6262            if 'consequence' in info:
6263                # Get string representation of consequences
6264                rep = utils.quoted(
6265                    toJSON(info['consequence'])
6266                )
6267                # Get abbreviation for that or assign one:
6268                if rep in consequenceKeys:
6269                    ab = consequenceKeys[rep]
6270                else:
6271                    ab = currentEffectKey
6272                    consequenceKeys[rep] = ab
6273                    currentEffectKey = utils.nextAbbrKey(
6274                        currentEffectKey
6275                    )
6276                # Add abbreviation as an edge attribute
6277                edgeAttrs += f'\n      consequence={ab}'
6278            for (tag, value) in info["tags"].items():
6279                # Get string representation of tag value
6280                rep = utils.quoted(toJSON(value))
6281                # Add edge attribute for tag
6282                edgeAttrs += f'\n      t_{tag}={rep}'
6283            if 'annotations' in info:
6284                edgeAttrs += (
6285                    '\n      annotations='
6286                  + utils.quoted(toJSON(info['annotations']))
6287                )
6288            result += f'\n    {decision} -> {destination}'
6289            result += f' [{edgeAttrs}\n    ]'
6290
6291    # Add zone info as subgraph structure
6292    for z, zinfo in graph.zones.items():
6293        parents = utils.quoted(toJSON(sorted(zinfo.parents)))
6294        if clusterLevels == '*' or zinfo.level in clusterLevels:
6295            zName = "cluster_" + z
6296        else:
6297            zName = '_' + z
6298        zoneSubgraph = f'\n  subgraph {utils.quoted(zName)} {{'
6299        zoneSubgraph += f'\n    label={z}'
6300        zoneSubgraph += f'\n    level={zinfo.level}'
6301        zoneSubgraph += f'\n    parents={parents}'
6302        for decision in sorted(graph.allDecisionsInZone(z)):
6303            zoneSubgraph += f'\n    {decision}'
6304        zoneSubgraph += '\n  }'
6305        result += zoneSubgraph
6306
6307    # Add equivalences, mechanisms, etc.
6308    for attr in [
6309        "equivalences",
6310        "reversionTypes",
6311        "mechanisms",
6312        "globalMechanisms",
6313        "nameLookup"
6314    ]:
6315        aRep = utils.quoted(toJSON(getattr(graph, attr)))
6316        result += f'\n  {attr}={aRep}'
6317
6318    # Add legend subgraphs to represent abbreviations
6319    useID = graph.nextID
6320    if reqKeys:
6321        result += '\n  subgraph __requirements__ {'
6322        for rrepr, ab in reqKeys.items():
6323            nStr = utils.quoted(ab + ' = ' + rrepr)
6324            result += (
6325                f"\n    {useID} [ label={nStr} ]"
6326            )
6327            useID += 1
6328        result += '\n  }'
6329
6330    if consequenceKeys:
6331        result += '\n  subgraph __consequences__ {'
6332        for erepr, ab in consequenceKeys.items():
6333            nStr = utils.quoted(ab + ' = ' + erepr)
6334            result += (
6335                f"\n    {useID} [ label={nStr} ]"
6336            )
6337            useID += 1
6338        result += '\n  }'
6339
6340    if graph.mechanisms:
6341        result += '\n  subgraph __mechanisms__ {'
6342        mID: base.MechanismID
6343        mWhere: Optional[base.DecisionID]
6344        mName: base.MechanismName
6345        for (mID, (mWhere, mName)) in graph.mechanisms.items():
6346            qName = utils.quoted(mName)
6347            nStr = utils.quoted(f"{mID}@{mWhere}:{qName}")
6348            result += (
6349                f"\n    {useID} [ label={nStr} ]"
6350            )
6351            useID += 1
6352        result += '\n  }'
6353
6354    result += "\n}\n"
6355    return result

Converts the decision graph into a "dot"-format string suitable for processing by graphviz.

See the dot language specification (https://graphviz.org/doc/info/lang.html) for more detail on the syntax we convert to.

If clusterLevels is given, it should be either the string '*', or a list of integers. '*' means that all zone levels should be cluster-style subgraphs, while a list of integers specifies that zones at those levels should be cluster-style subgraphs. This will prefix the subgraph names with 'cluster_' instead of just '_'.

TODO: Check edge cases for quotes in capability names, tag names, transition names, annotations, etc.

TODO: At least colons not allowed in tag names!

TODO: Spaces in decision/transition names? Other special characters in those names?
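
For instance, toDot output can be fed back through parseDot (a sketch mirroring the parseDot doctests above; the clusterLevels='*' setting is our own addition, not one exercised by the original examples):

>>> from . import core
>>> dg = core.DecisionGraph.example('simple')
>>> parseDot(toDot(dg, clusterLevels='*')) == dg
True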

T = TypeVar('T')

Type var for loadCustom.

def loadCustom(stream: TextIO, loadAs: Type[T]) -> T:
6366def loadCustom(stream: TextIO, loadAs: Type[T]) -> T:
6367    """
6368    Loads a new JSON-encodable object from the JSON data in the
6369    given text stream (e.g., a file open in read mode). See
6370    `CustomJSONDecoder` for details on the format and which object types
6371    are supported.
6372
6373    This casts the result to the specified type, but errors out with a
6374    `TypeError` if it doesn't match.
6375    """
6376    result = json.load(stream, cls=CustomJSONDecoder)
6377    if isinstance(result, loadAs):
6378        return result
6379    else:
6380        raise TypeError(
6381            f"Expected to load a {loadAs} but got a {type(result)}."
6382        )

Loads a new JSON-encodable object from the JSON data in the given text stream (e.g., a file open in read mode). See CustomJSONDecoder for details on the format and which object types are supported.

This casts the result to the specified type, but errors out with a TypeError if it doesn't match.
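
A minimal sketch using an in-memory stream (io.StringIO standing in for an open file; only functions documented in this module are used otherwise):

>>> import io
>>> from . import core
>>> dg = core.DecisionGraph.example('simple')
>>> loadCustom(io.StringIO(toJSON(dg)), core.DecisionGraph) == dg
True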

def saveCustom(toSave: Union[exploration.base.MetricSpace, exploration.core.DecisionGraph, exploration.core.DiscreteExploration], stream: TextIO) -> None:
6385def saveCustom(
6386    toSave: Union[  # TODO: More in this union?
6387        base.MetricSpace,
6388        core.DecisionGraph,
6389        core.DiscreteExploration,
6390    ],
6391    stream: TextIO
6392) -> None:
6393    """
6394    Saves a JSON-encodable object as JSON into the given text stream
6395    (e.g., a file open in writing mode). See `CustomJSONEncoder` for
6396    details on the format and which types are supported.
6397    """
6398    json.dump(toSave, stream, cls=CustomJSONEncoder)

Saves a JSON-encodable object as JSON into the given text stream (e.g., a file open in writing mode). See CustomJSONEncoder for details on the format and which types are supported.
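
A round-trip sketch with an in-memory stream (io.StringIO standing in for a file):

>>> import io
>>> from . import core
>>> buf = io.StringIO()
>>> dg = core.DecisionGraph.example('simple')
>>> saveCustom(dg, buf)
>>> _ = buf.seek(0)  # rewind before reading back
>>> loadCustom(buf, core.DecisionGraph) == dg
True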

def toJSON(obj: Any) -> str:
6401def toJSON(obj: Any) -> str:
6402    """
6403    Defines the standard object -> JSON operation using the
6404    `CustomJSONEncoder` as well as not using `sort_keys`.
6405    """
6406    return CustomJSONEncoder(sort_keys=False).encode(obj)

Defines the standard object -> JSON operation using the CustomJSONEncoder as well as not using sort_keys.
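
For example (consistent with the CustomJSONEncoder doctests below):

>>> toJSON((1, 2))
'{"^^d": "t", "values": [1, 2]}'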

def fromJSON(encoded: str) -> Any:
6409def fromJSON(encoded: str) -> Any:
6410    """
6411    Defines the standard JSON -> object operation using
6412    `CustomJSONDecoder`.
6413    """
6414    return json.loads(encoded, cls=CustomJSONDecoder)

Defines the standard JSON -> object operation using CustomJSONDecoder.
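
For example (inverting the toJSON example above):

>>> fromJSON('{"^^d": "t", "values": [1, 2]}')
(1, 2)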

class CustomJSONEncoder(json.encoder.JSONEncoder):
6417class CustomJSONEncoder(json.JSONEncoder):
6418    """
6419    A custom JSON encoder that has special protocols for handling the
6420    same objects that `CustomJSONDecoder` decodes. It handles these
6421    objects specially so that they can be decoded back to their original
6422    form.
6423
6424    Examples:
6425
6426    >>> from . import core
6427    >>> tupList = [(1, 1), (2, 2)]
6428    >>> encTup = toJSON(tupList)
6429    >>> encTup
6430    '[{"^^d": "t", "values": [1, 1]}, {"^^d": "t", "values": [2, 2]}]'
6431    >>> fromJSON(encTup) == tupList
6432    True
6433    >>> dg = core.DecisionGraph.example('simple')
6434    >>> fromJSON(toJSON(dg)) == dg
6435    True
6436    >>> dg = core.DecisionGraph.example('abc')
6437    >>> zi = dg.getZoneInfo('upZone')
6438    >>> zi
6439    ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={},\
6440 annotations=[])
6441    >>> zj = toJSON(zi)
6442    >>> zj
6443    '{"^^d": "nt", "name": "ZoneInfo", "values":\
6444 {"level": 1, "parents": {"^^d": "s", "values": []},\
6445 "contents": {"^^d": "s", "values": ["zoneA"]}, "tags": {},\
6446 "annotations": []}}'
6447    >>> fromJSON(toJSON(zi))
6448    ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={},\
6449 annotations=[])
6450    >>> fromJSON(toJSON(zi)) == zi
6451    True
6452    >>> toJSON({'a': 'b', 1: 2})
6453    '{"^^d": "d", "items": [["a", "b"], [1, 2]]}'
6454    >>> toJSON(((1, 2), (3, 4)))
6455    '{"^^d": "t", "values": [{"^^d": "t", "values": [1, 2]},\
6456 {"^^d": "t", "values": [3, 4]}]}'
6457    >>> toJSON(base.effect(set=('grate', 'open')))
6458    '{"type": "set", "applyTo": "active",\
6459 "value": {"^^d": "t",\
6460 "values": [{"^^d": "nt", "name": "MechanismSpecifier",\
6461 "values": {"domain": null, "zone": null, "decision": null, "name": "grate"}},\
6462 "open"]}, "delay": null, "charges": null, "hidden": false}'
6463    >>> j = toJSON(dg)
6464    >>> expected = (
6465    ... '{"^^d": "DG",'
6466    ... ' "props": {},'
6467    ... ' "node_links": {"directed": true,'
6468    ... ' "multigraph": true,'
6469    ... ' "graph": {},'
6470    ... ' "nodes": ['
6471    ... '{"name": "A", "domain": "main", "tags": {},'
6472    ... ' "annotations": ["This is a multi-word \\\\"annotation.\\\\""],'
6473    ... ' "zones": {"^^d": "s", "values": ["zoneA"]},'
6474    ... ' "mechanisms": {"grate": 0},'
6475    ... ' "id": 0'
6476    ... '},'
6477    ... ' {'
6478    ... '"name": "B",'
6479    ... ' "domain": "main",'
6480    ... ' "tags": {"b": 1, "tag2": "\\\\"value\\\\""},'
6481    ... ' "annotations": [],'
6482    ... ' "zones": {"^^d": "s", "values": ["zoneB"]},'
6483    ... ' "id": 1'
6484    ... '},'
6485    ... ' {'
6486    ... '"name": "C",'
6487    ... ' "domain": "main",'
6488    ... ' "tags": {"aw\\\\"ful": "ha\\'ha"},'
6489    ... ' "annotations": [],'
6490    ... ' "zones": {"^^d": "s", "values": ["zoneA"]},'
6491    ... ' "id": 2'
6492    ... '}'
6493    ... '],'
6494    ... ' "links": ['
6495    ... '{'
6496    ... '"tags": {},'
6497    ... ' "annotations": [],'
6498    ... ' "reciprocal": "right",'
6499    ... ' "source": 0,'
6500    ... ' "target": 1,'
6501    ... ' "key": "left"'
6502    ... '},'
6503    ... ' {'
6504    ... '"tags": {},'
6505    ... ' "annotations": [],'
6506    ... ' "reciprocal": "up_right",'
6507    ... ' "requirement": {"^^d": "R", "value": "grate:open"},'
6508    ... ' "source": 0,'
6509    ... ' "target": 1,'
6510    ... ' "key": "up_left"'
6511    ... '},'
6512    ... ' {'
6513    ... '"tags": {},'
6514    ... ' "annotations": ["Transition \\'annotation.\\'"],'
6515    ... ' "reciprocal": "up",'
6516    ... ' "source": 0,'
6517    ... ' "target": 2,'
6518    ... ' "key": "down"'
6519    ... '},'
6520    ... ' {'
6521    ... '"tags": {},'
6522    ... ' "annotations": [],'
6523    ... ' "reciprocal": "left",'
6524    ... ' "source": 1,'
6525    ... ' "target": 0,'
6526    ... ' "key": "right"'
6527    ... '},'
6528    ... ' {'
6529    ... '"tags": {},'
6530    ... ' "annotations": [],'
6531    ... ' "reciprocal": "up_left",'
6532    ... ' "requirement": {"^^d": "R", "value": "grate:open"},'
6533    ... ' "source": 1,'
6534    ... ' "target": 0,'
6535    ... ' "key": "up_right"'
6536    ... '},'
6537    ... ' {'
6538    ... '"tags": {"fast": 1},'
6539    ... ' "annotations": [],'
6540    ... ' "reciprocal": "down",'
6541    ... ' "source": 2,'
6542    ... ' "target": 0,'
6543    ... ' "key": "up"'
6544    ... '},'
6545    ... ' {'
6546    ... '"tags": {},'
6547    ... ' "annotations": [],'
6548    ... ' "requirement": {"^^d": "R", "value": "!(helmet)"},'
6549    ... ' "consequence": ['
6550    ... '{'
6551    ... '"type": "gain", "applyTo": "active", "value": "helmet",'
6552    ... ' "delay": null, "charges": null, "hidden": false'
6553    ... '},'
6554    ... ' {'
6555    ... '"type": "deactivate",'
6556    ... ' "applyTo": "active", "value": null,'
6557    ... ' "delay": 3, "charges": null, "hidden": false'
6558    ... '}'
6559    ... '],'
6560    ... ' "source": 2,'
6561    ... ' "target": 2,'
6562    ... ' "key": "grab_helmet"'
6563    ... '},'
6564    ... ' {'
6565    ... '"tags": {},'
6566    ... ' "annotations": [],'
6567    ... ' "requirement": {"^^d": "R", "value": "helmet"},'
6568    ... ' "consequence": ['
6569    ... '{"type": "lose", "applyTo": "active", "value": "helmet",'
6570    ... ' "delay": null, "charges": null, "hidden": false},'
6571    ... ' {"type": "gain", "applyTo": "active",'
6572    ... ' "value": {"^^d": "t", "values": ["token", 1]},'
6573    ... ' "delay": null, "charges": null, "hidden": false'
6574    ... '},'
6575    ... ' {"condition":'
6576    ... ' {"^^d": "R", "value": "token*2"},'
6577    ... ' "consequence": ['
6578    ... '{"type": "set", "applyTo": "active",'
6579    ... ' "value": {"^^d": "t", "values": ['
6580    ... '{"^^d": "nt", "name": "MechanismSpecifier",'
6581    ... ' "values": {"domain": null, "zone": null, "decision": null,'
6582    ... ' "name": "grate"}}, "open"]},'
6583    ... ' "delay": null, "charges": null, "hidden": false'
6584    ... '},'
6585    ... ' {"type": "deactivate", "applyTo": "active", "value": null,'
6586    ... ' "delay": null, "charges": null, "hidden": false'
6587    ... '}'
6588    ... '],'
6589    ... ' "alternative": []'
6590    ... '}'
6591    ... '],'
6592    ... ' "source": 2,'
6593    ... ' "target": 2,'
6594    ... ' "key": "pull_lever"'
6595    ... '}'
6596    ... ']'
6597    ... '},'
6598    ... ' "_byEdge": {"^^d": "d", "items":'
6599    ... ' [[0, {"left": 1, "up_left": 1, "down": 2}],'
6600    ... ' [1, {"right": 0, "up_right": 0}],'
6601    ... ' [2, {"up": 0, "grab_helmet": 2, "pull_lever": 2}]]},'
6602    ... ' "zones": {"zoneA":'
6603    ... ' {"^^d": "nt", "name": "ZoneInfo",'
6604    ... ' "values": {'
6605    ... '"level": 0,'
6606    ... ' "parents": {"^^d": "s", "values": ["upZone"]},'
6607    ... ' "contents": {"^^d": "s", "values": [0, 2]},'
6608    ... ' "tags": {},'
6609    ... ' "annotations": []'
6610    ... '}'
6611    ... '},'
6612    ... ' "zoneB":'
6613    ... ' {"^^d": "nt", "name": "ZoneInfo",'
6614    ... ' "values": {'
6615    ... '"level": 0,'
6616    ... ' "parents": {"^^d": "s", "values": []},'
6617    ... ' "contents": {"^^d": "s", "values": [1]},'
6618    ... ' "tags": {},'
6619    ... ' "annotations": []'
6620    ... '}'
6621    ... '},'
6622    ... ' "upZone":'
6623    ... ' {"^^d": "nt", "name": "ZoneInfo",'
6624    ... ' "values": {'
6625    ... '"level": 1,'
6626    ... ' "parents": {"^^d": "s", "values": []},'
6627    ... ' "contents": {"^^d": "s", "values": ["zoneA"]},'
6628    ... ' "tags": {},'
6629    ... ' "annotations": []'
6630    ... '}'
6631    ... '}'
6632    ... '},'
6633    ... ' "unknownCount": 0,'
6634    ... ' "equivalences": {"^^d": "d", "items": ['
6635    ... '[{"^^d": "t", "values": [0, "open"]},'
6636    ... ' {"^^d": "s", "values": ['
6637    ... '{"^^d": "R", "value": "helmet"}]}]'
6638    ... ']},'
6639    ... ' "reversionTypes": {},'
6640    ... ' "nextMechanismID": 1,'
6641    ... ' "mechanisms": {"^^d": "d", "items": ['
6642    ... '[0, {"^^d": "t", "values": [0, "grate"]}]]},'
6643    ... ' "globalMechanisms": {},'
6644    ... ' "nameLookup": {"A": [0], "B": [1], "C": [2]}'
6645    ... '}'
6646    ... )
6647    >>> for i in range(len(j)):
6648    ...     if j[i] != expected[i:i+1]:
6649    ...         print(
6650    ...             'exp: ' + expected[i-10:i+50] + '\\ngot: ' + j[i-10:i+50]
6651    ...         )
6652    ...         break
6653    >>> j == expected
6654    True
6655    >>> rec = fromJSON(j)
6656    >>> rec.nodes == dg.nodes
6657    True
6658    >>> rec.edges == dg.edges
6659    True
6660    >>> rec.unknownCount == dg.unknownCount
6661    True
6662    >>> rec.equivalences == dg.equivalences
6663    True
6664    >>> rec.reversionTypes == dg.reversionTypes
6665    True
6666    >>> rec._byEdge == dg._byEdge
6667    True
6668    >>> rec.zones == dg.zones
6669    True
6670    >>> for diff in dg.listDifferences(rec):
6671    ...     print(diff)
6672    >>> rec == dg
6673    True
6674
6675    `base.MetricSpace` example:
6676
6677    >>> ms = base.MetricSpace("test")
6678    >>> ms.addPoint([2, 3])
6679    0
6680    >>> ms.addPoint([2, 7, 0])
6681    1
6682    >>> ms.addPoint([2, 7])
6683    2
6684    >>> toJSON(ms) # TODO: ^^d entries here
6685    '{"^^d": "MS", "name": "test",\
6686 "points": {"^^d": "d", "items": [[0, [2, 3]], [1, [2, 7,\
6687 0]], [2, [2, 7]]]}, "lastID": 2}'
6688    >>> ms.removePoint(0)
6689    >>> ms.removePoint(1)
6690    >>> ms.removePoint(2)
6691    >>> toJSON(ms)
6692    '{"^^d": "MS", "name": "test", "points": {}, "lastID": 2}'
6693    >>> ms.addPoint([5, 6])
6694    3
6695    >>> ms.addPoint([7, 8])
6696    4
6697    >>> toJSON(ms)
6698    '{"^^d": "MS", "name": "test",\
6699 "points": {"^^d": "d", "items": [[3, [5, 6]], [4, [7, 8]]]}, "lastID": 4}'
6700
6701    # TODO: more examples, including one for a DiscreteExploration
6702    """
6703
6704    def default(self, o: Any) -> Any:
6705        """
6706        Re-writes objects for encoding. We re-write the following
6707        objects:
6708
6709        - `set`
6710        - `dict` (if the keys aren't all strings)
6711        - `tuple`/`namedtuple`
6712        - `ZoneInfo`
6713        - `Requirement`
6714        - `SkillCombination`
6715        - `DecisionGraph`
6716        - `DiscreteExploration`
6717        - `MetricSpace`
6718
6719        TODO: FeatureGraph...
6720        """
6721        if isinstance(o, list):
6722            return [self.default(x) for x in o]
6723
6724        elif isinstance(o, set):
6725            return {
6726                '^^d': 's',
6727                'values': sorted(
6728                    [self.default(e) for e in o],
6729                    key=lambda x: str(x)
6730                )
6731            }
6732
6733        elif isinstance(o, dict):
6734            if all(isinstance(k, str) for k in o):
6735                return {
6736                    k: self.default(v)
6737                    for k, v in o.items()
6738                }
6739            else:
6740                return {
6741                    '^^d': 'd',
6742                    'items': [
6743                        [self.default(k), self.default(v)]
6744                        for (k, v) in o.items()
6745                    ]
6746                }
6747
6748        elif isinstance(o, tuple):
6749            if hasattr(o, '_fields') and hasattr(o, '_asdict'):
6750                # Named tuple
6751                return {
6752                    '^^d': 'nt',
6753                    'name': o.__class__.__name__,
6754                    'values': {
6755                        k: self.default(v)
6756                        for k, v in o._asdict().items()
6757                    }
6758                }
6759            else:
6760                # Normal tuple
6761                return {
6762                    '^^d': 't',
6763                    "values": [self.default(e) for e in o]
6764                }
6765
6766        elif isinstance(o, base.Requirement):
6767            return {
6768                '^^d': 'R',
6769                'value': o.unparse()
6770            }
6771
6772        elif isinstance(o, base.SkillCombination):
6773            return {
6774                '^^d': 'SC',
6775                'value': o.unparse()
6776            }
6777        # TODO: Consequence, Condition, Challenge, and Effect here?
6778
6779        elif isinstance(o, core.DecisionGraph):
6780            return {
6781                '^^d': 'DG',
6782                'props': self.default(o.graph),  # type:ignore [attr-defined]
6783                'node_links': self.default(
6784                    networkx.node_link_data(o, edges="links") # type: ignore
6785                    # TODO: Fix networkx stubs
6786                ),
6787                '_byEdge': self.default(o._byEdge),
6788                'zones': self.default(o.zones),
6789                'unknownCount': o.unknownCount,
6790                'equivalences': self.default(o.equivalences),
6791                'reversionTypes': self.default(o.reversionTypes),
6792                'nextMechanismID': o.nextMechanismID,
6793                'mechanisms': self.default(o.mechanisms),
6794                'globalMechanisms': self.default(o.globalMechanisms),
6795                'nameLookup': self.default(o.nameLookup)
6796            }
6797
6798        elif isinstance(o, core.DiscreteExploration):
6799            return {
6800                '^^d': 'DE',
6801                'situations': self.default(o.situations)
6802            }
6803
6804        elif isinstance(o, base.MetricSpace):
6805            return {
6806                '^^d': 'MS',
6807                'name': o.name,
6808                'points': self.default(o.points),
6809                'lastID': o.lastID()
6810            }
6811
6812        else:
6813            return o
6814
6815    def encode(self, o: Any) -> str:
6816        """
6817        Custom encode function since we need to override behavior for
6818        tuples and dicts.
6819        """
6820        if isinstance(o, (tuple, dict, set)):
6821            o = self.default(o)
6822        elif isinstance(o, list):
6823            o = [self.default(x) for x in o]
6824
6825        try:
6826            return super().encode(o)
6827        except TypeError:
6828            return super().encode(self.default(o))
6829
6830    def iterencode(
6831        self,
6832        o: Any,
6833        _one_shot: bool = False
6834    ) -> Generator[str, None, None]:
6835        """
6836        Custom iterencode function since we need to override behavior for
6837        tuples and dicts.
6838        """
6839        if isinstance(o, (tuple, dict)):
6840            o = self.default(o)
6841
6842        yield from super().iterencode(o, _one_shot=_one_shot)

A custom JSON encoder that has special protocols for handling the same objects that CustomJSONDecoder decodes. It handles these objects specially so that they can be decoded back to their original form.

Examples:

>>> from . import core
>>> tupList = [(1, 1), (2, 2)]
>>> encTup = toJSON(tupList)
>>> encTup
'[{"^^d": "t", "values": [1, 1]}, {"^^d": "t", "values": [2, 2]}]'
>>> fromJSON(encTup) == tupList
True
>>> dg = core.DecisionGraph.example('simple')
>>> fromJSON(toJSON(dg)) == dg
True
>>> dg = core.DecisionGraph.example('abc')
>>> zi = dg.getZoneInfo('upZone')
>>> zi
ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={}, annotations=[])
>>> zj = toJSON(zi)
>>> zj
'{"^^d": "nt", "name": "ZoneInfo", "values": {"level": 1, "parents": {"^^d": "s", "values": []}, "contents": {"^^d": "s", "values": ["zoneA"]}, "tags": {}, "annotations": []}}'
>>> fromJSON(toJSON(zi))
ZoneInfo(level=1, parents=set(), contents={'zoneA'}, tags={}, annotations=[])
>>> fromJSON(toJSON(zi)) == zi
True
>>> toJSON({'a': 'b', 1: 2})
'{"^^d": "d", "items": [["a", "b"], [1, 2]]}'
>>> toJSON(((1, 2), (3, 4)))
'{"^^d": "t", "values": [{"^^d": "t", "values": [1, 2]}, {"^^d": "t", "values": [3, 4]}]}'
>>> toJSON(base.effect(set=('grate', 'open')))
'{"type": "set", "applyTo": "active", "value": {"^^d": "t", "values": [{"^^d": "nt", "name": "MechanismSpecifier", "values": {"domain": null, "zone": null, "decision": null, "name": "grate"}}, "open"]}, "delay": null, "charges": null, "hidden": false}'
>>> j = toJSON(dg)
>>> expected = (
... '{"^^d": "DG",'
... ' "props": {},'
... ' "node_links": {"directed": true,'
... ' "multigraph": true,'
... ' "graph": {},'
... ' "nodes": ['
... '{"name": "A", "domain": "main", "tags": {},'
... ' "annotations": ["This is a multi-word \\"annotation.\\""],'
... ' "zones": {"^^d": "s", "values": ["zoneA"]},'
... ' "mechanisms": {"grate": 0},'
... ' "id": 0'
... '},'
... ' {'
... '"name": "B",'
... ' "domain": "main",'
... ' "tags": {"b": 1, "tag2": "\\"value\\""},'
... ' "annotations": [],'
... ' "zones": {"^^d": "s", "values": ["zoneB"]},'
... ' "id": 1'
... '},'
... ' {'
... '"name": "C",'
... ' "domain": "main",'
... ' "tags": {"aw\\"ful": "ha\'ha"},'
... ' "annotations": [],'
... ' "zones": {"^^d": "s", "values": ["zoneA"]},'
... ' "id": 2'
... '}'
... '],'
... ' "links": ['
... '{'
... '"tags": {},'
... ' "annotations": [],'
... ' "reciprocal": "right",'
... ' "source": 0,'
... ' "target": 1,'
... ' "key": "left"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": [],'
... ' "reciprocal": "up_right",'
... ' "requirement": {"^^d": "R", "value": "grate:open"},'
... ' "source": 0,'
... ' "target": 1,'
... ' "key": "up_left"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": ["Transition \'annotation.\'"],'
... ' "reciprocal": "up",'
... ' "source": 0,'
... ' "target": 2,'
... ' "key": "down"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": [],'
... ' "reciprocal": "left",'
... ' "source": 1,'
... ' "target": 0,'
... ' "key": "right"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": [],'
... ' "reciprocal": "up_left",'
... ' "requirement": {"^^d": "R", "value": "grate:open"},'
... ' "source": 1,'
... ' "target": 0,'
... ' "key": "up_right"'
... '},'
... ' {'
... '"tags": {"fast": 1},'
... ' "annotations": [],'
... ' "reciprocal": "down",'
... ' "source": 2,'
... ' "target": 0,'
... ' "key": "up"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": [],'
... ' "requirement": {"^^d": "R", "value": "!(helmet)"},'
... ' "consequence": ['
... '{'
... '"type": "gain", "applyTo": "active", "value": "helmet",'
... ' "delay": null, "charges": null, "hidden": false'
... '},'
... ' {'
... '"type": "deactivate",'
... ' "applyTo": "active", "value": null,'
... ' "delay": 3, "charges": null, "hidden": false'
... '}'
... '],'
... ' "source": 2,'
... ' "target": 2,'
... ' "key": "grab_helmet"'
... '},'
... ' {'
... '"tags": {},'
... ' "annotations": [],'
... ' "requirement": {"^^d": "R", "value": "helmet"},'
... ' "consequence": ['
... '{"type": "lose", "applyTo": "active", "value": "helmet",'
... ' "delay": null, "charges": null, "hidden": false},'
... ' {"type": "gain", "applyTo": "active",'
... ' "value": {"^^d": "t", "values": ["token", 1]},'
... ' "delay": null, "charges": null, "hidden": false'
... '},'
... ' {"condition":'
... ' {"^^d": "R", "value": "token*2"},'
... ' "consequence": ['
... '{"type": "set", "applyTo": "active",'
... ' "value": {"^^d": "t", "values": ['
... '{"^^d": "nt", "name": "MechanismSpecifier",'
... ' "values": {"domain": null, "zone": null, "decision": null,'
... ' "name": "grate"}}, "open"]},'
... ' "delay": null, "charges": null, "hidden": false'
... '},'
... ' {"type": "deactivate", "applyTo": "active", "value": null,'
... ' "delay": null, "charges": null, "hidden": false'
... '}'
... '],'
... ' "alternative": []'
... '}'
... '],'
... ' "source": 2,'
... ' "target": 2,'
... ' "key": "pull_lever"'
... '}'
... ']'
... '},'
... ' "_byEdge": {"^^d": "d", "items":'
... ' [[0, {"left": 1, "up_left": 1, "down": 2}],'
... ' [1, {"right": 0, "up_right": 0}],'
... ' [2, {"up": 0, "grab_helmet": 2, "pull_lever": 2}]]},'
... ' "zones": {"zoneA":'
... ' {"^^d": "nt", "name": "ZoneInfo",'
... ' "values": {'
... '"level": 0,'
... ' "parents": {"^^d": "s", "values": ["upZone"]},'
... ' "contents": {"^^d": "s", "values": [0, 2]},'
... ' "tags": {},'
... ' "annotations": []'
... '}'
... '},'
... ' "zoneB":'
... ' {"^^d": "nt", "name": "ZoneInfo",'
... ' "values": {'
... '"level": 0,'
... ' "parents": {"^^d": "s", "values": []},'
... ' "contents": {"^^d": "s", "values": [1]},'
... ' "tags": {},'
... ' "annotations": []'
... '}'
... '},'
... ' "upZone":'
... ' {"^^d": "nt", "name": "ZoneInfo",'
... ' "values": {'
... '"level": 1,'
... ' "parents": {"^^d": "s", "values": []},'
... ' "contents": {"^^d": "s", "values": ["zoneA"]},'
... ' "tags": {},'
... ' "annotations": []'
... '}'
... '}'
... '},'
... ' "unknownCount": 0,'
... ' "equivalences": {"^^d": "d", "items": ['
... '[{"^^d": "t", "values": [0, "open"]},'
... ' {"^^d": "s", "values": ['
... '{"^^d": "R", "value": "helmet"}]}]'
... ']},'
... ' "reversionTypes": {},'
... ' "nextMechanismID": 1,'
... ' "mechanisms": {"^^d": "d", "items": ['
... '[0, {"^^d": "t", "values": [0, "grate"]}]]},'
... ' "globalMechanisms": {},'
... ' "nameLookup": {"A": [0], "B": [1], "C": [2]}'
... '}'
... )
>>> for i in range(len(j)):
...     if j[i] != expected[i:i+1]:
...         print(
...             'exp: ' + expected[i-10:i+50] + '\ngot: ' + j[i-10:i+50]
...         )
...         break
>>> j == expected
True
>>> rec = fromJSON(j)
>>> rec.nodes == dg.nodes
True
>>> rec.edges == dg.edges
True
>>> rec.unknownCount == dg.unknownCount
True
>>> rec.equivalences == dg.equivalences
True
>>> rec.reversionTypes == dg.reversionTypes
True
>>> rec._byEdge == dg._byEdge
True
>>> rec.zones == dg.zones
True
>>> for diff in dg.listDifferences(rec):
...     print(diff)
>>> rec == dg
True

base.MetricSpace example:

>>> ms = base.MetricSpace("test")
>>> ms.addPoint([2, 3])
0
>>> ms.addPoint([2, 7, 0])
1
>>> ms.addPoint([2, 7])
2
>>> toJSON(ms) # TODO: ^^d entries here
'{"^^d": "MS", "name": "test", "points": {"^^d": "d", "items": [[0, [2, 3]], [1, [2, 7, 0]], [2, [2, 7]]]}, "lastID": 2}'
>>> ms.removePoint(0)
>>> ms.removePoint(1)
>>> ms.removePoint(2)
>>> toJSON(ms)
'{"^^d": "MS", "name": "test", "points": {}, "lastID": 2}'
>>> ms.addPoint([5, 6])
3
>>> ms.addPoint([7, 8])
4
>>> toJSON(ms)
'{"^^d": "MS", "name": "test", "points": {"^^d": "d", "items": [[3, [5, 6]], [4, [7, 8]]]}, "lastID": 4}'

TODO: more examples, including one for a DiscreteExploration

def default(self, o: Any) -> Any:
6704    def default(self, o: Any) -> Any:
6705        """
6706        Re-writes objects for encoding. We re-write the following
6707        objects:
6708
6709        - `set`
6710        - `dict` (if the keys aren't all strings)
6711        - `tuple`/`namedtuple`
6712        - `ZoneInfo`
6713        - `Requirement`
6714        - `SkillCombination`
6715        - `DecisionGraph`
6716        - `DiscreteExploration`
6717        - `MetricSpace`
6718
6719        TODO: FeatureGraph...
6720        """
6721        if isinstance(o, list):
6722            return [self.default(x) for x in o]
6723
6724        elif isinstance(o, set):
6725            return {
6726                '^^d': 's',
6727                'values': sorted(
6728                    [self.default(e) for e in o],
6729                    key=lambda x: str(x)
6730                )
6731            }
6732
6733        elif isinstance(o, dict):
6734            if all(isinstance(k, str) for k in o):
6735                return {
6736                    k: self.default(v)
6737                    for k, v in o.items()
6738                }
6739            else:
6740                return {
6741                    '^^d': 'd',
6742                    'items': [
6743                        [self.default(k), self.default(v)]
6744                        for (k, v) in o.items()
6745                    ]
6746                }
6747
6748        elif isinstance(o, tuple):
6749            if hasattr(o, '_fields') and hasattr(o, '_asdict'):
6750                # Named tuple
6751                return {
6752                    '^^d': 'nt',
6753                    'name': o.__class__.__name__,
6754                    'values': {
6755                        k: self.default(v)
6756                        for k, v in o._asdict().items()
6757                    }
6758                }
6759            else:
6760                # Normal tuple
6761                return {
6762                    '^^d': 't',
6763                    "values": [self.default(e) for e in o]
6764                }
6765
6766        elif isinstance(o, base.Requirement):
6767            return {
6768                '^^d': 'R',
6769                'value': o.unparse()
6770            }
6771
6772        elif isinstance(o, base.SkillCombination):
6773            return {
6774                '^^d': 'SC',
6775                'value': o.unparse()
6776            }
6777        # TODO: Consequence, Condition, Challenge, and Effect here?
6778
6779        elif isinstance(o, core.DecisionGraph):
6780            return {
6781                '^^d': 'DG',
6782                'props': self.default(o.graph),  # type:ignore [attr-defined]
6783                'node_links': self.default(
6784                    networkx.node_link_data(o, edges="links") # type: ignore
6785                    # TODO: Fix networkx stubs
6786                ),
6787                '_byEdge': self.default(o._byEdge),
6788                'zones': self.default(o.zones),
6789                'unknownCount': o.unknownCount,
6790                'equivalences': self.default(o.equivalences),
6791                'reversionTypes': self.default(o.reversionTypes),
6792                'nextMechanismID': o.nextMechanismID,
6793                'mechanisms': self.default(o.mechanisms),
6794                'globalMechanisms': self.default(o.globalMechanisms),
6795                'nameLookup': self.default(o.nameLookup)
6796            }
6797
6798        elif isinstance(o, core.DiscreteExploration):
6799            return {
6800                '^^d': 'DE',
6801                'situations': self.default(o.situations)
6802            }
6803
6804        elif isinstance(o, base.MetricSpace):
6805            return {
6806                '^^d': 'MS',
6807                'name': o.name,
6808                'points': self.default(o.points),
6809                'lastID': o.lastID()
6810            }
6811
6812        else:
6813            return o

Re-writes objects for encoding. We re-write the following objects:

  • set
  • dict (if the keys aren't all strings)
  • tuple/namedtuple
  • ZoneInfo
  • Requirement
  • SkillCombination
  • DecisionGraph
  • DiscreteExploration
  • MetricSpace

TODO: FeatureGraph...
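
The rewriting can be seen by calling default directly (a sketch; toJSON normally invokes it for you via encode):

>>> CustomJSONEncoder().default((1, 2))
{'^^d': 't', 'values': [1, 2]}
>>> CustomJSONEncoder().default({1: 'a'})
{'^^d': 'd', 'items': [[1, 'a']]}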

def encode(self, o: Any) -> str:
6815    def encode(self, o: Any) -> str:
6816        """
6817        Custom encode function since we need to override behavior for
6818        tuples and dicts.
6819        """
6820        if isinstance(o, (tuple, dict, set)):
6821            o = self.default(o)
6822        elif isinstance(o, list):
6823            o = [self.default(x) for x in o]
6824
6825        try:
6826            return super().encode(o)
6827        except TypeError:
6828            return super().encode(self.default(o))

Custom encode function since we need to override behavior for tuples and dicts.
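
For example (a sketch; the tuple value is rewritten via default before encoding):

>>> CustomJSONEncoder().encode({'key': (1, 2)})
'{"key": {"^^d": "t", "values": [1, 2]}}'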

def iterencode(self, o: Any, _one_shot: bool = False) -> Generator[str, None, None]:
6830    def iterencode(
6831        self,
6832        o: Any,
6833        _one_shot: bool = False
6834    ) -> Generator[str, None, None]:
6835        """
6836        Custom iterencode function since we need to override behavior for
6837        tuples and dicts.
6838        """
6839        if isinstance(o, (tuple, dict)):
6840            o = self.default(o)
6841
6842        yield from super().iterencode(o, _one_shot=_one_shot)

Custom iterencode function since we need to override behavior for tuples and dicts.
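
For example (a sketch; joining the streamed chunks matches encode's output):

>>> ''.join(CustomJSONEncoder().iterencode((1, 2)))
'{"^^d": "t", "values": [1, 2]}'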

Inherited Members
json.encoder.JSONEncoder
JSONEncoder
item_separator
key_separator
skipkeys
ensure_ascii
check_circular
allow_nan
sort_keys
indent
class CustomJSONDecoder(json.decoder.JSONDecoder):
6845class CustomJSONDecoder(json.JSONDecoder):
6846    """
6847    A custom JSON decoder that has special protocols for handling
6848    several types, including:
6849
6850    - `set`
6851    - `tuple` & `namedtuple`
6852    - `dict` (where keys aren't all strings)
6853    - `Requirement`
6854    - `SkillCombination`
6855    - `DecisionGraph`
6856    - `DiscreteExploration`
6857    - `MetricSpace`
6858
6859    Used by `fromJSON` and `loadCustom`.
6860
6861    When initializing it, you can set a custom parse format by supplying
6862    a 'parseFormat' keyword argument; by default a standard
6863    `ParseFormat` will be used.
6864
6865    Examples:
6866
6867    >>> r = base.ReqAny([
6868    ...     base.ReqCapability('power'),
6869    ...     base.ReqTokens('money', 5)
6870    ... ])
6871    >>> s = toJSON(r)
6872    >>> s
6873    '{"^^d": "R", "value": "(power|money*5)"}'
6874    >>> l = fromJSON(s)
6875    >>> r == l
6876    True
6877    >>> o = {1, 2, 'hi'}
6878    >>> s = toJSON(o)
6879    >>> s
6880    '{"^^d": "s", "values": [1, 2, "hi"]}'
6881    >>> l = fromJSON(s)
6882    >>> o == l
6883    True
6884    >>> zi = base.ZoneInfo(1, set(), set(), {}, [])
6885    >>> s = toJSON(zi)
6886    >>> c = (
6887    ... '{"^^d": "nt", "name": "ZoneInfo", "values": {'
6888    ... '"level": 1,'
6889    ... ' "parents": {"^^d": "s", "values": []},'
6890    ... ' "contents": {"^^d": "s", "values": []},'
6891    ... ' "tags": {},'
6892    ... ' "annotations": []'
6893    ... '}}'
6894    ... )
6895    >>> s == c
6896    True
6897    >>> setm = base.effect(set=("door", "open"))
6898    >>> s = toJSON(setm)
6899    >>> f = fromJSON(s)
6900    >>> f == setm
6901    True
6902    >>> pf = ParseFormat()
6903    >>> pf.unparseEffect(f)
6904    'set door:open'
6905    >>> pf.unparseEffect(f) == pf.unparseEffect(setm)
6906    True
6907
6908    TODO: SkillCombination example
6909    """
6910    def __init__(self, *args, **kwargs):
6911        if 'object_hook' in kwargs:
6912            outerHook = kwargs['object_hook']
6913            kwargs['object_hook'] = (
6914                lambda o: outerHook(self.unpack(o))
6915            )
6916            # TODO: What if it's a positional argument? :(
6917        else:
6918            kwargs['object_hook'] = lambda o: self.unpack(o)
6919
6920        if 'parseFormat' in kwargs:
6921            self.parseFormat = kwargs['parseFormat']
6922            del kwargs['parseFormat']
6923        else:
6924            self.parseFormat = ParseFormat()
6925
6926        super().__init__(*args, **kwargs)
6927
6928    def unpack(self, obj: Any) -> Any:
6929        """
6930        Unpacks an object; used as the `object_hook` for decoding.
6931        """
6932        if '^^d' in obj:
6933            asType = obj['^^d']
6934            if asType == 't':
6935                return tuple(obj['values'])
6936
6937            elif asType == 'nt':
6938                g = globals()
6939                name = obj['name']
6940                values = obj['values']
6941                # Use an existing global namedtuple class if there is
6942                # one that goes by the specified name, so that we don't
6943                # create too many spurious equivalent namedtuple
6944                # classes. But fall back on creating a new namedtuple
6945                # class if we need to:
6946                ntClass = g.get(name)
6947                if (
6948                    ntClass is None
6949                 or not issubclass(ntClass, tuple)
6950                 or not hasattr(ntClass, '_asdict')
6951                ):
6952                    # Now try again specifically in the base module where
6953                    # most of our namedtuples are defined (TODO: NOT this
6954                    # hack..., but it does make isinstance work...)
6955                    ntClass = getattr(base, name, None)
6956                    if (
6957                        ntClass is None
6958                     or not issubclass(ntClass, tuple)
6959                     or not hasattr(ntClass, '_asdict')
6960                    ):
6961                        # TODO: cache these...
6962                        ntClass = collections.namedtuple(  # type: ignore
6963                            name,
6964                            values.keys()
6965                        )
6966                ntClass = cast(Callable, ntClass)
6967                return ntClass(**values)
6968
6969            elif asType == 's':
6970                return set(obj['values'])
6971
6972            elif asType == 'd':
6973                return dict(obj['items'])
6974
6975            elif asType == 'R':
6976                return self.parseFormat.parseRequirement(obj['value'])
6977
6978            elif asType == 'SC':
6979                return self.parseFormat.parseSkillCombination(obj['value'])
6980
6981            elif asType == 'E':
6982                return self.parseFormat.parseEffect(obj['value'])
6983
6984            elif asType == 'Ch':
6985                return self.parseFormat.parseChallenge(obj['value'])
6986
6987            elif asType == 'Cd':
6988                return self.parseFormat.parseCondition(obj['value'])
6989
6990            elif asType == 'Cq':
6991                return self.parseFormat.parseConsequence(obj['value'])
6992
6993            elif asType == 'DG':
6994                baseGraph: networkx.MultiDiGraph = networkx.node_link_graph(
6995                    obj['node_links'],
6996                    edges="links"
6997                )  # type: ignore
6998                # TODO: Fix networkx stubs
6999                graphResult = core.DecisionGraph()
7000                # Copy over non-internal attributes
7001                for attr in dir(baseGraph):
7002                    if attr == "name":
7003                        continue
7004                    if not attr.startswith('__') or not attr.endswith('__'):
7005                        val = getattr(baseGraph, attr)
7006                        setattr(
7007                            graphResult,
7008                            attr,
7009                            copy.deepcopy(val)
7010                            # TODO: Does this copying disentangle too
7011                            # much? Which values even get copied this
7012                            # way?
7013                        )
7014
7015                if baseGraph.name != '':
7016                    graphResult.name = baseGraph.name
7017                graphResult.graph.update(obj['props'])  # type:ignore [attr-defined]  # noqa
7018                storedByEdge = obj['_byEdge']
7019                graphResult._byEdge = {
7020                    int(k): storedByEdge[k]
7021                    for k in storedByEdge
7022                }
7023                graphResult.zones = obj['zones']
7024                graphResult.unknownCount = obj['unknownCount']
7025                graphResult.equivalences = obj['equivalences']
7026                graphResult.reversionTypes = obj['reversionTypes']
7027                graphResult.nextMechanismID = obj['nextMechanismID']
7028                graphResult.mechanisms = {
7029                    int(k): v
7030                    for k, v in
7031                    obj['mechanisms'].items()
7032                }
7033                graphResult.globalMechanisms = obj['globalMechanisms']
7034                graphResult.nameLookup = obj['nameLookup']
7035                return graphResult
7036
7037            elif asType == 'DE':
7038                exResult = core.DiscreteExploration()
7039                exResult.situations = obj['situations']
7040                return exResult
7041
7042            elif asType == 'MS':
7043                msResult = base.MetricSpace(obj['name'])
7044                msResult.points = obj['points']
7045                msResult.nextID = obj['lastID'] + 1
7046                return msResult
7047
7048            else:
7049                raise NotImplementedError(
7050                    f"No special handling has been defined for"
7051                    f" decoding type '{asType}'."
7052                )
7053
7054        else:
7055            return obj

A custom JSON decoder that has special protocols for handling several types, including:

  • set
  • tuple & namedtuple
  • dict (where keys aren't all strings)
  • Requirement
  • SkillCombination
  • DecisionGraph
  • DiscreteExploration
  • MetricSpace

Used by fromJSON

When initializing it, you can set a custom parse format by supplying a 'parseFormat' keyword argument; by default a standard ParseFormat will be used.

Examples:

>>> r = base.ReqAny([
...     base.ReqCapability('power'),
...     base.ReqTokens('money', 5)
... ])
>>> s = toJSON(r)
>>> s
'{"^^d": "R", "value": "(power|money*5)"}'
>>> l = fromJSON(s)
>>> r == l
True
>>> o = {1, 2, 'hi'}
>>> s = toJSON(o)
>>> s
'{"^^d": "s", "values": [1, 2, "hi"]}'
>>> l = fromJSON(s)
>>> o == l
True
>>> zi = base.ZoneInfo(1, set(), set(), {}, [])
>>> s = toJSON(zi)
>>> c = (
... '{"^^d": "nt", "name": "ZoneInfo", "values": {'
... '"level": 1,'
... ' "parents": {"^^d": "s", "values": []},'
... ' "contents": {"^^d": "s", "values": []},'
... ' "tags": {},'
... ' "annotations": []'
... '}}'
... )
>>> s == c
True
>>> setm = base.effect(set=("door", "open"))
>>> s = toJSON(setm)
>>> f = fromJSON(s)
>>> f == setm
True
>>> pf = ParseFormat()
>>> pf.unparseEffect(f)
'set door:open'
>>> pf.unparseEffect(f) == pf.unparseEffect(setm)
True

TODO: SkillCombination example

CustomJSONDecoder(*args, **kwargs)

object_hook, if specified, will be called with the result of every JSON object decoded and its return value will be used in place of the given dict. This can be used to provide custom deserializations (e.g. to support JSON-RPC class hinting).

object_pairs_hook, if specified, will be called with the result of every JSON object decoded with an ordered list of pairs. The return value of object_pairs_hook will be used instead of the dict. This feature can be used to implement custom decoders. If object_hook is also defined, the object_pairs_hook takes priority.

parse_float, if specified, will be called with the string of every JSON float to be decoded. By default this is equivalent to float(num_str). This can be used to use another datatype or parser for JSON floats (e.g. decimal.Decimal).

parse_int, if specified, will be called with the string of every JSON int to be decoded. By default this is equivalent to int(num_str). This can be used to use another datatype or parser for JSON integers (e.g. float).

parse_constant, if specified, will be called with one of the following strings: -Infinity, Infinity, NaN. This can be used to raise an exception if invalid JSON numbers are encountered.

If strict is false (true is the default), then control characters will be allowed inside strings. Control characters in this context are those with character codes in the 0-31 range, including '\t' (tab), '\n', '\r' and '\0'.
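
Since this is a standard `json.JSONDecoder` subclass, it can also be handed to `json.loads` via `cls`; extra keyword arguments such as `parseFormat` are forwarded to this constructor. A minimal sketch:

>>> import json
>>> s = '{"^^d": "s", "values": [1, 2, "hi"]}'
>>> json.loads(s, cls=CustomJSONDecoder) == {1, 2, 'hi'}
True
>>> json.loads(s, cls=CustomJSONDecoder, parseFormat=ParseFormat()) == {1, 2, 'hi'}
True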

def unpack(self, obj: Any) -> Any:

Unpacks an object; used as the object_hook for decoding.
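
Each '^^d' tag selects one of the branches above. For instance, the 'd' tag rebuilds a dict from a list of key/value pairs, and the 't' tag rebuilds a tuple (a short sketch using the module-level `fromJSON`, which decodes with this class):

>>> fromJSON('{"^^d": "d", "items": [[1, "one"], [2, "two"]]}')
{1: 'one', 2: 'two'}
>>> fromJSON('{"^^d": "t", "values": [1, 2]}')
(1, 2)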
