# @+leo-ver=5-thin
# @+node:ekr.20170302123956.1: * @file ../doc/leoAttic.txt
# This is Leo's final resting place for dead code.
# New in Leo 6.7.5. The attic will contain only code retired in the present release.

# @@language python
# @@killbeautify
# @+all
# @+node:ekr.20240617085704.1: ** Permanent attic
# @+node:ekr.20230913144248.1: *3* g.SherlockTracer
# I am going to leave this class in the attic indefinitely.
# It might be useful as the base for other classes.
# @+node:ekr.20121128031949.12605: *4* class g.SherlockTracer
class SherlockTracer:
    """
    A stand-alone tracer class with many of Sherlock's features.

    This class should work in any environment containing the re, os and sys modules.

    The arguments in the pattern lists determine which functions get traced
    or which stats get printed. Each pattern starts with "+", "-", "+:" or
    "-:", followed by a regular expression::

    "+x"  Enables tracing (or stats) for all functions/methods whose name
          matches the regular expression x.
    "-x"  Disables tracing for functions/methods.
    "+:x" Enables tracing for all functions in the **file** whose name matches x.
    "-:x" Disables tracing for an entire file.

    Enabling and disabling depends on the order of arguments in the pattern
    list. Consider the arguments for the Rope trace::

    patterns=['+.*','+:.*',
        '-:.*\\lib\\.*','+:.*rope.*','-:.*leoGlobals.py',
        '-:.*worder.py','-:.*prefs.py','-:.*resources.py',])

    This enables tracing for everything, then disables tracing for all
    library modules, except for all rope modules. Finally, it disables the
    tracing for Rope's worder, prefs and resources modules.

    Being able to zero in on the code of interest can be a big help in
    studying other people's code. This is a non-invasive method: no tracing
    code needs to be inserted anywhere.

    Usage:

    g.SherlockTracer(patterns).run()
    """
    # Leo's @others directive: the methods defined in child nodes are inserted here.
    @others
# @+node:ekr.20121128031949.12602: *5* sherlock.__init__
def __init__(
    self,
    patterns: list[Any],
    indent: bool = True,
    show_args: bool = True,
    show_return: bool = True,
    verbose: bool = True,
) -> None:
    """SherlockTracer ctor.

    patterns:    list of "+/-/+:/-:" pattern strings (validated by set_patterns).
    indent:      True: indent calls and returns by stack depth.
    show_args:   True: show args for each function call.
    show_return: True: show returns from each function.
    verbose:     True: prefix each trace line with the file name.
    """
    self.bad_patterns: list[str] = []  # Bad patterns already reported (report once each).
    self.indent = indent  # True: indent calls and returns.
    self.contents_d: dict[str, list] = {}  # Keys are file names, values are file lines.
    self.n = 0  # The frame level on entry to run.
    self.stats: dict[str, dict] = {}  # Keys are full file names, values are dicts.
    self.patterns: list[Any] = None  # A list of regex patterns to match. Set by set_patterns below.
    self.pattern_stack: list[str] = []  # Saved pattern lists, managed by push/pop.
    self.show_args = show_args  # True: show args for each function call.
    self.show_return = show_return  # True: show returns from each function.
    self.trace_lines = True  # True: trace lines in enabled functions.
    self.verbose = verbose  # True: print filename:func
    self.set_patterns(patterns)
    try:  # Don't assume g.app exists.
        # Remove Qt's input hook so tracing does not fight the Qt event loop.
        from leo.core.leoQt import QtCore
        if QtCore:
            # pylint: disable=no-member
            QtCore.pyqtRemoveInputHook()
    except Exception:
        pass
# @+node:ekr.20140326100337.16844: *5* sherlock.__call__
def __call__(self, frame: Any, event: Any, arg: Any) -> Any:
    """Exists so that self.dispatch can return self.

    sys.settrace invokes this object directly for every trace event.
    """
    return self.dispatch(frame, event, arg)
# @+node:ekr.20140326100337.16846: *5* sherlock.bad_pattern
def bad_pattern(self, pattern: Any) -> None:
    """Warn (once per pattern) about an invalid Sherlock pattern."""
    if pattern in self.bad_patterns:
        return  # Already reported.
    self.bad_patterns.append(pattern)
    print(f"\nignoring bad pattern: {pattern}\n")
# @+node:ekr.20140326100337.16847: *5* sherlock.check_pattern
def check_pattern(self, pattern: str) -> bool:
    """Return True if pattern is a valid Sherlock pattern.

    Report (via self.bad_pattern) and return False for invalid ones.
    """
    prefixes = ('+:', '-:', '+', '-')
    try:
        # Find the first matching prefix; '+:'/'-:' are tested before '+'/'-'.
        matched = next((p for p in prefixes if pattern.startswith(p)), None)
        if matched is None:
            self.bad_pattern(pattern)
            return False
        # Compile-check the regex part; a bad regex raises, landing below.
        re.match(pattern[len(matched):], 'xyzzy')
        return True
    except Exception:
        self.bad_pattern(pattern)
        return False
# @+node:ekr.20121128031949.12609: *5* sherlock.dispatch
def dispatch(self, frame: Any, event: Any, arg: Any) -> Any:
    """Route a trace event to the appropriate handler, then re-arm the tracer."""
    handlers = {
        'call': lambda: self.do_call(frame, arg),
        'return': lambda: self.do_return(frame, arg) if self.show_return else None,
        'line': lambda: self.do_line(frame, arg) if self.trace_lines else None,
    }
    handler = handlers.get(event)
    if handler is not None:
        handler()
    # Return self so sys.settrace keeps this tracer active for nested scopes.
    return self
# @+node:ekr.20121128031949.12603: *5* sherlock.do_call & helper
def do_call(self, frame: Any, unused_arg: Any) -> None:
    """Trace through a function call.

    Prints one line per enabled call (with args when self.show_args)
    and always updates the per-file call counts in self.stats.
    """
    frame1 = frame  # Keep the original frame: the loop below consumes frame.
    code = frame.f_code
    file_name = code.co_filename
    locals_ = frame.f_locals
    function_name = code.co_name
    try:
        full_name = self.get_full_name(locals_, function_name)
    except Exception:
        full_name = function_name
    if not self.is_enabled(file_name, full_name, self.patterns):
        # 2020/09/09: Don't touch, for example, __ methods.
        return
    n = 0  # The number of callers of this def.
    while frame:
        frame = frame.f_back
        n += 1
    # Indent relative to the stack depth recorded in run().
    indent = ' ' * max(0, n - self.n) if self.indent else ''
    path = f"{os.path.basename(file_name):>20}" if self.verbose else ''
    leadin = '+' if self.show_return else ''
    args_list = self.get_args(frame1)
    if self.show_args and args_list:
        args_s = ','.join(args_list)
        args_s2 = f"({args_s})"
        if len(args_s2) > 100:
            # Too long for one line: dump the args as an indented list.
            print(f"{path}:{indent}{leadin}{full_name}")
            g.printObj(args_list, indent=len(indent) + 22)
        else:
            print(f"{path}:{indent}{leadin}{full_name}{args_s2}")
    else:
        print(f"{path}:{indent}{leadin}{full_name}")
    # Always update stats.
    d = self.stats.get(file_name, {})
    d[full_name] = 1 + d.get(full_name, 0)
    self.stats[file_name] = d
# @+node:ekr.20130111185820.10194: *6* sherlock.get_args
def get_args(self, frame: Any) -> list[str]:
    """Return a list of strings "name=val" for each arg in the function call.

    'self' is omitted, as are args whose value is falsy or whose
    representation (via self.show) is empty.
    """
    code = frame.f_code
    locals_ = frame.f_locals
    # co_argcount excludes *args and **kwargs; each adds one slot in co_varnames.
    n = code.co_argcount
    if code.co_flags & 4:  # CO_VARARGS: a *args parameter is present.
        n += 1
    if code.co_flags & 8:  # CO_VARKEYWORDS: a **kwargs parameter is present.
        n += 1
    result = []
    for i in range(n):
        name = code.co_varnames[i]
        if name == 'self':
            continue
        arg = locals_.get(name, '*undefined*')
        if not arg:
            continue  # Skip falsy values, matching Sherlock's terse style.
        if isinstance(arg, (list, tuple)):
            val_s = ','.join([self.show(z) for z in arg if self.show(z)])
            val = f"[{val_s}]"
        elif isinstance(arg, str):
            val = arg
        else:
            val = self.show(arg)
        if val:
            result.append(f"{name}={val}")
    return result
# @+node:ekr.20140402060647.16845: *5* sherlock.do_line (not used)
bad_fns: list[str] = []  # Files that could not be opened; each is reported once.

def do_line(self, frame: Any, arg: Any) -> None:
    """print each line of enabled functions.

    NOTE: deliberately disabled by the `if 1: return` guard below.
    """
    if 1:
        return  # Disabled: see the node's "(not used)" headline.
    code = frame.f_code
    file_name = code.co_filename
    locals_ = frame.f_locals
    name = code.co_name
    full_name = self.get_full_name(locals_, name)
    if not self.is_enabled(file_name, full_name, self.patterns):
        return
    n = frame.f_lineno - 1  # Apparently, the first line is line 1.
    d = self.contents_d
    # Read and cache the file's lines on first use.
    lines = d.get(file_name)
    if not lines:
        print(file_name)
        try:
            with open(file_name) as f:
                s = f.read()
        except Exception:
            if file_name not in self.bad_fns:
                self.bad_fns.append(file_name)
                print(f"open({file_name}) failed")
            return
        lines = g.splitLines(s)
        d[file_name] = lines
    line = lines[n].rstrip() if n < len(lines) else '<EOF>'
    if 0:
        print(f"{name:3} {line}")
    else:
        print(f"{g.shortFileName(file_name)} {n} {full_name} {line}")
# @+node:ekr.20130109154743.10172: *5* sherlock.do_return & helper
def do_return(self, frame: Any, arg: Any) -> None:  # Arg *is* used below.
    """Trace a return statement.

    Sets self.full_name (read by put_ret) before printing.
    """
    code = frame.f_code
    fn = code.co_filename
    locals_ = frame.f_locals
    name = code.co_name
    self.full_name = self.get_full_name(locals_, name)
    if not self.is_enabled(fn, self.full_name, self.patterns):
        return
    n = 0  # Stack depth, used by put_ret to compute the indentation.
    while frame:
        frame = frame.f_back
        n += 1
    path = f"{os.path.basename(fn):>20}" if self.verbose else ''
    if name and name == '__init__':
        try:
            # For ctors, report the newly created object, not arg (which is None).
            ret1 = locals_ and locals_.get('self', None)
            self.put_ret(ret1, n, path)
        except NameError:
            # NOTE(review): if the try body failed before ret1 was bound, this
            # handler itself raises NameError on ret1 — confirm the intent here.
            self.put_ret(f"<{ret1.__class__.__name__}>", n, path)
    else:
        self.put_ret(arg, n, path)
# @+node:ekr.20220605141445.1: *6* sherlock.put_ret
def put_ret(self, arg: Any, n: int, path: str) -> None:
    """Print arg, the value returned by a "return" statement.

    n is the stack depth; path is the (possibly empty) file-name prefix.
    Never raises: formatting failures are reported inline instead.
    """
    # +1 relative to do_call's indent: returns print one level deeper.
    indent = ' ' * max(0, n - self.n + 1) if self.indent else ''
    try:
        if isinstance(arg, types.GeneratorType):
            ret = '<generator>'
        elif isinstance(arg, (tuple, list)):
            ret_s = ','.join([self.show(z) for z in arg])
            if len(ret_s) > 40:
                # Too long for one line: dump the sequence instead.
                g.printObj(arg, indent=len(indent))
                ret = ''
            else:
                ret = f"[{ret_s}]"
        elif arg:
            ret = self.show(arg)
            if len(ret) > 100:
                ret = f"\n    {ret}"
        else:
            ret = '' if arg is None else repr(arg)
        print(f"{path}:{indent}-{self.full_name} -> {ret}")
    except Exception:
        exctype, value = sys.exc_info()[:2]
        try:  # Be extra careful.
            arg_s = f"arg: {arg!r}"
        except Exception:
            arg_s = ''  # arg.__class__.__name__
        print(
            f"{path}:{indent}-{self.full_name} -> "
            f"{exctype.__name__}, {value} {arg_s}"
        )
# @+node:ekr.20121128111829.12185: *5* sherlock.fn_is_enabled
def fn_is_enabled(self, func: Any, patterns: list[str]) -> bool:
    """Return True if stats for the given function/file name are enabled.

    func is first matched against the dangerous-function table, then
    against the '+:'/'-:' patterns in order (the last match wins).
    """
    if func in self.ignored_functions:
        return False

    def ignore_function() -> None:
        if func not in self.ignored_functions:
            self.ignored_functions.append(func)
            print(f"Ignore function: {func}")
    #
    # New in Leo 6.3. Never trace dangerous functions.
    # Bug fix: these must be raw strings so \b is a regex word boundary;
    # in a plain string '\b' is a literal backspace and never matches.
    table = (
        r'_deepcopy.*',
        # Unicode primitives.
        r'encode\b', r'decode\b',
        # System functions
        r'.*__next\b',
        '<frozen>', '<genexpr>', '<listcomp>',
        # '<decorator-gen-.*>',
        r'get\b',
        # String primitives.
        r'append\b', r'split\b', r'join\b',
        # File primitives...
        r'access_check\b', r'expanduser\b', r'exists\b', r'find_spec\b',
        r'abspath\b', r'normcase\b', r'normpath\b', r'splitdrive\b',
    )
    for z in table:
        if re.match(z, func):
            ignore_function()
            return False
    #
    # Legacy code. (Removed a leftover g.trace debug statement here.)
    try:
        enabled, pattern = False, None
        for pattern in patterns:
            if pattern.startswith('+:'):
                if re.match(pattern[2:], func):
                    enabled = True
            elif pattern.startswith('-:'):
                if re.match(pattern[2:], func):
                    enabled = False
        return enabled
    except Exception:
        self.bad_pattern(pattern)
        return False
# @+node:ekr.20130112093655.10195: *5* sherlock.get_full_name
def get_full_name(self, locals_: Any, name: str) -> str:
    """Return "ClassName::name" when locals_ contains self; otherwise just name."""
    try:
        user_self = locals_ and locals_.get('self', None)
        if user_self:
            return f"{user_self.__class__.__name__}::{name}"
    except Exception:
        pass  # Be safe: fall through to the bare name.
    return name
# @+node:ekr.20121128111829.12183: *5* sherlock.is_enabled
ignored_files: list[str] = []  # Base names of files that are never traced.
ignored_functions: list[str] = []  # Names of functions that are never traced.

def is_enabled(
    self,
    file_name: str,
    function_name: str,
    patterns: list[str] = None,
) -> bool:
    """Return True if tracing for function_name in the given file is enabled.

    Library files, pseudo files (<...>) and dangerous functions are never
    traced. Otherwise the last matching pattern wins.
    """
    #
    # New in Leo 6.3. Never trace through some files.
    if not os:
        return False  # Shutting down.
    base_name = os.path.basename(file_name)
    if base_name in self.ignored_files:
        return False

    def ignore_file() -> None:
        if base_name not in self.ignored_files:
            self.ignored_files.append(base_name)

    def ignore_function() -> None:
        if function_name not in self.ignored_functions:
            self.ignored_functions.append(function_name)

    if f"{os.sep}lib{os.sep}" in file_name:
        ignore_file()
        return False
    if base_name.startswith('<') and base_name.endswith('>'):
        ignore_file()
        return False
    #
    # New in Leo 6.3. Never trace dangerous functions.
    # Bug fix: these must be raw strings so \b is a regex word boundary;
    # in a plain string '\b' is a literal backspace and never matches.
    table = (
        r'_deepcopy.*',
        # Unicode primitives.
        r'encode\b', r'decode\b',
        # System functions
        r'.*__next\b',
        '<frozen>', '<genexpr>', '<listcomp>',
        # '<decorator-gen-.*>',
        r'get\b',
        # String primitives.
        r'append\b', r'split\b', r'join\b',
        # File primitives...
        r'access_check\b', r'expanduser\b', r'exists\b', r'find_spec\b',
        r'abspath\b', r'normcase\b', r'normpath\b', r'splitdrive\b',
    )
    for z in table:
        if re.match(z, function_name):
            ignore_function()
            return False
    #
    # Legacy code: apply the patterns in order; the last match wins.
    enabled = False
    if patterns is None:
        patterns = self.patterns
    for pattern in patterns:
        try:
            if pattern.startswith('+:'):
                if re.match(pattern[2:], file_name):
                    enabled = True
            elif pattern.startswith('-:'):
                if re.match(pattern[2:], file_name):
                    enabled = False
            elif pattern.startswith('+'):
                if re.match(pattern[1:], function_name):
                    enabled = True
            elif pattern.startswith('-'):
                if re.match(pattern[1:], function_name):
                    enabled = False
            else:
                self.bad_pattern(pattern)
        except Exception:
            self.bad_pattern(pattern)
    return enabled
# @+node:ekr.20121128111829.12182: *5* sherlock.print_stats
def print_stats(self, patterns: list[str] = None) -> None:
    """Print all accumulated statistics.

    patterns defaults to enabling everything. Whole files are filtered
    with fn_is_enabled; individual functions with is_enabled.
    """
    # Bug fix: corrected the "statisitics" typo in the printed header.
    print('\nSherlock statistics...')
    if not patterns:
        patterns = ['+.*', '+:.*',]
    for fn in sorted(self.stats.keys()):
        d = self.stats.get(fn)
        if self.fn_is_enabled(fn, patterns):
            result = sorted(d.keys())  # type:ignore
        else:
            result = [key for key in sorted(d.keys())  # type:ignore
                if self.is_enabled(fn, key, patterns)]
        if result:
            print('')
            # Show only the last two path components of the file name.
            fn = fn.replace('\\', '/')
            parts = fn.split('/')
            print('/'.join(parts[-2:]))
            for key in result:
                print(f"{d.get(key):4} {key}")
# @+node:ekr.20121128031949.12614: *5* sherlock.run
# Modified from pdb.Pdb.set_trace.

def run(self, frame: Any = None) -> None:
    """Start tracing at the given frame (default: the caller's frame)."""
    print("SherlockTracer.run:patterns:\n%s" % '\n'.join(self.patterns))
    target = frame if frame is not None else sys._getframe().f_back
    # Record the current stack depth so trace output can be indented
    # relative to it.
    depth = 0
    while target:
        target = target.f_back
        depth += 1
    self.n = depth
    # Pass self (not a bare function) so the trace hook can reach all methods.
    sys.settrace(self)
# @+node:ekr.20140322090829.16834: *5* sherlock.push & pop
def push(self, patterns: list[str]) -> None:
    """Push the old patterns and set the new."""
    self.pattern_stack.append(self.patterns)  # type:ignore
    self.set_patterns(patterns)  # Validates before installing.
    print(f"SherlockTracer.push: {self.patterns}")

def pop(self) -> None:
    """Restore the pushed patterns."""
    if self.pattern_stack:
        self.patterns = self.pattern_stack.pop()  # type:ignore
        print(f"SherlockTracer.pop: {self.patterns}")
    else:
        # Warn rather than raise: popping an empty stack is harmless.
        print('SherlockTracer.pop: pattern stack underflow')
# @+node:ekr.20140326100337.16845: *5* sherlock.set_patterns
def set_patterns(self, patterns: list[str]) -> None:
    """Install patterns, silently dropping any that fail validation."""
    valid = []
    for pattern in patterns:
        if self.check_pattern(pattern):
            valid.append(pattern)
    self.patterns = valid
# @+node:ekr.20140322090829.16831: *5* sherlock.show
def show(self, item: Any) -> str:
    """Return a compact, best-effort representation of item."""
    if not item:
        return repr(item)
    if isinstance(item, dict):
        return 'dict'  # Dicts are summarized, never expanded.
    if isinstance(item, str):
        r = repr(item)
        # Truncate long strings to 20 characters including the ellipsis.
        return r if len(r) <= 20 else r[:17] + '...'
    r = repr(item)
    # A Hack for mypy: collapse anonymous sentinel objects.
    return "_dummy" if r.startswith("<object object") else r
# @+node:ekr.20121128093229.12616: *5* sherlock.stop
def stop(self) -> None:
    """Stop all tracing."""
    sys.settrace(None)  # Uninstall the trace function set by run().
# @+node:ekr.20181202062518.1: *3* leoFastRedraw.py
"""
Gui-independent fast-redraw code.

For an explanation, see this thread:
https://groups.google.com/forum/#!topic/leo-editor/hpHyHU2sWtM
"""
import difflib
import re
import time
from leo.core import leoGlobals as g

class FastRedraw:
    # Leo's @others directive: the methods defined in child nodes are inserted here.
    @others

@language python
@tabwidth -4
@pagewidth 70
# @+node:ekr.20181202060924.4: *4* LeoGui.dump_diff_op_codes
def dump_diff_op_codes(self, a, b, op_codes):
    """Dump the opcodes returned by difflib.SequenceMatcher."""

    def summarize(aList):
        # Entries look like "level:gnx:headline"; keep only the headline.
        pat = re.compile(r'.*:.*:(.*)')
        return ', '.join([pat.match(z).group(1) for z in aList])

    for tag, i1, i2, j1, j2 in op_codes:
        if tag == 'equal':
            print(f"{tag:7} at {i1}:{i2} (both) ==> {summarize(b[j1:j2])!r}")
            continue
        if tag == 'insert':
            print(f"{tag:7} at {i1}:{i2} (b)    ==> {summarize(b[j1:j2])!r}")
            continue
        if tag in ('delete', 'replace'):
            print(f"{tag:7} at {i1}:{i2} (a)    ==> {summarize(a[i1:i2])!r}")
            if tag == 'replace':
                print(f"{tag:7} at {i1}:{i2} (b)    ==> {summarize(b[j1:j2])!r}")
            continue
        print('unknown tag')
# @+node:ekr.20181202060924.5: *4* LeoGui.dump_opcodes
def dump_opcodes(self, opcodes):
    """Dump the opcodes returned by app.peep_hole and app.make_redraw_list."""
    sep = ',\n    '
    for z in opcodes:
        kind = z[0]
        if kind == 'replace':
            _, i1, gnxs1, gnxs2 = z
            print(kind, i1)
            print("  a: [%s]" % sep.join(gnxs1))
            print("  b: [%s]" % sep.join(gnxs2))
        elif kind in ('delete', 'insert'):
            _, i1, gnxs = z
            print(kind, i1)
            print("  [%s]" % sep.join(gnxs))
        else:
            # Unknown opcode: dump it verbatim.
            print(z)
# @+node:ekr.20181202060924.2: *4* LeoGui.flatten_outline
def flatten_outline(self, c):
    """Return a flat list of strings "level:gnx" for all *visible* positions."""
    trace = False and not g.unitTesting  # Short-circuits: g is never evaluated.
    t1 = time.process_time()
    result: list[str] = []
    # Walk the top-level nodes; the helper recurses into expanded children.
    for p in c.rootPosition().self_and_siblings():
        self.extend_flattened_outline(result, p)
    if trace:
        t2 = time.process_time()
        print(f"app.flatten_outline: {len(result)} entries {t2 - t1:6.4f} sec.")
    return result

def extend_flattened_outline(self, aList, p):
    """Append p and all of p's *visible* descendants to aList."""
    # Unpadded fields: padding the fields causes problems later.
    aList.append(f"{p.level()}:{p.gnx}:{p.h}\n")
    if not p.isExpanded():
        return  # Children of collapsed nodes are not visible.
    for child in p.children():
        self.extend_flattened_outline(aList, child)
# @+node:ekr.20181202060924.3: *4* LeoGui.make_redraw_list
def make_redraw_list(self, a, b):
    """
    Diff the a (old) and b (new) outline lists.
    Then optimize the diffs to create a redraw instruction list.
    """
    trace = False and not g.unitTesting
    if a == b:
        return []  # Nothing changed: no redraw instructions needed.

    def gnxs(aList):
        """Return the gnx list. Do not try to remove this!"""
        return [z.strip() for z in aList]
    @others # Define local helpers
    d = difflib.SequenceMatcher(None, a, b)
    op_codes = list(d.get_opcodes())
    # dump_diff_op_codes(a, b, op_codes)
    #
    # Generate the instruction list, and verify the result.
    opcodes, result = [], []
    for tag, i1, i2, j1, j2 in op_codes:
        if tag == 'insert':
            opcodes.append(['insert', i1, gnxs(b[j1:j2])])
        elif tag == 'delete':
            opcodes.append(['delete', i1, gnxs(a[i1:i2])])
        elif tag == 'replace':
            opcodes.append(['replace', i1, gnxs(a[i1:i2]), gnxs(b[j1:j2])])
        # 'equal' ranges produce no opcodes; they still extend result below.
        result.extend(b[j1:j2])
    assert b == result, (a, b)  # Sanity check: the op codes reproduce b exactly.
    #
    # Run the peephole.
    opcodes = self.peep_hole(opcodes)
    if trace:
        print('app.make_redraw_list: opcodes after peephole...')
        self.dump_opcodes(opcodes)
    return opcodes
# @+node:ekr.20181202060924.6: *4* LeoGui.peep_hole
def peep_hole(self, opcodes):
    """Scan the list of opcodes, merging adjacent insert/delete pairs that
    move the same node into a single 'move' opcode.

    Returns a new list; opcodes itself is not modified.
    """
    i, result = 0, []
    while i < len(opcodes):
        op0 = opcodes[i]
        if i == len(opcodes) - 1:
            result.append(op0)
            break
        op1 = opcodes[i + 1]
        kind0, kind1 = op0[0], op1[0]
        # Merge an adjacent insert/delete pair (in either order) with the same gnx.
        if (
            kind0 == 'insert' and kind1 == 'delete' or
            kind0 == 'delete' and kind1 == 'insert'
        ):
            kind0, index0, gnxs0 = op0
            kind1, index1, gnxs1 = op1
            # Guard against empty gnx lists before comparing first elements.
            if gnxs0 and gnxs1 and gnxs0[0] == gnxs1[0]:
                result.append(['move', index0, index1, gnxs0, gnxs1])
                i += 2  # Don't scan either op again!
                # Bug fix: continue scanning the remaining opcodes.
                # The old `break` here silently dropped everything after the pair.
                continue
        # The default is to retain the opcode.
        result.append(op0)
        i += 1
    return result
# @+node:ekr.20181103094900.1: *3* ../plugins/leoflexx.py

# Disable mypy checking.
# type:ignore

@language python
@tabwidth -4
# pylint: disable=logging-not-lazy,unused-private-member,wrong-import-position

<< leoflexx: docstring >>
<< leoflexx: imports >>
<< leoflexx: assets >>

# flexx can't handle generators, so we *must* use comprehensions instead.
# pylint: disable=use-a-generator

@others
# @+node:ekr.20181122215342.1: *4* << leoflexx: docstring >>
@language md
@wrap
"""
leoflexx.py: LeoWapp (Leo as a web browser), implemented using flexx:
https://flexx.readthedocs.io/en/stable/

Start Leo using the --gui=browser option. Like this::

    leo --gui=browser
    leo --gui=browser-firefox

This file is the main line of the LeoWapp project.
https://github.com/leo-editor/leo-editor/issues/1005.

See #1005 for a status report and list of to-do's.

# Prerequisites

Install flexx: https://flexx.readthedocs.io/en/stable/start.html

# What you should see

You should see the flexx (Tornado) server start up in the console.
Something that looks like Leo should then appear in the browser. Everything
you see is real, and most of it is "live".
"""
# @+node:ekr.20181113041314.1: *4* << leoflexx: imports >>
import os
import re
import sys
import time
from typing import Optional

# Make the current directory importable.
path = os.getcwd()
if path not in sys.path:
    sys.path.insert(0, path)
del path  # Don't leak a module-level name.

# This is what Leo typically does.
# JS code can *not* use g.trace, g.callers or g.pdb.
from leo.core import leoGlobals as g
from leo.core import leoFrame
from leo.core import leoGui
from leo.core import leoMenu
from leo.core import leoNodes
from leo.core.leoAPI import StringTextWrapper
# Third-party imports.
try:
    from flexx import flx
    from pscript import RawJS
except Exception:
    flx = None
# @+node:ekr.20181111074958.1: *4* << leoflexx: assets >>
# Assets for ace JS editor, served from a CDN.
# NOTE(review): flx is None if the flexx import above failed, so these calls
# would then raise — presumably flexx is guaranteed here; confirm.
base_url = 'https://cdnjs.cloudflare.com/ajax/libs/ace/1.2.6/'
flx.assets.associate_asset(__name__, base_url + 'ace.js')
flx.assets.associate_asset(__name__, base_url + 'mode-python.js')
flx.assets.associate_asset(__name__, base_url + 'theme-solarized_dark.js')
# @+node:ekr.20181121040901.1: *4*  top-level functions
# @+node:ekr.20181121091633.1: *5* dump_event
def dump_event(ev):
    """Print a one-line description of the event."""
    # Prefer the source widget's title; fall back to its text.
    id_ = ev.source.title or ev.source.text
    prefix = '' if ev.new_value else 'un-'
    label = (prefix + ev.type).rjust(15)
    print('dump_event: ' + '%s: %s' % (label, id_))
# @+node:ekr.20181121040857.1: *5* get_root
def get_root():
    """
    Return the LeoBrowserApp instance.

    This is the same as self.root for any flx.Component.
    """
    # Only called at startup, so this will never be None.
    root = flx.loop.get_active_component().root
    # Guard against mis-wired component trees.
    assert isinstance(root, LeoBrowserApp), repr(root)
    return root
# @+node:ekr.20181112165240.1: *5* info (deprecated)
def info(s):
    """Send the string s to the flex logger, at level info."""
    text = s if isinstance(s, str) else repr(s)
    # A hack: the "Leo" prefix keeps the top-level suppression logic
    # from deleting this message.
    flx.logger.info('Leo: ' + text)
# @+node:ekr.20181103151350.1: *5* init
def init():
    """Plugin entry point: return the flx module (None if flexx failed to import)."""
    return flx
# @+node:ekr.20181203151314.1: *5* make_editor_function
def make_editor_function(name, node):
    """
    Instantiate the ace editor.

    Making this a top-level function avoids the need to create a common
    base class that only defines this as a method.

    NOTE: this runs on the JS side (transpiled by pscript); `window` and
    `ace` exist only in the browser.
    """
    global window  # pylint doesn't know about the window global.
    ace = window.ace.edit(node, 'editor')  # pylint: disable=undefined-variable
    ace.navigateFileEnd()  # otherwise all lines highlighted
    ace.setTheme("ace/theme/solarized_dark")
    if name == 'body':
        ace.getSession().setMode("ace/mode/python")  # This sets soft tabs.
    if name == 'minibuffer':
        pass  # To do: Disable line numbers.
    return ace
# @+node:ekr.20181113041410.1: *5* suppress_unwanted_log_messages (not used)
def suppress_unwanted_log_messages():
    """
    Suppress the "Automatically scrolling cursor into view" messages by
    *allowing* only important messages.
    """
    allowed = r'(Traceback|Critical|Error|Leo|Session|Starting|Stopping|Warning)'
    pattern = re.compile(allowed, re.IGNORECASE)
    # Messages not matching the pattern are dropped at INFO level.
    flx.set_log_level('INFO', pattern)
# @+node:ekr.20181115071559.1: *4* Py side: App & wrapper classes
# @+node:ekr.20181127151027.1: *5* class API_Wrapper (StringTextWrapper)
class API_Wrapper(StringTextWrapper):
    """
    A wrapper class that implements the high-level interface.

    Delegates widget updates to the corresponding flx widget
    (body, log or minibuffer) once the app has finished initializing.
    """

    def __init__(self, c, name):
        assert name in ('body', 'log', 'minibuffer'), repr(name)
        super().__init__(c, name)
        assert self.c == c
        assert self.name == name
        self.tag = 'py.%s.wrap' % name  # Prefix for debug output.
        self.root = get_root()  # The LeoBrowserApp instance.

    def flx_wrapper(self):
        # Before app.finish_create runs, return a do-nothing object instead.
        if self.root.inited:
            w = self.root.main_window
            return getattr(w, self.name)
        return g.NullObject()

    def setFocus(self):
        # print(self.tag, 'setFocus')
        self.flx_wrapper().set_focus()

    # No need to override getters.
    @others
# @+node:ekr.20181128101421.1: *6* API_Wrapper.Selection Setters
def finish_set_insert(self, tag):
    """Common helper for selection setters: sync ins/sel to the flx widget."""
    if 'select' in g.app.debug:
        tag = '%s.%s' % (self.tag, 'finish_set_insert')
        print('%30s: %s %r' % (tag, self.ins, self.sel))
    self.flx_wrapper().set_insert_point(self.ins, self.sel)

def seeInsertPoint(self):
    # Scroll the flx widget so the insert point becomes visible.
    self.flx_wrapper().see_insert_point()

def selectAllText(self, insert=None):
    super().selectAllText(insert)
    self.finish_set_insert('selectAllText')

def setInsertPoint(self, pos, s=None):
    super().setInsertPoint(pos, s)
    self.finish_set_insert('setInsertPoint')

def setSelectionRange(self, i: int, j: int, insert: Optional[int] = None):
    super().setSelectionRange(i, j, insert)
    self.finish_set_insert('setSelectionRange')
# @+node:ekr.20181127121642.1: *6* API_Wrapper.Text Setters
@language rest
@wrap
@
These methods implement Leo's high-level api for the body pane.

Consider Leo's sort-lines command. sort-lines knows nothing about which gui
is in effect. It updates the body pane using *only* the high-level api.

These methods must do two things:

1. Call the corresponding super() method to update self.s, self.i and self.ins.
2. Call the corresponding flx_body methods to update the flx_body widget,
   except while unit testing.
@c
@language python

def finish_setter(self, tag):
    """The common setter code: sync self.s to the vnode and the flx widget.

    NOTE(review): the tag parameter is immediately overwritten below and
    used only in the dead `if 0:` block — confirm before relying on it.
    """
    c = self.c
    tag = 'py.body.setter'
    if self.name == 'body':
        # print('%s: %s' % (tag, len(self.s)))
        # p.b = self.s will cause an unbounded recursion.
        c.p.v.setBodyString(self.s)
    if 0:
        print('%s: %s:  len(self.s): %s ins: %s sel: %r' % (
            self.tag, tag, len(self.s), self.ins, self.sel))
    if not g.unitTesting:
        self.flx_wrapper().set_text(self.s)
        self.flx_wrapper().set_insert_point(self.ins)

def appendText(self, s):
    super().appendText(s)
    self.finish_setter('appendText')

def delete(self, i: int, j: Optional[int] = None):
    super().delete(i, j)
    self.finish_setter('delete')

def deleteTextSelection(self):
    super().deleteTextSelection()
    self.finish_setter('deleteTextSelection')

def insert(self, i: int, s):
    # Called from doPlainChar, insertNewlineHelper, etc. on every keystroke.
    super().insert(i, s)
    self.finish_setter('insert')

def setAllText(self, s):
    # Called by set_body_text_after_select.
    super().setAllText(s)
    # Consistency fix: pass this method's own name (was 'insert', a copy-paste
    # slip). finish_setter currently ignores its tag arg, so behavior is unchanged.
    self.finish_setter('setAllText')
# @+node:ekr.20181206153831.1: *5* class DummyFrame
class DummyFrame(leoFrame.NullFrame):
    """
    A frame to keep Leo's core happy until we can call app.finish_create.
    """

    def __repr__(self):
        # Identify the frame by its commander's file name.
        return 'DummyFrame: %r' % self.c.shortFileName()

    __str__ = __repr__  # str() and repr() render identically.

# @+node:ekr.20181107052522.1: *5* class LeoBrowserApp
# pscript never converts flx.PyComponents to JS.

class LeoBrowserApp(flx.PyComponent):
    """
    The browser component of Leo in the browser.

    This is self.root for all flexx components.

    This is *not* g.app.
    """

    # The single main window; set via self._mutate in app.init.
    main_window = flx.ComponentProp(settable=True)

    @others
# @+node:ekr.20181126103604.1: *6*  app.Initing
# @+node:ekr.20181114015356.1: *7* app.create_all_data
def create_gnx_to_vnode(self):
    """Build the gnx -> vnode dict for all unique nodes of the outline."""
    t1 = time.process_time()
    # This is likely the only data that ever will be needed.
    self.gnx_to_vnode = {v.gnx: v for v in self.c.all_unique_nodes()}
    if 0:
        print('app.create_all_data: %5.3f sec. %s entries' % (
            (time.process_time() - t1), len(list(self.gnx_to_vnode.keys()))))
    self.test_round_trip_positions()
# @+node:ekr.20181124133513.1: *7* app.finish_create (leoflexx.py) (patches c.request_focus)
@flx.action
def finish_create(self):
    """
    Initialize all ivars and widgets.

    Called after all flx.Widgets have been fully inited!

    Monkey-patched originals are saved in old_* ivars so they
    remain reachable.
    """
    w = self.main_window
    self.c = c = g.app.log.c
    assert c
    # Init all redraw ivars
    self.create_gnx_to_vnode()
    # Select the proper position.
    c.selectPosition(c.p or c.rootPosition())
    c.contractAllHeadlines()
    c.redraw()  # 2380.
    # Monkey-patch the FindTabManager
    c.findCommands.minibuffer_mode = True
    c.findCommands.ftm = g.NullObject()
    # Monkey-patch c.request_focus
    self.old_request_focus = c.request_focus
    c.request_focus = self.request_focus
    # #1143: monkey-patch Leo's save commands.
    self.old_save_file = c.fileCommands.save
    c.fileCommands.save = self.save_file
    self.old_save_file_as = c.fileCommands.saveAs
    c.fileCommands.saveAs = self.save_file_as
    self.old_save_file_to = c.fileCommands.saveTo
    c.fileCommands.saveTo = self.save_file_to
    # Init the log, body, status line and tree.
    g.app.gui.writeWaitingLog2()
    self.set_body_text()
    self.set_status()
    # Init the focus. It's debatable...
    if 0:
        self.gui.set_focus(c, c.frame.tree)
        w.tree.set_focus()
    else:  # This definitely shows focus in the editor.
        self.gui.set_focus(c, c.frame.body)
        w.body.set_focus()
    # Set the inited flag *last*.
    self.inited = True
# @+node:ekr.20181216042806.1: *7* app.init (leoflexx.py)
def init(self):
    """
    Early, pre-widget initialization.

    Bail out unless Leo is running with --gui=browser; otherwise wrap
    Leo's frame in browser wrappers and create the flx main window.
    """
    # Set the ivars.
    global g  # Always use the imported g.
    self.inited = False  # Set in finish_create
    self.tag = 'py.app.wrap'
    # Open or get the first file.
    if not g.app:
        print('app.init: no g.app. g:', repr(g))
        return
    if not isinstance(g.app.gui, LeoBrowserGui):
        print('app.init: wrong g.app.gui:', repr(g.app.gui))
        return
    if not isinstance(g.app.log, leoFrame.NullLog):
        # This can happen in strange situations.
        # print('app.init: ===== Ctrl-H ===== ?')
        return
    # We are running with --gui=browser.
    c = g.app.log.c
    # self.gui must be a synonym for g.app.gui.
    self.c = c
    self.gui = gui = g.app.gui
    # Make sure everything is as expected.
    assert self.c and c == self.c
    assert g.app.gui.guiName() == 'browser'
    # When running from Leo's core, we must wait until now to set LeoBrowserGui.root.
    gui.root = get_root()
    # Check g.app.ivars.
    assert g.app.windowList
    for frame in g.app.windowList:
        assert isinstance(frame, DummyFrame), repr(frame)
    # Instantiate all wrappers here, not in app.finish_create.
    title = c.computeWindowTitle()
    c.frame = gui.lastFrame = LeoBrowserFrame(c, title, gui)
    # The main window will be created (much) later.
    main_window = LeoFlexxMainWindow()
    self._mutate('main_window', main_window)
# @+node:ekr.20190507110902.1: *6* app.action.cls
@flx.action
def cls(self):
    """Clear the console window."""
    g.cls()
# @+node:ekr.20181117163223.1: *6* app.action.do_key
# https://flexx.readthedocs.io/en/stable/ui/widget.html#flexx.ui.Widget.key_down
# See Widget._create_key_event in flexx/ui/_widget.py:

@flx.action
def do_key(self, ev, ivar):
    """
    LeoBrowserApp.do_key: The central key handler.

    Will be called *in addition* to any inner key handlers,
    unless the inner key handler calls e.preventDefault()

    The `<< ... >>` lines below are Leo noweb section references,
    expanded from the child nodes that follow.
    """
    if 'keys' in g.app.debug:
        g.trace(ev, ivar)
    c = self.c
    # Essential: there is no way to pass the actual wrapper.
    browser_wrapper = getattr(c.frame, ivar)
    << check browser_wrapper >>
    # ev is a dict, keys are type, source, key, modifiers
    # mods in ('Alt', 'Shift', 'Ctrl', 'Meta')
    key, mods = ev['key'], ev['modifiers']
    # Special case Ctrl-H and Ctrl-F.
    if mods == ['Ctrl'] and key in 'fh':
        command = 'find' if key == 'f' else 'head'
        self.do_command(command, key, mods)
        return
    << set char to the translated key name >>
    binding = '%s%s' % (''.join(['%s+' % (z) for z in mods]), char)
    << create key_event >>
    c.k.masterKeyHandler(key_event)
# @+node:ekr.20181129073812.1: *7* << check browser_wrapper >>
# Sanity check: the ivar must name one of the known browser wrapper panes.
assert isinstance(browser_wrapper, (
    LeoBrowserBody,
    LeoBrowserLog,
    LeoBrowserMinibuffer,
    LeoBrowserStatusLine,
    LeoBrowserTree,
)), repr(browser_wrapper)
# @+node:ekr.20181129073905.1: *7* << set char to the translated key name >>
# Map browser key names to the names Leo's key handling expects.
# Keys not in the table pass through unchanged.
d = {
    'ArrowDown': 'Down',
    'ArrowLeft': 'Left',
    'ArrowRight': 'Right',
    'ArrowUp': 'Up',
    'Enter': '\n',  # For k.fullCommand, etc.
    'PageDown': 'Next',
    'PageUp': 'Prior',
}
char = d.get(key, key)
# Leo spells the modifier 'Control', not the browser's 'Ctrl'.
if 'Ctrl' in mods:
    mods.remove('Ctrl')
    mods.append('Control')
# @+node:ekr.20181129073734.1: *7* << create key_event >>
# Create the key event, but don't bother tracing it.
# Temporarily clearing g.app.debug suppresses LeoKeyEvent's own traces.
old_debug = g.app.debug
try:
    g.app.debug = []
    key_event = leoGui.LeoKeyEvent(c,
        char=char,
        binding=binding,
        event={'c': c},
        w=browser_wrapper,
    )
finally:
    g.app.debug = old_debug
# @+node:ekr.20181124054536.1: *6* app.action.dump_dict
@flx.action
def dump_dict(self, obj, tag):
    """Pretty-print obj, labeled with tag."""
    # Note: flexx has problems with keyword args.
    g.printObj(obj, tag=tag)

# def message(self, s):
    # """For testing."""
    # print('app.message: %s' % s)
# @+node:ekr.20181207080933.1: *6* app.action.set_body_text & set_status
# These must be separate because they are called from the tree logic.

@flx.action
def set_body_text(self):
    """Load c.p.b into the body pane via the body wrapper."""
    commander = self.c
    # The wrapper sets the text *and* the insert point and selection range.
    commander.frame.body.wrapper.setAllText(commander.p.b)

@flx.action
def set_status(self):
    """Recompute the status line for c.p and push it to the main window."""
    commander = self.c
    window = self.main_window
    left, right = commander.frame.statusLine.update(commander.p.b, 0)
    window.status_line.update(left, right)
# @+node:ekr.20181122132345.1: *6* app.Drawing...
# @+node:ekr.20181113042549.1: *7* app.action.redraw
@flx.action
def redraw(self, p):
    """
    Send a **redraw list** to the tree.

    This is a recursive list of lists of items (ap, gnx, headline)
    describing all and *only* the presently visible nodes in the tree.

    As a side effect, app.make_redraw_dict updates all internal dicts.
    """
    trace = 'drawing' in g.app.debug
    tag = 'py.app.redraw'
    c = self.c
    p = p or c.p
    # #1142.
    c.selectPosition(p)
    w = self.main_window
    # Be careful: c.frame.redraw can be called before app.finish_create.
    if not w or not w.tree:
        if trace:
            print(tag, 'no w.tree')
        return
    # Do a full redraw.
    t1 = time.process_time()
    ap = self.p_to_ap(p)
    w.tree.select_ap(ap)
    redraw_dict = self.make_redraw_dict(p)
    w.tree.redraw_with_dict(redraw_dict)
    # Do not call c.setChanged() here.
    if trace:
        print('app.redraw: %5.3f sec.' % (time.process_time() - t1))
# @+node:ekr.20181111095640.1: *7* app.action.send_children_to_tree
@flx.action
def send_children_to_tree(self, parent_ap):
    """
    Call w.tree.receive_children(d), where d is compatible with make_redraw_dict:
        {
            'parent_ap': parent_ap,
            'items': [
                self.make_dict_for_position(p)
                    for p in p.children()
            ],
        }
    """
    p = self.ap_to_p(parent_ap)
    assert p, repr(parent_ap)
    if 0:
        # There is a similar trace in flx.tree.receive_children.
        print('send_children_to_tree: %s children' % len(list(p.children())))
        if 0:  # Corresponds to the trace in flx_tree.populate_children.
            for child in p.children():
                print('  ' + child.h)
    #
    # Always respond, even if there are no children.
    # This allows the tree widget to reset state properly.
    w = self.main_window
    # assert parent_ap == self.p_to_ap(p), '\n%r\n%r' % (parent_ap, self.p_to_ap(p))
        # This assert can fail because the expansion bits don't match.
    w.tree.receive_children({
        'parent_ap': parent_ap,
        'items': [
            self.make_dict_for_position(p)
                for p in p.children()
            # For compatibility with flx.tree.create_item_with_parent.
        ],
    })
# @+node:ekr.20181111203114.1: *7* app.ap_to_p
def ap_to_p(self, ap):
    """Convert an archived position back into a true Leo position."""
    outer_v = self.gnx_to_vnode[ap['gnx']]
    outer_index = ap['childIndex']
    # Rebuild the (vnode, childIndex) stack from the archived entries.
    stack = []
    for entry in ap['stack']:
        stack.append((self.gnx_to_vnode[entry['gnx']], entry['childIndex']))
    return leoNodes.Position(outer_v, outer_index, stack)
# @+node:ekr.20181124071215.1: *7* app.dump_top_level
def dump_top_level(self):
    """Dump the top-level nodes."""
    # The entire body is disabled (if 0): this is a debugging aid only.
    if 0:
        c = self.c
        print('\napp.dump_top_level...')
        # print('root:', c.rootPosition().h)
        # print(' c.p:', c.p.h)
        # print('')
        # print('Top-level nodes...')
        for p in c.rootPosition().self_and_siblings():
            print('  %5s %s' % (p.isExpanded(), p.h))
        print('')
# @+node:ekr.20181113043539.1: *7* app.make_redraw_dict & helper
def make_redraw_dict(self, p=None):
    """
    Return a **recursive**, archivable, list of lists describing all the
    visible nodes of the tree.

    As a side effect, recreate gnx_to_vnode.
    """
    c = self.c
    p = p or c.p
    t1 = time.process_time()
    c.expandAllAncestors(c.p)  # Guarantee that c.p will be shown.
    ap = self.p_to_ap(p)
    items = [
        self.make_dict_for_position(child)
        for child in c.rootPosition().self_and_siblings()
    ]
    result = {'c.p': ap, 'items': items}
    t2 = time.process_time()
    if 0:  # Timing trace, disabled.
        print('app.make_redraw_dict: %s direct children %5.3f sec.' % (
            len(list(c.rootPosition().self_and_siblings())), (t2 - t1)))
    return result
# @+node:ekr.20181113044701.1: *8* app.make_dict_for_position
def make_dict_for_position(self, p):
    """
    Recursively archive p and all its visible descendants.
    The caller guarantees that p itself is visible.
    """
    assert p.v
    self.gnx_to_vnode[p.v.gnx] = p.v
    if 0:
        # A superb trace. Similar traces exist in flx_tree.redraw_with_dict
        # and flx_tree.populate_children (and their helpers).
        print('make_dict_for_position: %s%s' % ('  ' * p.level(), p.v.h))
    children = []
    if p.isExpanded():  # Test p, not p.v: expansion is per-position.
        for child in p.children():
            children.append(self.make_dict_for_position(child))
    # Deliberately omit p.b: it would transfer far too much data.
    return {
        'ap': self.p_to_ap(p),
        'children': children,
        'gnx': p.v.gnx,
        'headline': p.v.h,
    }
# @+node:ekr.20190511091601.1: *6* app.Editing
# @+node:ekr.20181129122147.1: *7* app.edit_headline & helper
def edit_headline(self):
    """Simulate editing the headline by prompting in the minibuffer."""
    minibuffer = self.root.main_window.minibuffer
    minibuffer.set_text('Enter Headline: ')
    minibuffer.set_focus()

def edit_headline_completer(self, headline):
    """Completer stub for headline editing."""
    print('app.edit_headline_completer')
# @+node:ekr.20190512081356.1: *6* app.request_focus
def request_focus(self, w):
    """Monkey-patched c.request_focus."""
    tag = 'py.app.request_focus'
    trace = 'focus' in g.app.debug
    if not w:
        print('%30s: NO W: %s' % (tag, g.callers()))
        return
    # Schedule the change of focus to the first matching flx widget.
    mw = self.main_window
    pane_table = (
        ('body', mw.body),
        ('log', mw.log),
        ('mini', mw.minibuffer),
        ('tree', mw.tree),
    )
    for pane_name, flx_widget in pane_table:
        if pane_name not in repr(w):
            continue
        if trace:
            print('%30s: %s' % (tag, flx_widget.__class__.__name__))
        flx_widget.set_focus()
        return
    if trace:
        print('%30s: FAIL %r' % (tag, w))
# @+node:ekr.20190511091236.1: *6* app.Minibuffer
# @+node:ekr.20181127070903.1: *7* app.execute_minibuffer_command
def execute_minibuffer_command(self, commandName, char, mods):
    """Start the execution of a minibuffer command (called by app.do_command)."""
    # Execute directly via the completion method.
    d = {
        'char': char,
        'commandName': commandName,
        'headline': self.c.p.h,  # Debugging.
        'mods': mods,
    }
    self.complete_minibuffer_command(d)
# @+node:ekr.20190510133737.1: *7* app.action.complete_minibuffer_command
@flx.action
def complete_minibuffer_command(self, d):
    """
    Complete the minibuffer command using d.

    d has keys 'commandName', 'char' and 'mods' ('headline' is debug-only).
    Return True if a command was found and executed, else False.
    """
    c = self.c
    k, w = c.k, c.frame.body.wrapper
    #
    # Do the minibuffer command: like k.callAltXFunction.
    commandName, char, mods = d['commandName'], d['char'], d['mods']
    # Same as in app.do_key.
    binding = '%s%s' % (''.join(['%s+' % (z) for z in mods]), char)
    event = g.Bunch(
        c=c,
        char=char,
        stroke=binding,
        widget=w,  # Use the body pane by default.
    )
    # Another hack.
    k.functionTail = None
    if commandName and commandName.isdigit():
        # The line number Easter Egg.
        def func(event=None):
            c.gotoCommands.find_file_line(n=int(commandName))
    else:
        func = c.commandsDict.get(commandName)
    if func:
        # These must be done *after* getting the command.
        k.clearState()
        k.resetLabel()
        if commandName != 'repeat-complex-command':
            k.mb_history.insert(0, commandName)
        w = event and event.widget
        if hasattr(w, 'permanent') and not w.permanent:
            # In a headline that is being edited.
            # g.es('Can not execute commands from headlines')
            c.endEditing()
            c.bodyWantsFocusNow()
            # Change the event widget so we don't refer to the to-be-deleted headline widget.
            event.w = event.widget = c.frame.body.wrapper.widget
        else:
            # Important, so cut-text works, e.g.
            c.widgetWantsFocusNow(event and event.widget)
        try:
            func(event)
        except Exception:
            g.es_exception()
        return True
    if 0:
        # Not ready yet
        # Show possible completions if the command does not exist.
        if 1:  # Useful.
            k.doTabCompletion(list(c.commandsDict.keys()))
        else:  # Annoying.
            k.keyboardQuit()
            k.setStatusLabel('Command does not exist: %s' % commandName)
            c.bodyWantsFocus()
    return False
# @+node:ekr.20190511102058.1: *6* app.Save commands
# These all monkey-patch the corresponding c.fileCommands methods.
# @+node:ekr.20190511100908.1: *7* app.save_file
def save_file(self, fileName):
    """
    Monkey-patched override of c.fileCommands.save.

    Sync p.b before calling the original method.
    """
    if not self.root.inited:
        return
    if 'select' in g.app.debug:
        print('%30s: %s' % ('py.app.save_file', fileName))
    commander = self.c
    main_window = self.root.main_window
    main_window.body.sync_body_before_save_file({
        'ap': self.root.p_to_ap(commander.p),
        'fn': fileName,
    })

@flx.action
def complete_save_file(self, d):
    """Finish the save: sync the body from d, then call the original save."""
    self.update_body_from_dict_helper(d)  # The helper skips the usual checks.
    fn = d['fn']
    if 'select' in g.app.debug:
        print('%30s: %s' % ('py.app.complete_save_file', fn))
    self.old_save_file(fn)
# @+node:ekr.20190511102119.1: *7* app.save_file_as
def save_file_as(self, fileName):
    """
    Monkey-patched override of c.fileCommands.saveAs.

    Sync p.b before calling the original method.
    """
    if not self.root.inited:
        return
    if 'select' in g.app.debug:
        print('%30s: %s' % ('py.app.save_file_as', fileName))
    commander = self.c
    main_window = self.root.main_window
    main_window.body.sync_body_before_save_file_as({
        'ap': self.root.p_to_ap(commander.p),
        'fn': fileName,
    })

@flx.action
def complete_save_file_as(self, d):
    """Finish save-as: sync the body from d, then call the original saveAs."""
    self.update_body_from_dict_helper(d)  # Use the helper, to skip checks.
    fn = d['fn']
    if 'select' in g.app.debug:
        # Bug fix: the tag was a copy-paste of 'py.app.complete_save_file'.
        tag = 'py.app.complete_save_file_as'
        print('%30s: %s' % (tag, fn))
    self.old_save_file_as(fn)
# @+node:ekr.20190511102120.1: *7* app.save_file_to
def save_file_to(self, fileName):
    """
    Monkey-patched override of c.fileCommands.saveTo.

    Sync p.b before calling the original method.
    """
    if not self.root.inited:
        return
    if 'select' in g.app.debug:
        print('%30s: %s' % ('py.app.save_file_to', fileName))
    commander = self.c
    main_window = self.root.main_window
    main_window.body.sync_body_before_save_file_to({
        'ap': self.root.p_to_ap(commander.p),
        'fn': fileName,
    })

@flx.action
def complete_save_file_to(self, d):
    """Finish save-to: sync the body from d, then call the original saveTo."""
    self.update_body_from_dict_helper(d)  # Use the helper, to skip checks.
    fn = d['fn']
    if 'select' in g.app.debug:
        # Bug fix: the tag was a copy-paste of 'py.app.complete_save_file'.
        tag = 'py.app.complete_save_file_to'
        print('%30s: %s' % (tag, fn))
    self.old_save_file_to(fn)
# @+node:ekr.20181124095316.1: *6* app.Selecting...
# @+node:ekr.20181216051109.1: *7* app.action.complete_select
@flx.action
def complete_select(self, d):
    """Complete the selection of d['new_ap']."""
    self.update_body_from_dict(d)
    # tree.complete_select has direct access to the tree's ivars.
    self.c.frame.tree.complete_select(d)
# @+node:ekr.20181111202747.1: *7* app.action.select_ap
@flx.action
def select_ap(self, ap):
    """
    Select the position in Leo's core corresponding to the archived position.

    Nothing in the flx.tree needs to be updated.
    """
    assert ap, g.callers()
    c = self.c
    w = self.main_window
    p = self.ap_to_p(ap)
    assert p, (repr(ap), g.callers())
    left, right = c.frame.statusLine.update()
    w.status_line.update(left, right)
    w.tree.select_ap(ap)
    # Call LeoTree.select, but not self.select_p.
    c.frame.tree.super_select(p)
# @+node:ekr.20190506100026.1: *7* app.action.select_minibuffer
@flx.action
def select_minibuffer(self):
    """Select the minibuffer in response to a user click."""
    c = self.c
    key_event = g.app.gui.create_key_event(c, w=c.frame.body.wrapper)
    c.k.fullCommand(key_event)
# @+node:ekr.20181118061020.1: *7* app.action.select_p
@flx.action
def select_p(self, p):
    """
    Select the position in the tree.

    Called from LeoBrowserTree.select, so do *not* call c.frame.tree.select.
    """
    c = self.c
    w = self.main_window
    ap = self.p_to_ap(p)
    # Be careful during startup.
    if not (w and w.tree):
        return
    if 'select' in g.app.debug:
        tag = 'py.app.select_p'
        print('%30s: %4s %s %s' % (tag, len(p.b), p.gnx, p.h))
    w.tree.set_ap(ap)
    # #1142: This code was not the problem.
    body_wrapper = c.frame.body.wrapper
    w.body.set_text(body_wrapper.s)
    w.body.set_insert_point(body_wrapper.ins, body_wrapper.sel)
# @+node:ekr.20190510053112.1: *7* app.action.select_tree_using_ap
@flx.action
def select_tree_using_ap(self, ap):
    """Helper action, called from flx_tree.on_selected_event."""
    position = self.ap_to_p(ap)
    self.c.frame.tree.select(position)
# @+node:ekr.20181111204659.1: *7* app.p_to_ap (updates dict)
def p_to_ap(self, p):
    """
    Convert a true Leo position to a serializable archived position.

    As a side effect, keep gnx_to_vnode up to date.
    """
    if not p.v:
        print('app.p_to_ap: no p.v: %r %s' % (p, g.callers()))
        assert False, g.callers()
    gnx = p.v.gnx
    if gnx not in self.gnx_to_vnode:
        self.gnx_to_vnode[gnx] = p.v
    archived_stack = [
        {
            'gnx': stack_v.gnx,
            'childIndex': stack_index,
            'headline': stack_v.h,
        }
        for stack_v, stack_index in p.stack
    ]
    return {
        'childIndex': p._childIndex,
        'cloned': p.isCloned(),
        'expanded': p.isExpanded(),
        'gnx': p.v.gnx,
        'level': p.level(),
        'headline': p.h,
        'marked': p.isMarked(),
        'stack': archived_stack,
    }
# @+node:ekr.20181215154640.1: *7* app.update_body_from_dict & helper
def update_body_from_dict(self, d):
    """
    Update the *old* p.b from d.

    d contains 's' plus the 'ins_row/col' and 'sel_row/col/1/2' keys.
    Refuses to update if the presently-selected node no longer matches
    d['old_ap'].
    """
    tag = 'py.app.update_body_from_dict'
    p, v = self.c.p, self.c.p.v
    assert v, g.callers()
    # Check d['old_ap']: the selection may have moved underneath us.
    old_p = self.ap_to_p(d['old_ap'])
    if p != old_p:
        print('%30s: MISMATCH: %s ==> %s' % (tag, old_p.h, p.h))
        return
    if 'select' in g.app.debug:
        # Note: tag was redundantly reassigned here; the duplicate is removed.
        d_s = d['s']
        print('%30s: %4s %s %s' % (tag, ' ', p.gnx, p.h))
        print('%30s: p.b: %s d.s: %s' % (tag, len(p.b), len(d_s)))
        # self.dump_dict(d, tag)
    self.update_body_from_dict_helper(d)
# @+node:ekr.20190511094352.1: *8* app.update_body_from_dict_helper
def update_body_from_dict_helper(self, d):
    """Update the body dict, without checks."""
    c, v = self.c, self.c.p.v
    body_s = d['s']
    # Convert browser row/col coordinates to Python string indices.
    ins = g.convertRowColToPythonIndex(body_s, d['ins_row'], d['ins_col'])
    sel_start = g.convertRowColToPythonIndex(body_s, d['sel_row1'], d['sel_col1'])
    sel_end = g.convertRowColToPythonIndex(body_s, d['sel_row2'], d['sel_col2'])
    sel = (sel_start, sel_end)
    # Update v's ivars.
    v.setBodyString(body_s)
    v.insertSpot = ins
    # #2020: The old code updated a non-existent attribute.
    v.selectionStart = ins
    v.selectionLength = abs(sel_start - sel_end)
    if 0:
        # Update the body wrapper's ivars (for minibuffer commands).
        # These don't seem to work properly, and we are about to change nodes.
        w = c.frame.body.wrapper
        w.ins, w.sel, w.s = ins, sel, body_s
# @+node:ekr.20181122132009.1: *6* app.Testing...
# @+node:ekr.20181111142921.1: *7* app.action: do_command & helpers
@flx.action
def do_command(self, command, key, mods):
    """Intercept in-progress prompts and do_* testers; pass the rest to Leo."""
    c = self.c
    w = self.main_window
    # Intercept in-progress minibuffer prompts.
    headline_prompt = 'Set headline: '
    if command.startswith(headline_prompt):
        self.end_set_headline(command[len(headline_prompt):])
        return
    find_prompt = 'Find: '
    if command.startswith(find_prompt):
        self.end_find(command[len(find_prompt):])
        return
    # Dispatch do_* methods defined on this class.
    handler = getattr(self, 'do_' + command, None)
    if handler is not None:
        handler()
        return
    # Pass all other commands to Leo.
    self.execute_minibuffer_command(command, key, mods)
    c.k.keyboardQuit()
    w.body.set_focus()
# @+node:ekr.20181210054910.1: *8* app.do_cls
def do_cls(self):
    """Clear the console, quit any key state, and refocus the body."""
    c = self.c
    main_window = self.root.main_window
    g.cls()
    c.k.keyboardQuit()
    main_window.body.set_focus()
# @+node:ekr.20181210055704.1: *8* app.do_find & helpers
def do_find(self):
    """Start the interactive 'Find:' prompt in the minibuffer."""
    c = self.c
    event = leoGui.LeoKeyEvent(c,
        binding='',
        char='',
        event={'c': c},
        w=c.frame.miniBufferWidget,
    )
    # c.interactive collects the answer; app.end_find finishes the search.
    c.interactive(
        callback=self.terminate_do_find,
        event=event,
        prompts=['Find: ',],
    )

def terminate_do_find(self):
    """Never called."""
# @+node:ekr.20181210092900.1: *9* app.end_find
def end_find(self, pattern):
    """Execute the find command for pattern, using a stand-in find-tab manager."""
    c = self.c
    fc = c.findCommands

    # A minimal stand-in for Leo's FindTabManager, with fixed settings.
    class DummyFTM:
        def get_find_text(self):
            return pattern
        def get_change_text(self):
            return ''
        def get_settings(self):
            return g.Bunch(
                # Find/change strings...
                find_text=pattern,
                change_text='',
                # Find options...
                file_only=False,
                ignore_case=True,
                mark_changes=False,
                mark_finds=False,
                node_only=False,
                pattern_match=False,
                search_body=True,
                search_headline=True,
                suboutline_only=False,
                whole_word=True,
            )

    # Init the search.
    fc.ftm = DummyFTM()
    # Do the search.
    fc.find_next()
    if 1:  # Testing only?
        w = self.root.main_window
        c.k.keyboardQuit()
        c.redraw()
        w.body.set_focus()
# @+node:ekr.20181119103144.1: *8* app.do_focus
def do_focus(self):
    """Debug helper: force focus to the minibuffer with focus/keys tracing on."""
    c = self.c
    saved_debug = g.app.debug
    try:
        g.app.debug = ['focus', 'keys',]
        print('\ncalling c.set_focus(c.frame.miniBufferWidget.widget')
        c.set_focus(c.frame.miniBufferWidget.widget)
    finally:
        g.app.debug = saved_debug



# @+node:ekr.20181210055648.1: *8* app.do_head & helpers
def do_head(self):
    """Start the interactive 'Set headline:' prompt in the minibuffer."""
    c = self.c
    event = leoGui.LeoKeyEvent(c,
        binding='',
        char='',
        event={'c': c},
        w=c.frame.miniBufferWidget,
    )
    # c.interactive collects the answer; app.end_set_headline finishes the edit.
    c.interactive(
        callback=self.terminate_do_head,
        event=event,
        prompts=['Set headline: ',],
    )

def terminate_do_head(self, args, c, event):
    """never actually called."""
# @+node:ekr.20181210092817.1: *9* app.end_set_headline (leoflexx.py)
def end_set_headline(self, h):
    """Undoably set c.p's headline to h, like leoTree.onHeadChanged."""
    c, k, p, u = self.c, self.c.k, self.c.p, self.c.undoer
    main_window = self.root.main_window
    p.v.setHeadString(h)
    undo_type = 'set-headline'
    undo_data = u.beforeChangeNodeContents(p)
    if not c.changed:
        c.setChanged()
    p.setDirty()
    u.afterChangeNodeContents(p, undo_type, undo_data)
    k.keyboardQuit()
    c.redraw()
    main_window.body.set_focus()
# @+node:ekr.20181210054631.1: *8* app.do_redraw
def do_redraw(self):
    """Test helper: trigger a full redraw of the tree."""
    print('testing redraw...')
    self.redraw(None)
# @+node:ekr.20181210054631.2: *8* app.do_select
def do_select(self):
    """Test helper: select the 'Active Unit Tests' node via LeoBrowserTree.select."""
    print('testing select...')
    c = self.c
    h = 'Active Unit Tests'
    target = g.findTopLevelNode(c, h, exact=True)
    if target:
        c.frame.tree.select(target)  # LeoBrowserTree.select.
    else:
        print('app.do_select: not found: %s' % h)
# @+node:ekr.20181210054516.1: *8* app.do_test
def do_test(self):
    """Test helper: round-trip all positions, then select the test node."""
    c = self.c
    main_window = self.root.main_window
    print('testing positions...')
    self.test_round_trip_positions()
    self.do_select()
    c.k.keyboardQuit()
    c.redraw()
    main_window.body.set_focus()

# @+node:ekr.20181126104843.1: *7* app.test_full_outline
def test_full_outline(self, p):
    """Exercise the new diff-based redraw code on a fully-expanded outline."""
    c = self.c
    p = p.copy()
    # Don't call c.expandAllHeadlines: it calls c.redraw.
    for p2 in c.all_positions(copy=False):
        p2.expand()
    # Test the code.
    self.make_redraw_dict(p)  # Call this only for timing stats.
    # Restore the tree.
    for p2 in c.all_positions(copy=False):
        p2.contract()
    c.expandAllAncestors(p)  # Does not do a redraw.
# @+node:ekr.20181113180246.1: *7* app.test_round_trip_positions
def test_round_trip_positions(self):
    """Test the round tripping of p_to_ap and ap_to_p."""
    c = self.c
    # Bug fix: p_to_ap updates app.gnx_to_vnode. Save and restore it.
    old_d = self.gnx_to_vnode.copy()
    old_len = len(list(self.gnx_to_vnode.keys()))
    # t1 = time.process_time()
    for p in c.all_positions():
        ap = self.p_to_ap(p)
        p2 = self.ap_to_p(ap)
        assert p == p2, (repr(p), repr(p2), repr(ap))
    self.gnx_to_vnode = old_d
    new_len = len(list(self.gnx_to_vnode.keys()))
    assert old_len == new_len, (old_len, new_len)
    # print('app.test_round_trip_positions: %5.3f sec' % (time.process_time()-t1))
# @+node:ekr.20181115092337.3: *5* class LeoBrowserBody
class LeoBrowserBody(leoFrame.NullBody):
    """Browser wrapper for Leo's body pane, backed by an ace editor wrapper."""

    def __init__(self, frame):
        super().__init__(frame, parentFrame=None)
        self.c = c = frame.c
        self.root = get_root()
        self.tag = 'py.body.wrap'
        # Replace the StringTextWrapper with the ace wrapper.
        assert isinstance(self.wrapper, StringTextWrapper)
        self.wrapper = API_Wrapper(c, 'body')

    # The Ace Wrapper does almost everything.
    # Delegate all unknown attributes to it, but only once the app is inited.
    def __getattr__(self, attr):
        if self.root.inited:
            return getattr(self.wrapper, attr)
        raise AttributeError('app not inited')

    def getName(self):
        return 'body'  # Required for proper pane bindings.

# @+node:ekr.20181115092337.6: *5* class LeoBrowserFrame
class LeoBrowserFrame(leoFrame.NullFrame):
    """Browser wrapper for Leo's frame: instantiates all other pane wrappers."""

    def __init__(self, c, title, gui):
        """Ctor for the LeoBrowserFrame class."""
        super().__init__(c, title, gui)
        assert self.c == c
        frame = self
        self.root = get_root()
        self.tag = 'py.frame.wrap'
        #
        # Instantiate the other wrappers.
        self.body = LeoBrowserBody(frame)
        self.tree = LeoBrowserTree(frame)
        self.log = LeoBrowserLog(frame)
        self.menu = LeoBrowserMenu(frame)
        self.miniBufferWidget = LeoBrowserMinibuffer(c, frame)
        self.iconBar = LeoBrowserIconBar(c, frame)
        # NullFrame does this in createStatusLine.
        self.statusLine = LeoBrowserStatusLine(c, frame)
        # This is the DynamicWindow object.
        # There is no need to implement its methods now.
        self.top = g.NullObject()

    def finishCreate(self):
        """Override NullFrame.finishCreate."""
        # Deliberately empty: do not call self.createFirstTreeNode.

    @others
# @+node:ekr.20181113041113.1: *5* class LeoBrowserGui
class LeoBrowserGui(leoGui.NullGui):

    def __init__(self, gui_name='browser'):
        # leoTest.doTest special-cases the name "browser".
        super().__init__(guiName='browser')
        self.gui_name = gui_name  # May specify the actual browser.
        assert gui_name.startswith('browser')
        self.logWaiting = []
        self.root = None  # Will be set later.
        self.silentMode = True  # Don't print a single signon line.
        self.tag = 'py.gui'
        self.specific_browser = gui_name.lstrip('browser').lstrip(':').lstrip('-').strip()
        if not self.specific_browser:
            self.specific_browser = 'browser'
        self.consoleOnly = False  # Console is separate from the log.
        #
        # Monkey-patch g.app.writeWaitingLog to be a do-nothing.
        # This allows us to write the log much later.
        g.app.writeWaitingLog = self.writeWaitingLog1

    def insertKeyEvent(self, event, i):
        """Insert the key given by event in location i of widget event.w."""
        # Mysterious...
        assert False, g.callers()

    @others
# @+node:ekr.20181206153033.1: *6* gui.createLeoFrame
def createLeoFrame(self, c, title):
    """
    Override NullGui.createLeoFrame.

    We can't create a real flx.Frame until much later,
    so create a placeholder in g.app.windowList for app.finish_create.
    """
    self.lastFrame = DummyFrame(c, title, self)
    # A buglet in Leo's core makes this necessary:
    # LM.doPostPluginsInit tests g.app.windowList, maybe when it shouldn't.
    g.app.windowList.append(self.lastFrame)
    return self.lastFrame
# @+node:ekr.20181119141542.1: *6* gui.isTextWrapper
def isTextWrapper(self, w):
    """Return True if w is supposedly a text widget."""
    # isinstance is much more pythonic than using getName.
    wrapper_types = (
        LeoBrowserBody,
        LeoBrowserLog,
        LeoBrowserMinibuffer,
        # LeoFlexxTreeItem is excluded:
        # at present, Leo's core can not edit tree items.
        LeoBrowserStatusLine,
    )
    if isinstance(w, wrapper_types):
        return True
    return bool(g.unitTesting and isinstance(w, StringTextWrapper))
# @+node:ekr.20181119153936.1: *6* gui.focus...
def get_focus(self, *args, **kwargs):
    """Return the widget that last received focus."""
    return self.focusWidget

def set_focus(self, commander, widget):
    """Route focus to the browser wrapper for the given widget."""
    # Be careful during startup.
    if g.unitTesting:
        return
    if not self.root:
        return
    c = self.root.c
    if isinstance(widget, (
        LeoBrowserBody,
        LeoBrowserFrame,
        LeoBrowserLog,
        LeoBrowserMinibuffer,
        LeoBrowserTree,
    )):
        if 'focus' in g.app.debug:
            tag = 'gui.set_focus'
            print('%30s: %s' % (tag, widget.tag))
        widget.setFocus()
        self.focusWidget = widget
    elif isinstance(widget, API_Wrapper):
        # This does sometimes get executed.
        # Redirect the AceWrapper to the LeoBrowserBody.
        assert isinstance(c.frame.body, LeoBrowserBody), repr(c.frame.body)
        assert widget.name == 'body', repr(widget.name)
        c.frame.body.wrapper.setFocus()
    else:
        print('gui.set_focus: unknown widget', repr(widget), g.callers(6))
# @+node:ekr.20181206090210.1: *6* gui.writeWaitingLog1/2
def writeWaitingLog1(self, c=None):
    """Monkey-patched do-nothing version of g.app.writeWaitingLog."""

def writeWaitingLog2(self, c=None):
    """Called from app.finish_create: write the signon and all queued log entries."""
    w = self.root.main_window
    #
    # Print the signon.
    # NOTE(review): colors are collected but not used by w.log.put.
    table = [
        ('Leo Log Window', 'red'),
        ('flexx version: %s' % (flx.__version__), None),
        (g.app.signon, None),
        (g.app.signon1, None),
        (g.app.signon2, None),
    ]
    for message, color in table:
        if message.strip():
            w.log.put(message.rstrip())
    #
    # Write all the queued log entries.
        # c.setLog()
    g.app.logInited = True  # Prevent recursive call.
    for msg in g.app.logWaiting:
        s, color, newline = msg[:3]  # May have 4 elements.
        w.log.put(s.rstrip())
    g.app.logWaiting = []
    g.app.setLog(None)  # Essential when opening multiple files...
# @+node:ekr.20181202083305.1: *6* gui.runMainLoop
def runMainLoop(self):
    """Launch flexx and run Leo's main loop in the chosen browser runtime."""
    flx.launch(LeoBrowserApp, self.specific_browser or 'webruntime')
    flx.set_log_level('ERROR')  # Use 'INFO' for more detail.
    flx.run()
# @+node:ekr.20181115092337.21: *5* class LeoBrowserIconBar
class LeoBrowserIconBar(leoFrame.NullIconBarClass):
    """Browser wrapper for Leo's icon bar."""

    def __init__(self, c, parentFrame):
        super().__init__(c, parentFrame)
        assert self.c == c
        self.root = get_root()
        self.tag = 'py.icon bar.wrap'

    @others
# @+node:ekr.20181115092337.22: *5* class LeoBrowserLog
class LeoBrowserLog(leoFrame.NullLog):
    """Browser wrapper for Leo's log pane."""

    def __init__(self, frame, parentFrame=None):
        super().__init__(frame, parentFrame)
        self.root = get_root()
        self.tag = 'py.log.wrap'
        assert isinstance(self.widget, StringTextWrapper)
        # All three ivars point at the same StringTextWrapper.
        self.logCtrl = self.widget
        self.logWidget = self.widget
        self.wrapper = self.widget
        #
        # Monkey-patch self.wrapper, a StringTextWrapper.
        assert self.wrapper.getName().startswith('log')
        self.wrapper.setFocus = self.setFocus

    def __repr__(self):
        return 'LeoBrowserLog'

    __str__ = __repr__

    def getName(self):
        return 'log'  # Required for proper pane bindings.

    @others
# @+node:ekr.20181120063043.1: *6* log_wrapper.setFocus
def setFocus(self):
    """Give keyboard focus to the flx log pane."""
    self.root.main_window.log.set_focus()
# @+node:ekr.20181120063111.1: *6* log_wrapper.put & putnl
def put(self, s, color=None, tabName='Log', from_redirect=False, nodeLink=None):
    """Write s to the browser log pane. The color/tab arguments are ignored."""
    self.root.main_window.log.put(s)

def putnl(self, tabName='Log'):
    """Write a blank line to the browser log pane."""
    self.root.main_window.log.put('')
# @+node:ekr.20181115092337.31: *5* class LeoBrowserMenu
class LeoBrowserMenu(leoMenu.NullMenu):
    """Browser wrapper for menus."""
    # NOTE(review): the ctor override below was apparently never needed;
    # NullMenu's behavior suffices, so everything remains commented out.

    # def __init__(self, frame):
        # super().__init__(frame)
        # self.root = get_root()
        # self.tag = '(py.menu.wrap)'

    # @verbatim
    # @others
# @+node:ekr.20181115120317.1: *5* class LeoBrowserMinibuffer (StringTextWrapper)
# Leo's core doesn't define a NullMinibuffer class.

class LeoBrowserMinibuffer(StringTextWrapper):
    """Browser wrapper for minibuffer."""

    def __init__(self, c, frame):
        # Name must be minibuffer, for gui.isTextWrapper().
        super().__init__(c, name='minibuffer')
        assert self.c == c, repr(self.c)
        # assert c.frame == frame, (repr(c.frame), repr(frame))
            # c.frame is a NullFrame.  frame is a LeoBrowserFrame.
        assert self.widget is None, repr(self.widget)
        assert self.getName() == 'minibuffer'
        self.frame = frame
        self.root = get_root()
        self.tag = 'py.mini.wrap'
        # This wrapper is its own widget.
        self.widget = self
        self.wrapper = self
        # Hook this class up to the key handler.
        c.k.w = self
        c.miniBufferWidget = self  # #1146.
        frame.minibufferWidget = self

    # Overrides.
    def setFocus(self):
        """Give keyboard focus to the flx minibuffer."""
        self.root.main_window.minibuffer.set_focus()

    @others
# @+node:ekr.20181208062449.1: *6* mini.called by k.minibuffer
# Override the methods called by k.minibuffer:

# Cached values from the last call to update(): skip no-op widget updates.
last_ins = None
last_sel = None
last_s = None

def update(self, tag):
    """Push the minibuffer's text, selection and insert point to the flx widget."""
    w = self.root.main_window
    i, j = self.sel
    # Do nothing if there will be no changes.
    unchanged = (
        self.s == self.last_s
        and self.last_ins == self.ins
        and self.sel == self.last_sel
    )
    if unchanged:
        return
    # Remember the present values.
    self.last_ins, self.last_s, self.last_sel = self.ins, self.s, self.sel
    if 'select' in g.app.debug:
        print('%30s: sel: %3s %3s ins: %3s len(s): %s' % (
            'py.mini.update', i, j, self.ins, len(self.s)))
    w.minibuffer.set_text(self.s)
    w.minibuffer.set_selection(i, j)
    w.minibuffer.set_insert(self.ins)

def delete(self, i: int, j: Optional[int] = None):
    """Delete s[i:j], then sync the flx widget."""
    super().delete(i, j)
    self.update('delete')

def getAllText(self):
    """Return the minibuffer's entire text."""
    return self.s

def insert(self, i: int, s: str) -> None:
    """Insert s at index i, then sync the flx widget."""
    super().insert(i, s)
    self.update('insert')

def setAllText(self, s):
    """Replace the entire text, then sync the flx widget."""
    super().setAllText(s)
    self.update('setAllText')

def setSelectionRange(self, i: int, j: int, insert: Optional[int] = None):
    """Set the selection range (and optionally the insert point), then sync the flx widget."""
    super().setSelectionRange(i, j, insert)
    self.update('setSelectionRange')

def setStyleClass(self, name):
    """Forward a style-class change to the flx minibuffer widget."""
    w = self.root.main_window
    if not w:
        g.trace('NO MAIN WINDOW')
        return
    w.minibuffer.set_style(name)
    self.update('setStyleClass:%r' % name)
# @+node:ekr.20181115092337.32: *5* class LeoBrowserStatusLine
class LeoBrowserStatusLine(leoFrame.NullStatusLineClass):
    """Browser wrapper for Leo's status line."""

    def __init__(self, c, parentFrame):
        super().__init__(c, parentFrame)
        self.root = get_root()
        self.tag = 'py.status.wrap'
        self.w = self  # Required.

    def getName(self):
        return 'status line'  # Value not important.

    # Pretend that this widget supports the high-level interface.
    def __getattr__(self, attr):
        return g.NullObject()

    @others
# @+node:ekr.20181119045430.1: *6* status_line_wrapper.clear & get
def clear(self):
    """Clear the status line: a no-op in the browser gui."""

def get(self):
    """Return the status line's text. Not implemented for the browser gui."""
    print('status_line.get: NOT READY')
    return ''
# @+node:ekr.20181119045343.1: *6* status_line_wrapper.put and put1
def put(self, s, bg=None, fg=None):
    """Set the first status-line field to s."""
    w = self.root.main_window
    if w and w.status_line:  # Be careful during startup.
        w.status_line.put(s, bg, fg)

def put1(self, s, bg=None, fg=None):
    """Set the second status-line field to s."""
    w = self.root.main_window
    if w and w.status_line:  # Be careful during startup.
        w.status_line.put2(s, bg, fg)
# @+node:ekr.20181119154422.1: *6* status_line_wrapper.setFocus
def setFocus(self):
    # NOTE(review): unlike the other setFocus wrappers, this one does not go
    # through self.root.main_window — confirm self.root.status_line exists.
    self.root.status_line.set_focus()
# @+node:ekr.20181119042937.1: *6* status_line_wrapper.update
def update(self, body_text='', insert_point=0):
    """
    Update the status line, based on the contents of the body.

    Called from LeoTree.select.

    Returns (lt_part, rt_part) for LeoBrowserApp.init.
    """
    # pylint: disable=arguments-differ
    if g.app.killed:
        return None
    c = self.c
    p = c.p
    if not p:
        return None
    # Left side: the cursor position within the body.
    row, col = g.convertPythonIndexToRowCol(body_text, insert_point)
    fcol = c.gotoCommands.find_node_start(p)
    left = "line: %2d col: %2d fcol: %s" % (row, col, fcol or '')
    # Right side: the file name plus the path of headlines down to p.
    ancestors = [v.h for (v, childIndex) in p.stack]
    ancestors.append(p.h)
    right = '%s#%s' % (g.shortFileName(c.fileName()), '->'.join(ancestors))
    # Update the status area.
    self.put(left)
    self.put1(right)
    return left, right
# @+node:ekr.20181115092337.57: *5* class LeoBrowserTree
class LeoBrowserTree(leoFrame.NullTree):
    """Browser wrapper for Leo's tree (outline) pane."""

    def __init__(self, frame):
        super().__init__(frame)
        self.root = get_root()
        # True: a selection is in progress.  See tree.select.
        self.select_lockout = False
        self.tag = 'py.tree.wrap'
        self.widget = self
        self.wrapper = self

    def getName(self):
        return 'canvas(tree)'  # Required for proper pane bindings.

    @others
# @+node:ekr.20181116081421.1: *6* tree.selection...


# @+node:ekr.20190508121417.1: *7* tree.complete_select
def complete_select(self, d):
    """
    Complete the selection of the tree.

    d['new_ap'] is the archived position of the node being selected.
    """
    trace = 'select' in g.app.debug
    tag = 'py.tree.complete_select'
    if not self.new_p:
        self.select_lockout = False
        # Bug fix: the original print was missing its '% tag' argument,
        # which would raise at runtime on this (error) path.
        print('%30s: can not happen: no new_p' % tag)
        return
    p = self.root.ap_to_p(d['new_ap'])
    if p != self.new_p:
        print('%30s: expected: %s, got: %s' % (tag, self.new_p.h, p.h))
        return
    if trace:
        print('%30s: %4s %s %s' % (tag, len(p.b), p.gnx, p.h))
    #
    # Allow a new select.
    self.select_lockout = False
    #
    # Make everything official in Leo's core.
    super().select(p)  # Call LeoTree.select.
    self.root.select_p(p)  # Call app.select_position.
# @+node:ekr.20190508121510.1: *7* tree.endEditLabel
def endEditLabel(self):
    """
    End editing.

    This must be a do-nothing, because app.end_set_headline takes its place.
    """
    # print('flx.tree.endEditLabel')
# @+node:ekr.20190508121414.1: *7*  tree.select
# The lockout ensures that old_p never changes during the selection process.
select_lockout = False
old_p = None
new_p = None

def select(self, p):
    """
    Override LeoTree.select.

    Operations across the Python/JS divide do not happen immediately. As a result,
    the selection must happen in phases:

    Phase 1a: PY side:
    - Remember the old and new p.
    - Schedule sync_body_before_select to update the old p.b on the JS side.
    - Set the lockout, so only one JS update is ever done.

    Phase 1b: PY side:
    - If the lockout is set, just update the *new* p.

    Phase 2: JS side:
    - Update the *old* p.b and related vnode ivars from the body pane.
      Important: The lockout never changes the *old* p.
    - Call flx.tree.complete_select to update the widget.
    - Schedule app.complete_select.

    Phase 3: PY side:
    - complete_select clears the lockout and makes the selection official
      (see tree.complete_select).
    """
    trace = 'select' in g.app.debug
    tag = 'py.tree.select'
    w = self.root.main_window
    if self.select_lockout:
        # Phase 1b: a selection is already in progress; just record the new target.
        self.new_p = p.copy()
        if trace:
            print('%30s: %s %s' % (tag, ' Lockout', p.h))
        return
    if not self.root.inited:
        # Don't sync the body pane during startup.
        super().select(p)  # Call LeoTree.select
        self.root.select_p(p)  # Call app.select_position.
        return
    #
    # Phase 1a: begin the selection.
    self.select_lockout = True
    self.new_p = p.copy()
    old_p = self.root.c.p
    if trace:
        print('\n%30s: %4s %s ==> %s %s ' % (
            tag, len(old_p.v._bodyString), old_p.h, len(p.v._bodyString), p.h))
    #
    # Schedule JS side to:
    # 1. Update *old* p.b from flx.body.
    #    The lockout ensures that the old p never changes.
    # 2. Schedule self.complete_select.
    w.body.sync_body_before_select({
        'old_ap': self.root.p_to_ap(old_p),
        'new_ap': self.root.p_to_ap(p),
    })
# @+node:ekr.20190508121417.2: *7* tree.select_ap
def select_ap(self, ap):
    """Select the position corresponding to the archived position ap."""
    p = self.root.ap_to_p(ap)
    if 'select' in g.app.debug:
        print('%30s: %s %s' % ('py.tree.select_ap', p.v.fileIndex, p.v._headString))
    self.select(p)
# @+node:ekr.20190508121417.3: *7* tree.super_select
def super_select(self, p):
    """Call only LeoTree.select."""
    if 'select' in g.app.debug:
        print('%30s %4s %s %s' % ('py.tree.super_select', len(p.b), p.gnx, p.h))
    super().select(p)

# @+node:ekr.20181118052203.1: *6* tree.redraw
def redraw(self, p=None):
    """This is c.frame.tree.redraw: delegate to the app."""
    self.root.redraw(p)
# @+node:ekr.20181120063844.1: *6* tree.setFocus
def setFocus(self):
    """Give keyboard focus to the flx tree widget."""
    self.root.main_window.tree.set_focus()
# @+node:ekr.20181119094122.1: *5* class TracingNullObject (leoflexx.py)
@nobeautify

class TracingNullObject:
    """
    A tracing version of g.NullObject.

    Instances swallow all calls, compare falsy, and print every attribute
    get/set/delete along with the callers.
    """

    def __init__(self, *args, **keys):
        pass

    def __bool__(self):
        return False

    def __call__(self, *args, **keys):
        return self

    def __repr__(self):
        return "NullObject"

    __str__ = __repr__

    def __delattr__(self, attr):
        print('NullObject.__delattr__ %r %s' % (attr, g.callers()))
        return self

    def __getattr__(self, attr):
        print('NullObject.__getattr__ %r %s' % (attr, g.callers()))
        return self

    def __setattr__(self, attr, val):
        print('NullObject.__setattr__ %r %s' % (attr, g.callers()))
        return self
# @+node:ekr.20181107052700.1: *4* Js side: flx.Widgets
# @+node:ekr.20181201125953.1: *5* class JS_Editor (flx.Widget)
class JS_Editor(flx.Widget):
    """
    The base class for the body and log panes.
    """

    def init(self, name, flex=1):
        # pylint: disable=arguments-differ
        assert name in ('body', 'log', 'minibuffer'), repr(name)
        self.name = name
        self.tag = 'flx.%s' % name
        self.editor = None  # Now done in subclasses.

    @flx.reaction('size')
    def __on_size(self, *events):
        # Keep the embedded editor sized to this widget.
        self.editor.resize()

    @others
# @+node:ekr.20181121072246.1: *6* jse.Keys
# @+node:ekr.20181215083729.1: *7* jse.key_press & on_key_press
# Per-instance key state: dedupe repeated key-down traces and swallow stray key-ups.
last_down = None
ignore_up = False

@flx.emitter  # New
def key_down(self, e):
    """Emit a key_down event; prevent the browser default for keys Leo should handle."""
    trace = False and 'keys' in g.app.debug
    self.ignore_up = False
    ev = self._create_key_event(e)
    down = '%s %r' % (self.name, ev)
    if down != self.last_down:
        self.last_down = down
        if trace:
            print('     jse.key_down:', down)
    if self.should_be_leo_key(ev):
        e.preventDefault()
    return ev

@flx.emitter  # New
def key_up(self, e):
    """Emit a key_up event; forward keys Leo should handle to root.do_key."""
    trace = False and 'keys' in g.app.debug
    self.last_down = None  # Enable key downs.
    ev = self._create_key_event(e)
    if self.ignore_up:
        if trace:
            print('IGNORE jse.key_up: %s %r' % (self.name, ev))
        e.preventDefault()
        return ev
    self.ignore_up = True  # Ignore all further key ups, until the next key down.
    should_be_leo = bool(self.should_be_leo_key(ev))
    if trace:
        print('       jse.key_up: %s %r Leo: %s' % (self.name, ev, should_be_leo))
    if should_be_leo:
        e.preventDefault()
        # Leo's core stores the minibuffer under a different ivar name.
        ivar = 'minibufferWidget' if self.name == 'minibuffer' else self.name
        self.root.do_key(ev, ivar)
    return ev

@flx.emitter
def key_press(self, e):
    """Emit a key_press event, resetting the key_down/key_up state machine."""
    trace = 'keys' in g.app.debug
    ev = self._create_key_event(e)
    # Init the key_down state.
    self.last_down = None
    # Ignore all key ups until the next key down.
    self.ignore_up = True
    if trace:
        print('    jse.key_press: %s %r' % (self.name, ev))
    if self.should_be_leo_key(ev):
        e.preventDefault()
    return ev

@flx.reaction('key_press')
def on_key_press(self, *events):
    # The JS editor has already** handled the key!
    # Forward keys Leo should handle to root.do_key.
    for ev in events:
        if 'keys' in g.app.debug:
            print(' jse.on_key_press: %s %r' % (self.name, ev))
        if self.should_be_leo_key(ev):
            # Leo's core stores the minibuffer under a different ivar name.
            ivar = 'minibufferWidget' if self.name == 'minibuffer' else self.name
            self.root.do_key(ev, ivar)
# @+node:ekr.20181201081444.1: *7* jse.should_be_leo_key
def should_be_leo_key(self, ev):
    """
    Return a truthy value (the list of non-Shift modifiers) if Leo should
    handle the key, else False.

    Leo handles only modified keys, not F-keys or plain keys.
    """
    trace = False and 'keys' in g.app.debug
    tag = 'JSE.should_be_leo_key'
    key, mods = ev['key'], ev['modifiers']
    #
    # The widget handles F-keys.
    if not mods and key.startswith('F'):
        if trace:
            print('%s: %r %r return: false' % (tag, mods, key))
        return False
    # Bug fix: copy the modifier list before removing 'Shift'.  The original
    # aliased it, mutating ev['modifiers'], which callers pass on to root.do_key.
    mods2 = mods[:]
    if 'Shift' in mods2:
        mods2.remove('Shift')
    #
    # Send only Ctrl-Arrow keys to body.
    if mods2 == ['Ctrl'] and key in ('RtArrow', 'LtArrow', 'UpArrow', 'DownArrow'):
        # This never fires: Neither editor widget emits Ctrl-Arrow keys or Alt-Arrow keys.
        if trace:
            print(tag, 'Arrow key: send to body', repr(mods), repr(key))
        return False
    #
    # Leo should handle all other modified keys.
    if trace:
        print('%s: %r %r return: %s' % (tag, mods, key, bool(mods2)))
    return mods2
# @+node:ekr.20181215083642.1: *6* jse.focus
@flx.action
def see_insert_point(self):
    """Would scroll to the insert point; presently a trace-only stub."""
    if 'focus' in g.app.debug:
        tag = '%s.%s' % (self.tag, 'jse.see_insert_point')
        print('%30s:' % tag)

@flx.action
def set_focus(self):
    """Give the embedded editor keyboard focus."""
    if 'focus' in g.app.debug:
        tag = '%s.%s' % (self.tag, 'jse.set_focus')
        print('%30s:' % tag)
    self.editor.focus()
# @+node:ekr.20181215061810.1: *6* jse.text getters
def get_ins(self):
    """Return the editor's insert point as a (row, col) tuple."""
    d = self.editor.selection.getCursor()
    row, col = d['row'], d['column']
    return row, col

def get_sel(self):
    """
    Return the editor's selection as (row1, col1, row2, col2).

    With no selection, all four values collapse to the insert point.
    """
    selections = self.editor.selection.getAllRanges()
    if selections:
        sel = selections[0]
        d1, d2 = sel['start'], sel['end']
        row1, col1 = d1['row'], d1['column']
        row2, col2 = d2['row'], d2['column']
    else:
        print('%s: get_sel: NO SELECTION' % self.tag)
        # Bug fix: get_ins() returns a (row, col) tuple; the original chained
        # assignment put that whole tuple into each of the four scalars.
        row1, col1 = self.get_ins()
        row2, col2 = row1, col1
    return row1, col1, row2, col2

def get_text(self):
    """Return the editor's full text."""
    editor = self.editor
    s = editor.getValue()
    return s
# @+node:ekr.20181128061524.1: *6* jse.text setters
@flx.action
def insert(self, s):
    """Insert s at the editor's cursor."""
    # print(self.tag, 'insert', repr(s))
    self.editor.insert(s)

@flx.action
def select_all_text(self):
    """Trace-only stub."""
    if 'select' in g.app.debug:
        print(self.tag, 'select_all_text')

@flx.action
def set_insert_point(self, insert, sel):
    """Move the cursor to the row/col for the python index `insert`. sel is unused."""
    s = self.editor.getValue()
    row, col = g.convertPythonIndexToRowCol(s, insert)
    row = max(0, row - 1)  # NOTE(review): off-by-one adjustment — confirm why.
    if 'select' in g.app.debug:
        tag = '%s.%s' % (self.tag, 'set_insert_point')
        print('%30s: i: %s = (%s, %s)' % (
            tag, str(insert).ljust(3), row, col))
    self.editor.moveCursorTo(row, col)

@flx.action
def set_selection_range(self, i, j):
    """Not yet implemented."""
    print(self.tag, 'set_selection_range: NOT READY', i, j)

@flx.action
def set_text(self, s):
    """Set the entire text"""
    # print('%s.set_text: len(s): %s' % (self.tag, len(s)))
    self.editor.setValue(s)
# @+node:ekr.20181104082144.1: *5* class LeoFlexxBody (JS_Editor)
class LeoFlexxBody(JS_Editor):
    """A CodeEditor widget based on Ace."""

    << body css >>

    def init(self):
        # pylint: disable=arguments-differ
        super().init('body')
        # Create the Ace editor inside this widget's DOM node.
        self.editor = make_editor_function(self.name, self.node)

    @others
# @+node:ekr.20181120055046.1: *6* << body css >>
# Make the embedded Ace editor fill the body pane.
CSS = """
.flx-CodeEditor > .ace {
    width: 100%;
    height: 100%;
}
"""
# @+node:ekr.20190512094614.1: *6* flx_body.action.set_focus
@flx.action
def set_focus(self):
    """Give the body's Ace editor keyboard focus."""
    if 'focus' in g.app.debug:
        tag = 'flx.body.set_focus'
        print('%30s:' % tag)
    self.editor.focus()
# @+node:ekr.20181215061402.1: *6* flx_body.action.sync*


# @+node:ekr.20190511092226.1: *7* flx.body.action.sync_body_before_save_file
@flx.action
def sync_body_before_save_file(self, d):
    """Update p.b, etc. before calling c.fileCommands.save."""
    self.update_body_dict(d)
    if 'select' in g.app.debug:
        tag = 'flx.body.sync_body_before_save_file'
        print('%30s: %r' % (tag, d['s']))
    self.root.complete_save_file(d)
# @+node:ekr.20190511102428.1: *7* flx.body.action.sync_body_before_save_file_as
@flx.action
def sync_body_before_save_file_as(self, d):
    """Update p.b, etc. before calling c.fileCommands.saveAs."""
    self.update_body_dict(d)
    if 'select' in g.app.debug:
        tag = 'flx.body.sync_body_before_save_file_as'
        print('%30s: %r' % (tag, d['s']))
    self.root.complete_save_file_as(d)
# @+node:ekr.20190511102429.1: *7* flx.body.action.sync_body_before_save_file_to
@flx.action
def sync_body_before_save_file_to(self, d):
    """Update p.b, etc. before calling c.fileCommands.saveTo."""
    self.update_body_dict(d)
    if 'select' in g.app.debug:
        tag = 'flx.body.sync_body_before_save_file_to'
        print('%30s: %r' % (tag, d['s']))
    self.root.complete_save_file_to(d)
# @+node:ekr.20190510070009.1: *7* flx.body.action.sync_body_before_select
@flx.action
def sync_body_before_select(self, d):
    """
    Called by app.tree.select to update the *old* p.b, etc. before selecting a new node.

    d['old_ap']: The AP for the old node.
    d['new_ap']: The AP for the new node.
    """
    if 'select' in g.app.debug:
        tag = 'flx.body.sync_body_before_select'
        print('')
        # Bug fix: the original trace printed the *old* headline twice.
        print('%30s: %s ==> %s' % (
            tag, d['old_ap']['headline'], d['new_ap']['headline']))
    # Sets d['s'], etc., describing insert point & selection range.
    self.update_body_dict(d)
    self.root.complete_select(d)

# @+node:ekr.20190510070010.1: *6* flx.body.update_body_dict
def update_body_dict(self, d):
    """
    Add keys to d describing flx.body.

    This sets d['s'] & other keys from the *old* position.
    """
    #
    # Remember the body text and the insert point.
    d['s'] = self.get_text()
    d['ins_row'], d['ins_col'] = self.get_ins()
    #
    # Remember the selection ranges.
    row1, col1, row2, col2 = self.get_sel()
    d['sel_row1'], d['sel_col1'] = row1, col1
    d['sel_row2'], d['sel_col2'] = row2, col2
    if 'select' in g.app.debug:
        d_s = d['s']
        tag = 'flx.body.update_body_dict'
        print('%30s: len(d_s): %4s' % (tag, len(d_s)))
        if 0:  # Verbose: dump the body text line by line.
            print('')
            for z in d_s.split('\n'):
                print(repr(z))
            print('')
# @+node:ekr.20181104082149.1: *5* class LeoFlexxLog (JS_Editor)
class LeoFlexxLog(JS_Editor):
    """The log pane: an Ace editor widget."""

    << log css >>

    def init(self):
        # pylint: disable=arguments-differ
        super().init('log')
        # Create the Ace editor inside this widget's DOM node.
        self.editor = make_editor_function(self.name, self.node)

    @others
# @+node:ekr.20181120060336.1: *6* << log css >>
# Make the embedded Ace editor fill the log pane.
CSS = """
.flx-CodeEditor > .ace {
    width: 100%;
    height: 100%;
}
"""
# @+node:ekr.20181120060348.1: *6* flx.log.put & set_focus
@flx.action
def put(self, s):
    """Append s (on a new line) to the log pane's text."""
    prev = self.editor.getValue()
    if prev:
        self.editor.setValue(prev + '\n' + s)
    else:
        self.editor.setValue(s)

@flx.action
def set_focus(self):
    """Give the log's editor keyboard focus."""
    if 'focus' in g.app.debug:
        tag = 'flx.log.set_focus'
        print('%30s:' % tag)
    self.editor.focus()
# @+node:ekr.20181104082130.1: *5* class LeoFlexxMainWindow
class LeoFlexxMainWindow(flx.Widget):

    """
    Leo's main window, that is, root.main_window.

    Each property is accessible as root.main_window.x.
    """
    # One component property per pane, mutated exactly once in init.
    body = flx.ComponentProp(settable=True)
    log = flx.ComponentProp(settable=True)
    minibuffer = flx.ComponentProp(settable=True)
    status_line = flx.ComponentProp(settable=True)
    tree = flx.ComponentProp(settable=True)

    def init(self):
        self.tag = 'flx.main window'
        # Lay out the panes: (tree | log) above body, minibuffer, status line.
        with flx.VSplit():
            with flx.HSplit(flex=1):
                tree = LeoFlexxTree(flex=1)
                log = LeoFlexxLog(flex=1)
            body = LeoFlexxBody(flex=1)
            minibuffer = LeoFlexxMiniBuffer()
            status_line = LeoFlexxStatusLine()
        << define unload action >>
        self._mutate('body', body)
        self._mutate('log', log)
        self._mutate('minibuffer', minibuffer)
        self._mutate('status_line', status_line)
        self._mutate('tree', tree)
        self.emit('do_init')

    @flx.reaction('!do_init')
    def after_init(self):
        self.root.finish_create()
# @+node:ekr.20181206044554.1: *6* << define unload action >>
# Install a JS beforeunload handler so the browser asks for confirmation
# before closing Leo's page.
RawJS("""\
// Called from Mozilla, but not webruntime.
window.onbeforeunload = function(){
    // console.log("window.onbeforeunload")
    return "Are you sure?"
};
""")
# @+node:ekr.20181104082154.1: *5* class LeoFlexxMiniBuffer (JS_Editor)
class MinibufferEditor(flx.Widget):
    """Container widget that creates the minibuffer's editor."""

    def init(self):
        # Unlike in components, this call happens immediately.
        self.editor = make_editor_function('minibuffer', self.node)

class LeoFlexxMiniBuffer(JS_Editor):
    """The minibuffer pane: a label plus a MinibufferEditor."""

    def init(self):
        # pylint: disable=arguments-differ
        super().init('minibuffer')
        with flx.HBox():
            flx.Label(text='Minibuffer')
            w = MinibufferEditor(flex=1)
            self.editor = w.editor

    @flx.reaction('pointer_down')
    def on_select(self):
        # Clicking the minibuffer selects it in Leo's core.
        self.root.select_minibuffer()

    @others
# @+node:ekr.20181127060810.1: *6* flx_minibuffer.high-level interface
# The high-level interface methods, called from LeoBrowserMinibuffer.

@flx.action
def set_focus(self):
    """Give the minibuffer's editor keyboard focus."""
    if 'focus' in g.app.debug:
        tag = 'flx.mini.set_focus'
        print('%30s:' % tag)
    self.editor.focus()

@flx.action
def set_insert(self, i):
    """Trace-only stub."""
    if False and 'select' in g.app.debug:
        print('flx.mini.set_insert', i)
    # Where is call?

@flx.action
def set_selection(self, i, j):
    """Trace-only stub."""
    if False and 'select' in g.app.debug:
        print('flx.mini.set_selection', i, j)
    # Where is the call?

@flx.action
def set_style(self, style):
    """Styling the minibuffer is not implemented."""
    pass

@flx.action
def set_text(self, s):
    """Set the minibuffer's entire text."""
    if 'focus' in g.app.debug:  # NOTE(review): traces on 'focus', not 'select' — confirm.
        tag = 'flx.mini.set_text'
        print('%30s: %r' % (tag, s))
    self.editor.setValue(s)
# @+node:ekr.20181203150409.1: *6* flx_minibuffer.Key handling
@flx.emitter
def key_press(self, e):
    """Pass *all* keys except Enter and F12 to Leo's core."""
    # Backspace is not emitted.
    ev = self._create_key_event(e)
    key, mods = ev['key'], ev['modifiers']
    if 'keys' in g.app.debug:
        print('mini.key_press: %r %r' % (mods, key))
    if mods:
        e.preventDefault()
        return ev
    if key == 'F12':
        return ev
    if key == 'Enter':
        # Execute the minibuffer's command directly.
        self.do_enter_key(key, mods)
        e.preventDefault()
        return None
    # k.masterKeyHandler will handle everything.
    e.preventDefault()
    return ev

@flx.reaction('key_press')
def on_key_press(self, *events):
    """Pass *all* keys to Leo's core."""
    for ev in events:
        # print('mini.on_key_press: %r %r' % (ev ['modifiers'], ev['key']))
        self.root.do_key(ev, 'minibufferWidget')
# @+node:ekr.20181129174405.1: *6* flx_minibuffer.do_enter_key
def do_enter_key(self, key, mods):
    """
    Handle the enter key in the minibuffer.
    This will only be called if the user has entered the minibuffer via a click.
    """
    command = self.editor.getValue()
    if 'keys' in g.app.debug:
        print('mini.do_enter_key', repr(command))
    if command.strip():
        if command.startswith('full-command:'):
            # Strip the prompt prefix and run the named command.
            command = command[len('full-command:') :].strip()
            self.root.do_command(command, 'Enter', [])
        elif command.startswith('Enter Headline:'):
            # Strip the prompt prefix and finish headline editing.
            headline = command[len('Enter Headline:') :].strip()
            self.root.edit_headline_completer(headline)
        else:
            self.root.do_command(command, key, mods)
# @+node:ekr.20181104082201.1: *5* class LeoFlexxStatusLine
class LeoFlexxStatusLine(flx.Widget):
    """The status line: a label plus two LineEdit fields."""

    def init(self):
        self.tag = 'flx.status'
        with flx.HBox():
            flx.Label(text='Status Line')
            self.widget = flx.LineEdit(flex=1)
            self.widget2 = flx.LineEdit(flex=1)
        self.widget.apply_style('background: green')
        self.widget2.apply_style('background: green')

    @others
# @+node:ekr.20181123043015.1: *6* flx.status_line.action.update
@flx.action
def update(self, lt, rt):
    """Update both status-line fields."""
    self.put(lt)
    self.put2(rt)
# @+node:ekr.20181120060957.1: *6* flx_status_line.action.put & put2
@flx.action
def put(self, s, bg=None, fg=None):
    """Set the first field's text.  bg/fg are ignored."""
    self.widget.set_text(s)

@flx.action
def put2(self, s, bg=None, fg=None):
    """Set the second field's text.  bg/fg are ignored."""
    self.widget2.set_text(s)
# @+node:ekr.20181120060950.1: *6* flx_status_line.Key handling
@flx.emitter
def key_press(self, e):
    """Allow only F-keys, Ctrl-C and Ctrl-S."""
    ev = self._create_key_event(e)
    key, mods = ev['key'], ev['modifiers']
    print('flx.status_line:', repr(mods), repr(key))
    if not mods and key.startswith('F'):
        return ev
    if mods == ['Ctrl'] and key in 'cs':
        # We don't always get Ctrl-S from the browser.
        return ev
    # Prevent everything else.
    e.preventDefault()
    return ev

@flx.reaction('key_press')
def on_key_press(self, *events):
    """Pass Ctrl-S to Leo."""
    # Bug fix: the original returned `ev` from inside the loop, so only the
    # first event was ever examined.  Reactions ignore return values anyway.
    for ev in events:
        key, mods = ev['key'], ev['modifiers']
        if mods == ['Ctrl'] and key == 's':
            self.root.do_key(ev, 'statusLine')
# @+node:ekr.20181104082138.1: *5* class LeoFlexxTree
class LeoFlexxTree(flx.Widget):
    """The tree (outline) pane, built on flx.TreeWidget."""

    << tree css >>

    def init(self):
        self.widget = self
        self.wrapper = self
        self.tag = 'flx.tree'
        # Init local ivars...
        self.populated_items_dict = {}  # Keys are ap **keys**, values are True.
        self.populating_tree_item = None  # The LeoTreeItem whose children are to be populated.
        self.selected_ap = {}  # The ap of the presently selected node.
        self.tree_items_dict = {}  # Keys are ap's. Values are LeoTreeItems.
        # Init the widget. The max_selected property does not seem to work.
        self.tree = flx.TreeWidget(flex=1, max_selected=1)

    def assert_exists(self, obj):
        """Assert that obj is neither None, empty, nor JS undefined."""
        # pylint: disable=undefined-variable
            # undefined
        global undefined  # JS-side: 'undefined' exists only in the browser.
        assert obj not in (undefined, None, {}), repr(obj)

    @others
# @+node:ekr.20181120061118.1: *6*  << tree css >>

# Dark theme for the tree pane; the original Leo-yellow values are kept
# (commented out) inside the stylesheet.
CSS = '''
.flx-TreeWidget {
    background: #000;
    color: white;
    /* background: #ffffec; */
    /* Leo Yellow */
    /* color: #afa; */
}
'''
# @+node:ekr.20181121073246.1: *6* flx_tree.Drawing...
# @+node:ekr.20181112163252.1: *7* flx_tree.action.clear_tree
@flx.action
def clear_tree(self):
    """
    Completely clear the tree, preparing to recreate it.

    Important: we do *not* clear self.tree itself!
    """
    trace = 'drawing' in g.app.debug
    tag = 'flx.tree.clear_tree'
    # pylint: disable=access-member-before-definition
    items = list(self.tree_items_dict.values())
    if trace:
        print('')
        print('%s: %s items' % (tag, len(items)))
    #
    # Clear *all* references first.
    self.tree_items_dict = {}
    self.populated_items_dict = {}
    self.populating_tree_item = None
    #
    # Clear all tree items.
    for item in items:
        if trace:
            print('  %r %s' % (item, item.text))
        item.dispose()
# @+node:ekr.20181113043004.1: *7* flx_tree.action.redraw_with_dict & helper
@flx.action
def redraw_with_dict(self, redraw_dict):
    """
    Clear the present tree and redraw using the **recursive** redraw_list.
    redraw_dict has the form:
        {
            'c.p': self.p_to_ap(p),
            'items': [
                self.make_dict_for_position(p)
                    for p in c.rootPosition().self_and_siblings()
            ],
        }
    """
    # This is called only from app.action.redraw.
    trace = 'drawing' in g.app.debug
    tag = 'redraw_with_dict'
    assert redraw_dict
    self.clear_tree()
    items = redraw_dict['items']
    if trace:
        print('')
        print('%s: %s direct children' % (tag, len(items)))
    for item in items:
        # Recursively create each top-level item and its visible children.
        tree_item = self.create_item_with_parent(item, self.tree)
        if trace:
            print('  %r %s' % (tree_item, item['headline']))
    #
    # Select c.p.
    self.select_ap(redraw_dict['c.p'])
    redraw_dict = {}  # #1127: Remove references to deleted items.
# @+node:ekr.20181124194248.1: *8* tree.create_item_with_parent
def create_item_with_parent(self, item, parent):
    """
    Create a tree item for item and all its visible children.

    item: a dict with 'ap' and 'children' keys (see redraw_with_dict).
    parent: the flx tree widget or a LeoFlexxTreeItem.
    """
    # pylint: disable=no-member
    # set_collapsed is in the base class.
    trace = 'drawing' in g.app.debug
    tag = 'create_item_with_parent'
    ap = item['ap']
    #
    # Create the node.
    with parent:
        tree_item = LeoFlexxTreeItem(ap, text=ap['headline'], checked=None, collapsed=True)
    tree_item.set_collapsed(not ap['expanded'])
    #
    # Set the data.
    key = self.ap_to_key(ap)
    self.tree_items_dict[key] = tree_item
    #
    # Children are *not* necessarily sent, so set the populated 'bit' only if they are.
    if item['children']:
        if trace:
            print(tag, '**populated**', ap['headline'])
        self.populated_items_dict[key] = True
    if hasattr(parent, 'leo_children'):
        # print(tag, parent.leo_ap['headline'], ap['headline'])
        parent.leo_children.append(tree_item)
    #
    # Create the children.
    for child in item['children']:
        self.create_item_with_parent(child, tree_item)
    return tree_item  # Debugging
# @+node:ekr.20181114072307.1: *7* flx_tree.ap_to_key
def ap_to_key(self, ap):
    """Return a stable string key that uniquely identifies the archived position ap."""
    self.assert_exists(ap)
    # Encode each stack entry as a 'childIndex/gnx' pair, joined by '::'.
    stack_parts = []
    for entry in ap['stack']:
        stack_parts.append('childIndex: %s, gnx: %s' % (entry['childIndex'], entry['gnx']))
    stack_s = '::'.join(stack_parts)
    return 'Tree key<childIndex: %s, gnx: %s, %s <stack: %s>>' % (
        ap['childIndex'], ap['gnx'], ap['headline'], stack_s or '[]')
# @+node:ekr.20181113085722.1: *7* flx_tree.dump_ap
def dump_ap(self, ap, padding, tag):
    """Print the archived position ap, including its stack, with indentation."""
    indent = (padding or '') + ' ' * 4
    label = tag or 'ap'
    stack = ap['stack']
    if not stack:
        # One-line form for a position with an empty stack.
        print('%s%s: childIndex: %s v: %s stack: [] %s' % (
            indent, label,
            str(ap['childIndex']).ljust(3),
            ap['gnx'],
            ap['headline'],
        ))
        return
    # Multi-line form: header, position line, then one line per stack entry.
    print('%s%s:...' % (indent, label))
    indent = indent + ' ' * 4
    print('%schildIndex: %s v: %s %s stack: [' % (
        indent,
        str(ap['childIndex']),
        ap['gnx'],
        ap['headline'],
    ))
    indent = indent + ' ' * 4
    for entry in stack:
        print('%s%s %s %s' % (
            indent,
            str(entry['childIndex']).ljust(3),
            entry['gnx'],
            entry['headline'],
        ))
    print(']')
# @+node:ekr.20181104080854.3: *7* flx_tree.reaction: on_tree_event
# actions: set_checked, set_collapsed, set_parent, set_selected, set_text, set_visible
@flx.reaction(
    'tree.children**.checked',
    'tree.children**.collapsed',
    'tree.children**.visible',  # Never seems to fire.
)
def on_tree_event(self, *events):
    """React to tree-item events, populating children on first expansion."""
    for ev in events:
        # A falsy new_value (e.g. collapsed becoming False) is treated as expansion.
        expand = not ev.new_value
        if expand:
            # Don't redraw if the LeoTreeItem has children.
            tree_item = ev.source
            ap = tree_item.leo_ap
            if ap['expanded']:
                if 0:
                    print('flx.tree.on_tree_event: already expanded', ap['headline'])
            else:
                # First expansion: mark expanded and fetch children lazily.
                ap['expanded'] = True
                # Populate children, if necessary.
                self.start_populating_children(ap, tree_item)
# @+node:ekr.20181120063735.1: *6* flx_tree.Focus
@flx.action
def set_focus(self):
    """Give keyboard focus to the tree's DOM node."""
    if 'focus' in g.app.debug:
        tag = 'flx.tree.set_focus'
        print('%30s:' % tag)
    self.node.focus()
# @+node:ekr.20181123165819.1: *6* flx_tree.Incremental Drawing...
# This are not used, at present, but they may come back.
# @+node:ekr.20181125051244.1: *7* flx_tree.populate_children
def populate_children(self, children, parent_ap):
    """
    Populate the children of the given parent.

    self.populating_tree_item is the LeoFlexxTreeItem to be populated.

    children is a list of ap's.
    """
    parent = self.populating_tree_item
    assert parent
    # The expansion bit may have changed?
    assert parent_ap == parent.leo_ap
    # print('flx.tree.populate_children: parent: %r %s children' % (parent, len(children)))
    for child_ap in children:
        self.create_item_with_parent(child_ap, parent)
    # Clear the in-progress marker set by start_populating_children.
    self.populating_tree_item = False
# @+node:ekr.20181111011928.1: *7* flx_tree.start_populating_children
def start_populating_children(self, parent_ap, parent_tree_item):
    """
    Start populating parent_tree_item with children, if necessary.

    This only *requests* the children (via root.send_children_to_tree);
    receive_children completes the population asynchronously.
    """
    self.assert_exists(parent_ap)
    self.assert_exists(parent_tree_item)
    key = self.ap_to_key(parent_ap)
    if key in self.populated_items_dict:
        # Already populated (bit set in create_item_with_parent): nothing to do.
        # print('%s: already populated: %s' % (tag, headline))
        return
    assert isinstance(parent_tree_item, LeoFlexxTreeItem)
    # Remember the parent_tree_item.
    self.populating_tree_item = parent_tree_item
    #
    # Ask for the items.
    self.root.send_children_to_tree(parent_ap)
# @+node:ekr.20181110175222.1: *7* flx_tree.action.receive_children
@flx.action
def receive_children(self, d):
    """
    Populate the direct descendants of ap. d is compatible with make_redraw_dict:
        {
            'parent_ap': parent_ap,
            'items': [
                self.make_dict_for_position(p)
                    for p in p.children()
            ],
        }
    """
    parent_ap = d['parent_ap']
    children = d['items']
    # print('flx.tree.receive_children: %s children' % (len(children)))
    self.populate_children(children, parent_ap)
# @+node:ekr.20181120061140.1: *6* flx_tree.Key handling
@flx.emitter
def key_press(self, e):
    """Emit a key event, suppressing the browser's default action except for F-keys."""
    ev = self._create_key_event(e)
    mods, key = ev['modifiers'], ev['key']
    # f_key = not mods and key.startswith('F')
    # print('flx.tree.key_press: %r preventDefault: %s', (ev, not f_key))
    # Use default action for F-Keys.
    if not mods and key.startswith('F'):
        return ev
    # Don't ignore Return
    # if not mods and key == 'Enter':
    #     return ev
    #
    # Prevent default action for all other keys.
    e.preventDefault()
    return ev

@flx.reaction('tree.key_press')
def on_key_press(self, *events):
    """Forward all tree key events to the app's central key handler."""
    # print('flx.tree.on_key_press')
    for ev in events:
        self.root.do_key(ev, 'tree')
# @+node:ekr.20181121195235.1: *6* flx_tree.Selecting...
# @+node:ekr.20181123171958.1: *7* flx_tree.action.set_ap
@flx.action
def set_ap(self, ap):
    """Set self.selected_ap and select the matching tree item. Called from app.select_ap."""
    assert ap
    self.selected_ap = ap
    self.select_ap(self.selected_ap)
# @+node:ekr.20181116083916.1: *7* flx_tree.select_ap
@flx.action
def select_ap(self, ap):
    """
    Select the tree item corresponding to the given ap.

    Called from the mutator, and also on_selected_event.
    """
    # print('flx.tree.select_ap', repr(ap), ap ['headline'])
    key = self.ap_to_key(ap)
    item = self.tree_items_dict.get(key)
    if item:
        item.set_selected(True)  # Set the item's selected property.
        self.selected_ap = ap  # Remember the new selection.
    else:
        pass  # We may be in the middle of a redraw.
# @+node:ekr.20181109083659.1: *7* flx_tree.reaction.on_selected_event
@flx.reaction('tree.children**.selected')  # don't use mode="greedy" here!
def on_selected_event(self, *events):
    """
    Update c.p and c.p.b when the user selects a new tree node.

    This also gets fired on *unselection* events, which causes problems.
    """
    #
    # Reselect the present ap if there are no selection events.
    # This ensures that clicking a headline twice has no effect.
    if not any([ev.new_value for ev in events]):
            # Must use a comprehension above. flexx can't handle generator expressions.
        ev = events[0]
        self.assert_exists(ev)
        ap = ev.source.leo_ap
        self.assert_exists(ap)
        self.select_ap(ap)
        return
    #
    # handle selection events (ev.new_value is True only for selection).
    for ev in events:
        if ev.new_value:  # A selection event.
            ap = ev.source.leo_ap
            # print('on_selected_event: select:', ap['headline'])
            self.select_ap(ap)  # Selects the corresponding LeoTreeItem.
            self.set_ap(ap)  # Sets self.selected_ap.
            # A helper action, calling tree.select.
            self.root.select_tree_using_ap(ap)
# @+node:ekr.20181108233657.1: *5* class LeoFlexxTreeItem
class LeoFlexxTreeItem(flx.TreeItem):
    """A TreeItem that carries its archived position (leo_ap) and child items."""

    def init(self, leo_ap):
        # pylint: disable=arguments-differ
        self.leo_ap = leo_ap  # Immutable: Gives access to cloned, marked, expanded fields.
        self.leo_children = []  # Child LeoFlexxTreeItems, appended by create_item_with_parent.

    def getName(self):
        return 'head'  # Required, for proper pane bindings.

    @others
# @+node:ekr.20240404112831.1: *3* Import traces (do not delete)
@nosearch
# @+node:ekr.20240404112041.1: *4* Leo core module levels (by hand)
@language python
@nosearch

# Level 0: no imports at top level.
0 leo.core.leoApp
0 leo.core.leoBridge
0 leo.core.leoColor
0 leo.core.leoFastRedraw
0 leo.core.leoGlobals
0 leo.core.leoImport
0 leo.core.leoPymacs
0 leo.core.leoQt
0 leo.core.leoTokens
0 leo.core.leoVersion
0 leo.core.signal_manager
0 leo.core.tracing_utils

# Level 1: import only leoGlobals at top level.
1 leo.core.leoAst
1 leo.core.leoCache
1 leo.core.leoChapters
1 leo.core.leoCompare
1 leo.core.leoDebugger
1 leo.core.leoExternalFiles
1 leo.core.leoFind
1 leo.core.leoHistory
1 leo.core.leoKeys
1 leo.core.leoMarkup
1 leo.core.leoMenu
1 leo.core.leoNodes
1 leo.core.leoPersistence
1 leo.core.leoPlugins
1 leo.core.leoRope
1 leo.core.leoRst
1 leo.core.leoSessions
1 leo.core.leoShadow
1 leo.core.leoTips
1 leo.core.leoVim

# Level 1: import only level 0 modules at top level.
1 leo.core.leoBackground
    from leo.core import leoGlobals as g
    from leo.core.leoQt import QtCore
1 leo.core.leoColorizer
    from leo.core import leoGlobals as g
    from leo.core.leoColor import leo_color_database
    from leo.core.leoQt import Qsci, QtGui, QtWidgets, UnderlineStyle, Weight
1 leo.plugins.modScripting
    from leo.core import leoGlobals as g
    from leo.core import leoColor
    from leo.core import leoGui
1 leo.core.leoPrinting
    from leo.core import leoGlobals as g
    from leo.core.leoQt import DialogCode, printsupport, QtGui
1 leo.core.leoTest2
    from leo.core import leoGlobals as g
    from leo.core import leoApp
1 leo.core.runLeo
    from leo.core import leoGlobals as g
    from leo.core import leoApp

# Level 2: import only level 0 or 1 modules at top level.
2 leo.core.leoAtFile:
    from leo.core import leoGlobals as g
    from leo.core import leoNodes
2 leo.core.leoBeautify:
    from leo.core import leoGlobals as g
    from leo.core import leoAst
2 leo.core.leoCommands:  # ctor instantiates all sub-commanders!!!
    from leo.core import leoGlobals as g
    from leo.core import leoNodes
2 leo.core.leoConfig:
    from leo.core import leoGlobals as g
    from leo.plugins.mod_scripting import build_rclick_tree
2 leo.core.leoFileCommands:
    from leo.core import leoGlobals as g
    from leo.core import leoNodes

# Level 3: import only modules of level 0, 1 or 2 at top level.
3 leo.core.leoFrame
    from leo.core import leoGlobals as g
    from leo.core import leoColorizer
    from leo.core import leoMenu
    from leo.core import leoNodes
3 leo.core.leoserver
    from leo.core.leoCommands import Commands as Cmdr
    from leo.core.leoNodes import Position, VNode
    from leo.core.leoGui import StringFindTabManager
    from leo.core.leoExternalFiles import ExternalFilesController
3 leo.core.leoUndo
    from leo.core import leoGlobals as g
    from leo.core.leoFileCommands import FastRead
    from leo.core.leoNodes import Position, VNode

# Level 4: import only modules of level 0, 1, 2 or 3 at top level.
4 leo.core.leoGui
    from leo.core import leoGlobals as g
    from leo.core import leoFrame
4 leo.core.leoclient
    from leo.core import leoGlobals as g
    from leo.core import leoserver
# @+node:ekr.20240404151915.1: *4* Leo commands module levels (by hand)
# Level 1: import only leoGlobals at top level.
1 leo.commands.baseCommands
1 leo.commands.checkerCommands
1 leo.commands.editCommands
1 leo.commands.commanderHelpCommands
1 leo.commands.gotoCommands

# Level 1: import only level 0 modules at top level.
1 leo.commands.commanderFileCommands
    from leo.core import leoGlobals as g
    from leo.core import leoImport
    
# Level 2: import only leoGlobals and leo.commands.baseCommands
2 leo.commands.bufferCommands
2 leo.commands.controlCommands
2 leo.commands.debugCommands
2 leo.commands.editCommands
2 leo.commands.helpCommands
2 leo.commands.keyCommands
2 leo.commands.killBufferCommands
2 leo.commands.rectangleCommands
2 leo.commands.spellCommands

# Level 2: import only level 0 or 1 modules at top level.
2 leo.commands.abbrevCommands
    from leo.core import leoGlobals as g
    from leo.core import leoNodes
    from leo.commands.baseCommands import BaseEditCommandsClass
2 leo.commands.commanderOutlineCommands
    from leo.core import leoGlobals as g
    from leo.core import leoNodes
    from leo.core import leoFileCommands
    
# Level 3: import only modules of level 0, 1 or 2 at top level.
3 leo.commands.convertCommands
    from leo.core import leoGlobals as g
    from leo.core import leoBeautify
    from leo.commands.baseCommands import BaseEditCommandsClass
3 leo.commands.editFileCommands
    from leo.core import leoGlobals as g
    from leo.core import leoCommands
    from leo.commands.baseCommands import BaseEditCommandsClass
# @+node:ekr.20251214043555.1: *3* Various buttons (attic)
# @+node:ekr.20251214043555.2: *4* @@button adoc
"""A prototype of running adoc and asciidoctor."""
import os
p = g.findNodeAnywhere(c, r'@adoc ad_test.adoc')
assert p
paths = c.asciiDoctorCommands.adoc_command(event=g.Bunch(p=p),verbose=False)
paths = [os.path.abspath(os.path.normpath(z)) for z in paths]
input_paths = ' '.join(paths)
g.execute_shell_commands(['asciidoctor %s' % input_paths])
output_paths = ' '.join([z.replace('.adoc', '.html') for z in paths])
for path in paths:
    print(f"wrote: {path}\n")
if 0:
    # optional: open .html files in the default browser.
    g.execute_shell_commands([output_paths])
@language python

# @+node:ekr.20150413091056.1: *4* @@button check-clones
"""Warn if leoProjects.txt or leoToDo.txt contain any clones."""

clones,nodes,seen = 0,0,set()
table = (
  '@file ../doc/leoProjects.txt',
  '@file ../doc/leoToDo.txt',
)

def check_clone(c,p0,root):
    """Warn if p appears in any @<file> node outside of root's tree."""
    global nodes,seen
    v = p0.v
    for p in c.all_positions():
        nodes += 1
        if p.v == v:
            # Check *all* ancestors, not just the nearest one.
            for parent in p.self_and_parents():
                nodes += 1
                if parent.isAnyAtFileNode() and parent.v != root.v:
                    if parent.v not in seen:
                        seen.add(parent.v)
                        g.es_print('%s and %s contain clone: %s' % (
                            root.h,parent.h,p0.h))

for h in table:
    root = g.findNodeAnywhere(c,h)
    if root:
        for p in root.self_and_subtree():
            nodes += 1
            if p.isCloned():
                clones += 1
                check_clone(c,p,root)
    else:
        g.es_print('not found',h,color='red')
print('done: %s nodes, %s clones' % (nodes,clones))

@tabwidth -4
@language python
# @+node:ekr.20211011090013.1: *4* @@button cvt-comments
"""
Find and convert hanging comments.

Warning: do not run this script on unit testing files.
"""
# https://github.com/leo-editor/leo-editor/pull/2622
g.cls()
import re
from typing import Any, List

trace = True  # It's useful to trace even when also replacing.
replace = True  # Replace body text.
max_line_length = 70  # Maximum line length for lines containing trailing comments.

@others

for p in c.p.self_and_subtree():
    convert(p)
print('done')
# @+node:ekr.20220425184232.1: *5* compute_lws
def compute_lws(s: str) -> str:
    """Return the leading whitespace of s, as a string."""
    # Fix: the annotation said `int`, but the function returns s[:n], a str.
    # Callers (convert) compare and concatenate the result as a string.
    n = len(s) - len(s.lstrip())
    return s[:n]
# @+node:ekr.20220425052306.1: *5* convert
compound_statement = (
    # Leo doesn't use 'match'
    'async ', 'class ', 'def ', 'else:', 'elif ', 'except',
    'finally:', 'for ', 'if ', 'try:', 'while ', 'with ',
)
pat = re.compile(r'\)\s*->.*:\s*$')  # End of multiline def.

def convert(p: Any) -> None:
    """
    Convert all hanging comments in p.b.

    A hanging comment is a comment line indented more deeply than the
    preceding non-comment line. Short single comments are merged onto the
    preceding line; longer runs are hoisted above it. Reads the globals
    trace, replace and max_line_length from the enclosing script.
    """
    if p.isAnyAtFileNode():
        print('   Scan:', p.h)
    changed, last, result = False, '', []
    i, lines = 0, p.b.split('\n')
    sep = '-' * 40
    while i < len(lines):
        progress = i
        more_lines = []
        s = lines[i]
        s_s = s.lstrip()
        last_s = last.lstrip()
        lws_s = compute_lws(s)
        lws_last = compute_lws(last)
        # s is a hanging comment if it is a comment, more deeply indented than
        # last, and last is not itself a comment, string, opener, or header.
        if (
            last_s and s_s.startswith('#')
            and not pat.match(last_s)
            and not last_s.startswith(('#', '"""', "'''"))
            and not last_s.endswith(('[', '(', '{', '):'))
            and not last_s.startswith(compound_statement)
            # NOTE(review): lexicographic *string* comparison of the leading
            # whitespace — correct for spaces-only indentation; confirm tabs.
            and lws_last < lws_s
        ):
            changed = True
            result.pop()  # Discard the last line!
            j = i + 1  # Look for more indented lines.
            while j < len(lines):
                s2 = lines[j]
                s2_s = s2.strip()
                lws_s2 = compute_lws(s2)
                if lws_s2 >= lws_s and s2_s.startswith('#'):
                    more_lines.append(s2)
                    j += 1
                else:
                    break
            if not more_lines and len(last_s) + len(s_s) < max_line_length:
                # Emit one line.
                result.append(f"{last}  {s_s}")
                if trace:
                    g.printObj([last, s, sep, f"{last}  {s_s}\n"], tag=p.h)
            else:
                # Emit the first comment line.
                result.append(f"{lws_last}{s_s}")
                # Emit any additional comment lines.
                for z in more_lines:
                    result.append(f"{lws_last}{z.lstrip()}")
                # Emit the last (non-comment) line.
                result.append(last)
                if trace:
                    added_lines = [f"{lws_last}{z.lstrip()}" for z in more_lines]
                    g.printObj(
                        [last, s] + more_lines + [sep] +
                        [f"{lws_last}{s_s}"] + added_lines + [last], tag=p.h)
            i += 1 + len(more_lines)
            # NOTE(review): if the hanging comment ends the body, lines[i]
            # raises IndexError here — confirm bodies always end with '\n'.
            last = lines[i]
        else:
            result.append(s)
            last = s
            i += 1
        assert progress < i
    if changed:
        print('Changed:', p.h)
        if replace:
            p.b = '\n'.join(result)
# @+node:ekr.20251214043555.3: *4* @@button cycling syntax coloring
@language python

"""Cycle syntax coloring when there are multiple @language directives in a node."""
# Original by Terry Brown, Revised by EKR.
# Walk up from the selected node looking for a body starting with @language.
while not p.isRoot():
    if p.b.strip().startswith("@language "):
        lines = g.splitLines(p.b)
        words = lines[0].split()
        if len(words) > 2 and words[2][0].isalpha():
            # Cycle the languages on line 1.
            line0 = '%s %s %s\n' % (words[0], ' '.join(words[2:]), words[1])
            p.b = line0 + ''.join(lines[1:])
            # Reselect the node, presumably to force a recolor — confirm.
            c.selectVisBack()
            c.selectVisNext()
            break
    p.moveToParent()
else:
    g.es("No ambiguous @language directive found")
@language python
# @+node:ekr.20251214043555.4: *4* @@button expand-section
"""take a selected region,  convert each line to a section reference."""
# For Eric S. Johansson
current = c.p
body = c.frame.body
u,undoType = c.undoer, 'expand sections'
last = c.lastTopLevel()
wrapper = c.frame.body.wrapper
sel_1, sel_2 = wrapper.getSelectionRange()
ins = wrapper.getInsertPoint()
tab_width = c.getTabWidth(c.p)
head, lines, tail, oldSel, oldYview = c.getBodyLines()
u.beforeChangeGroup(current, undoType)
lines_3 = []
for s in lines:
    start_of_line, width_of_line = g.skip_leading_ws_with_indent(s, 0, c.tab_width)
    new_section_heading = g.angleBrackets('{}')
    l2 = s[start_of_line:].strip()
    header = new_section_heading.format(l2)
    # calculate replacement line for source material
    lines_3.append((" "*start_of_line) + l2 + "\n")
    undo_data = u.beforeInsertNode(current)
    new_node = p.insertAsLastChild()
    new_node.h = header
    new_node.b = "'''\n'''"
    u.afterInsertNode(new_node, 'Insert-Node', undo_data)
        # New
    c.redraw(current) # Selects the old node.
preserveSel = sel_1 == sel_2
if preserveSel:
    ins += tab_width
    oldSel = ins, ins
c.updateBodyPane(head, ''.join(lines_3), tail, undoType, oldSel, oldYview, preserveSel)
u.afterChangeGroup(current, undoType)
@language python
# @+node:ekr.20251214043555.5: *4* @@button graalvm
"""
Run the Javascripot code in the node selected in the outline.
"""
from subprocess import run
graal = r"C:\apps\graalvm-ce-java11-20.0.0\languages\js\bin\js.exe"
progfile = r"C:\test\grall_input.txt"
s = c.atFileCommands.stringToString(
    p, p.b, forcePythonSentinels=False, sentinels=False)
prog = s.replace("\r\n", "\n")  # Use brute force. 
with open(progfile, 'w') as f:
    f.write(s)
cmd = f'{graal} {progfile}'
result = run(cmd, capture_output=True, text=True, encoding='utf-8')
print(result.stdout or 'no output')
@language python

# @+node:ekr.20201013034742.2: *4* @@button make-importer
# Generate a new importer skeleton by copying the template tree and
# substituting the constants below into its {{...}} placeholders.
g.cls()
# define constants that describe the new language.
name = 'php'  # The name of the file, and the prefix for classes.
language = 'php'  # The name of the language, case doesn't matter.
extensions = ['.php',]  # A list of file extensions supported by this importer.
strict = False  # True if leading whitespace is particularly significant.
state_ivar = 'self.curlies'
    # 'self.indent' for python, coffeescript.
    # 'self.curlies' for many other languages
    # '(self, curlies, self.parens)' for more complex comparisons
<< define run & helpers >>
run(extensions, language, name, state_ivar)
# @+node:ekr.20201013034742.3: *5* << define run & helpers >>
@others
# @+node:ekr.20201013034742.4: *6* copy_tree (make-importer)
def copy_tree(source, root, h):
    """Copy source's tree to a new node following root, giving it headline h."""
    target = root.insertAfter()  # New sibling of root.
    source.copyTreeFromSelfTo(target)
    target.h = h
    return target
 
# @+node:ekr.20201013034742.5: *6* make_substitutions
def make_substitutions(destination, patterns):
    """Apply substitute() to every headline and body in destination's tree."""
    for node in destination.self_and_subtree():
        new_h = substitute(node.h, patterns)
        if node.h != new_h:
            node.h = new_h
        new_b = substitute(node.b, patterns)
        if node.b != new_b:
            node.b = new_b
# @+node:ekr.20201013034742.6: *6* run
def run(extensions, language, name, state_ivar):
    """
    The driver for this script.

    Copy the '@@file importers/{{name}}.py' template tree and replace
    every {{key}} placeholder using the patterns dict.
    """
    # NOTE: 'strict' is read from the enclosing script's globals.
    patterns = {
        'cap_name': name.capitalize(),
        'extensions': '[%s]' % ', '.join(["'%s'" % (z) for z in extensions]),
        'language': language.lower(),
        'name': name.lower(),
        'strict': 'True' if strict else 'False',
        'state_ivar': state_ivar,
    }
    h = '@button make-importer'
    root = g.findNodeAnywhere(c, h)
    assert root, h
    h = '@@file importers/{{name}}.py'
    source = g.findNodeInTree(c, root, h)
    assert source, h
    destination = copy_tree(source, root, h)
    make_substitutions(destination, patterns)
    c.contractAllHeadlines()
    c.redraw()
# @+node:ekr.20201013034742.7: *6* substitue
def substitute(s, patterns):
    """Return s with every '{{key}}' replaced by patterns[key]."""
    # Idiom: str.replace scans left-to-right and never rescans replacement
    # text, exactly matching the original hand-rolled find/splice loop.
    for key, replacement in patterns.items():
        s = s.replace('{{%s}}' % key, replacement)
    return s
# @+node:ekr.20201013034742.8: *5* @@file importers/{{name}}.py
"""The @auto importer for the {{name}} language."""
import leo.plugins.importers.linescanner as linescanner
Importer = linescanner.Importer
@others
importer_dict = {
    'class': {{cap_name}}_Importer,
    'extensions': {{extensions}},
}
@language python
@tabwidth -4


# @+node:ekr.20201013034742.9: *6* class {{cap_name}}_Importer
class {{cap_name}}_Importer(Importer):
    """The importer for the {{name}} lanuage."""

    def __init__(self, c):
        """{{cap_name}}_Importer.__init__"""
        # Init the base class.
        Importer.__init__(self,
            c,
            language = '{{language}}',
        )
        
    @others
# @+node:ekr.20201013034742.10: *7* {{name}}.Overrides
# These can be overridden in subclasses.
# @+node:ekr.20201013034742.11: *8* {{name}}.clean_headline
# Define an override if desired...

if 0: # The base class
    def clean_headline(self, s):
        """Return a cleaned up headline s."""
        return s.strip()
        
if 0: # A more complex example, for the C language.
    def clean_headline(self, s):
        """Return a cleaned up headline s."""
        import re
        type1 = r'(static|extern)*'
        type2 = r'(void|int|float|double|char)*'
        class_pattern = r'\s*(%s)\s*class\s+(\w+)' % (type1)
        pattern = r'\s*(%s)\s*(%s)\s*(\w+)' % (type1, type2)
        m = re.match(class_pattern, s)
        if m:
            prefix1 = '%s ' % (m.group(1)) if m.group(1) else ''
            return '%sclass %s' % (prefix1, m.group(2))
        m = re.match(pattern, s)
        if m:
            prefix1 = '%s ' % (m.group(1)) if m.group(1) else ''
            prefix2 = '%s ' % (m.group(2)) if m.group(2) else ''
            h = m.group(3) or '<no c function name>'
            return '%s%s%s' % (prefix1, prefix2, h)
        else:
            return s
# @+node:ekr.20201013034742.12: *8* {{name}}.clean_nodes
def clean_nodes(self, parent):
    """
    Clean all nodes in parent's tree.
    Subclasses override this as desired.
    See perl_i.clean_nodes for an example.
    """
    pass
# @+node:ekr.20201013034742.13: *6* class {{cap_name}}_ScanState
class {{cap_name}}_ScanState:
    """A class representing the state of the {{name}} line-oriented scan."""
    
    def __init__(self, d=None):
        """{{cap_name}}_ScanState.__init__"""
        if d:
            prev = d.get('prev')
            self.context = prev.context
            # Adjust these by hand.
            self.curlies = prev.curlies
        else:
            self.context = ''
            # Adjust these by hand.
            self.curlies = 0

    def __repr__(self):
        """{{cap_name}}_ScanState.__repr__"""
        # Adjust these by hand.
        return "{{cap_name}}_ScanState context: %r curlies: %s" % (
            self.context, self.curlies)

    __str__ = __repr__

    @others
# @+node:ekr.20251214043555.6: *4* @@button print-gnx
"""@button (ekr.leo) print the gnx."""
# g.cls()
print('timestamp: %s lastIndex: %s' % (g.app.nodeIndices.timeString,g.app.nodeIndices.lastIndex))
print('gnxs: -----')
for p in c.p.self_and_subtree():
    print('%s %s' % (p.v.gnx,p.h))
print('uAs: -----')
for p in c.p.self_and_subtree():
    if p.v.u:
        print('%s %s' % (p.v.u,p.h))
# print('done')

@language python
# @+node:ekr.20251214043555.7: *4* @@button print-ua
d = p.v.u
if d:
    for key in sorted(d):
        print('%10s %s' % (key, d.get(key)))

@language python
# @+node:ekr.20251214043555.8: *4* @@button save-clipboard
"""
Save an image on the clipboard to .leo/screen_captures.
Use Alt-PrintScn to copy the active window to the clipboard.
"""
import time
app = g.app.gui.qtApp
app.processEvents()
image = app.clipboard().image()
if not image.isNull():
    base = r'C:\Users\edreamleo\.leo\screen_captures'
    assert g.os_path_exists(base), repr(base)
    fn = '%s.png' % time.strftime('%Y-%m-%d-%H.%M.%S')
    path = g.os_path_finalize_join(base, fn)
    image.save(path, 'png')
    g.es_print('saved: %s' % path)

@language python
# @+node:ekr.20251214043555.9: *4* @@button screen-shot @key=ctrl-1
"""Create a screenshot of the present Leo outline and save it to .leo/screen_captures."""
#  --window-size=682x1264 is recommended.
from leo.core.leoQt import isQt5, QtGui
import time
base = r'C:\Users\edreamleo\.leo\screen_captures'
assert g.os_path_exists(base), repr(base)
window = g.app.gui.qtApp.activeWindow()
w = window.grab() if isQt5 else QtGui.QPixmap.grabWindow(window.winID())
fn = '%s.png' % time.strftime('%Y-%m-%d-%H-%M-%S')
path = g.os_path_finalize_join(base, fn)
w.save(path, 'png')
g.es_print('saved: %s' % path)

@language python
# @+node:ekr.20220527065937.1: *4* @@command json-to-outline
g.cls()
import json
import os
import textwrap
# The .json file to be converted into a Leo outline.
path = r'C:\Repos\leo-editor\mypy_stubs\3.9\leo\core\leoApp.data.json'
with open(path, 'r') as f:
    d = json.load(f)

def make_outline(d, p, topFlag):
    """Set p.b from dict d. Generate child nodes for inner dicts."""
    result = []
    for key, value in d.items():
        if isinstance(value, dict):
            # Replace the inner dict by a section reference; recurse into a child.
            h2 = 'top' if topFlag else p.h[2:-2].strip()
            section_name = g.angleBrackets(f" {h2}.{key} ")
            result.append(f'"{key}": {{\n')
            result.append(f"    {section_name}\n")
            result.append('}\n')
            child = p.insertAsLastChild()
            child.h = section_name
            make_outline(value, child, topFlag=False)
        elif isinstance(value, str):
            result.append(f'"{key}": "{value}"\n')
        else:
            result.append(f'"{key}": {value}\n')
    p.b = ''.join(result)

# Recursively make the outline.
top = c.lastTopLevel().insertAfter()
top.h = os.path.basename(path)
make_outline(d, top, topFlag=True)
# Adjust top.b: wrap the whole body in braces, indented 4 spaces.
top.b = f"{{\n{textwrap.indent(top.b.strip(),' '*4)}\n}}\n"
c.redraw(top)
    
# @+node:ekr.20260110082926.1: *3* leoTokens.py and related tests
# @+node:ekr.20240105140814.1: *4* leoTokens.py
# This file is part of Leo: https://leo-editor.github.io/leo-editor
# Leo's copyright notice is based on the MIT license:
# https://leo-editor.github.io/leo-editor/license.html

# This file may be compiled with mypyc as follows:
# python -m mypyc leo\core\leoTokens.py --strict-optional

<< leoTokens.py: docstring >>
<< leoTokens.py: imports & annotations >>


@others

if __name__ == '__main__':
    main()  # pragma: no cover

@language python
@tabwidth -4
@pagewidth 70
# @+node:ekr.20240105140814.2: *5* << leoTokens.py: docstring >>
"""
leoTokens.py: A beautifier for Python that uses *only* tokens.

For help: `python -m leo.core.leoTokens --help`

Use Leo https://leo-editor.github.io/leo-editor/ to study this code!

Without Leo, you will see special **sentinel comments** that create
Leo's outline structure. These comments have the form::

    `#@<comment-kind>:<user-id>.<timestamp>.<number>: <outline-level> <headline>`
"""

# @+node:ekr.20240105140814.3: *5* << leoTokens.py: imports & annotations >>
from __future__ import annotations
import argparse
import difflib
import glob
import keyword
import io
import os
import re
import textwrap
import time
import tokenize
from typing import Generator, Optional, Union, TYPE_CHECKING

# Leo Imports.
from leo.core import leoGlobals as g

assert g

if TYPE_CHECKING:
    from leo.core.leoNodes import Position

SettingsDict = dict[str, Union[int, bool]]
# @+node:ekr.20240214065940.1: *5* top-level functions (leoTokens.py)
# @+node:ekr.20240105140814.41: *6* function: dump_contents
def dump_contents(contents: str, tag: str = 'Contents') -> None:  # pragma: no cover
    """Print contents with 1-based line numbers under a tag header."""
    print('')
    print(f"{tag}...\n")
    line_number = 1
    for line in g.splitLines(contents):
        print(f"{line_number:<3} ", line.rstrip())
        line_number += 1
    print('')


# @+node:ekr.20240105140814.42: *6* function: dump_lines
def dump_lines(tokens: list[InputToken], tag: str = 'lines') -> None:  # pragma: no cover
    """Print each token's line; show repr() for whitespace-only lines."""
    print('')
    print(f"{tag}...\n")
    for token in tokens:
        line = token.line
        print(line.rstrip() if line.strip() else repr(line))
    print('')


# @+node:ekr.20240105140814.43: *6* function: dump_results
def dump_results(results: list[str], tag: str = 'Results') -> None:  # pragma: no cover
    """Print all result strings, concatenated, under a tag header."""
    # print(f"\n...") emits the same bytes as print('') followed by print(f"...").
    print(f"\n{tag}...\n")
    print(''.join(results))
    print('')

# @+node:ekr.20240105140814.44: *6* function: dump_tokens
def dump_tokens(tokens: list[InputToken], tag: str = 'Tokens') -> None:  # pragma: no cover
    """Dump the token list under a tag header; only the header when empty."""
    print(f"\n{tag}...\n")
    if not tokens:
        return
    # The column header comes from the first token's class.
    print("Note: values shown are repr(value) *except* for 'string' and 'fstring*' tokens.")
    tokens[0].dump_header()
    for token in tokens:
        print(token.dump())
    print('')


# @+node:ekr.20240105140814.27: *6* function: input_tokens_to_string
def input_tokens_to_string(tokens: list[InputToken]) -> str:  # pragma: no cover
    """Return the string represented by the list of tokens."""
    if tokens is None:
        # This indicates an internal error.
        print('')
        print('===== input token list is None ===== ')
        print('')
        return ''
    parts = [token.to_string() for token in tokens]
    return ''.join(parts)


# @+node:ekr.20240926050431.1: *6* function: beautify_file (leoTokens.py)
def beautify_file(filename: str) -> bool:
    """
    Beautify the given file, writing it if it has changed.

    Return True if the file was beautified.
    """
    # One-shot settings: write and report changed files, no diffs, no --all.
    settings: SettingsDict = {
        'all': False,  # Don't beautify all files.
        'beautified': True,  # Report changed files.
        'diff': False,  # Don't show diffs.
        'report': True,  # Report changed files.
        'write': True,  # Write changed files.
    }
    return TokenBasedOrange(settings).beautify_file(filename)


# @+node:ekr.20240105140814.121: *6* function: main (leoTokens.py)
def main() -> None:  # pragma: no cover
    """Run commands specified by sys.argv."""
    args, settings_dict, arg_files = scan_args()
    cwd = os.getcwd()

    # Expand each path argument: a .py file directly, anything else recursively.
    requested_files: list[str] = []
    for path in arg_files:
        full_path = os.path.join(cwd, path)
        if path.endswith('.py'):
            requested_files.append(full_path)
        else:
            requested_files.extend(
                glob.glob(f'{full_path}**{os.sep}*.py', recursive=True))
    if not requested_files:
        return

    # A file is dirty if it appears in g.getModifiedFiles(cwd).
    modified_files = g.getModifiedFiles(cwd)

    def is_dirty(path: str) -> bool:
        return os.path.abspath(path) in modified_files

    # Compute the files to be checked: everything with --all, else only dirty files.
    if args.all:
        to_be_checked_files = requested_files
    else:
        to_be_checked_files = [z for z in requested_files if is_dirty(z)]

    # Compute the dirty files among the to-be-checked files.
    dirty_files = [z for z in to_be_checked_files if is_dirty(z)]

    # Do the command.
    if to_be_checked_files:
        orange_command(arg_files, requested_files, dirty_files, to_be_checked_files, settings_dict)


# @+node:ekr.20240105140814.5: *6* function: orange_command (leoTokens.py)
def orange_command(
    arg_files: list[str],
    requested_files: list[str],
    dirty_files: list[str],
    to_be_checked_files: list[str],
    settings: Optional[SettingsDict] = None,
) -> None:  # pragma: no cover
    """
    The outer level of the 'tbo/orange' command.

    Beautify every file in `to_be_checked_files`, then print a one-line
    summary when anything was beautified or when --report was given.
    """
    t1 = time.process_time()
    n_beautified = 0
    if settings is None:
        settings = {}
    for filename in to_be_checked_files:
        if os.path.exists(filename):
            tbo = TokenBasedOrange(settings)
            if tbo.beautify_file(filename):
                n_beautified += 1
        else:
            # The original f-string had no placeholder; show the missing file's name.
            print(f"file not found: {filename!r}")
    # Report the results.
    t2 = time.process_time()
    if n_beautified or settings.get('report'):
        print(
            f"tbo: {t2 - t1:4.2f} sec. "
            f"dirty: {len(dirty_files):<3} "
            f"checked: {len(to_be_checked_files):<3} "
            f"beautified: {n_beautified:<3} in {','.join(arg_files)}"
        )


# @+node:ekr.20240105140814.10: *6* function: scan_args (leoTokens.py)
def scan_args() -> tuple[argparse.Namespace, SettingsDict, list[str]]:  # pragma: no cover
    """Scan sys.argv. Return (args, settings_dict, files)."""
    parser = argparse.ArgumentParser(
        description=textwrap.dedent("""Beautify or diff files"""),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument('PATHS', nargs='*', help='directory or list of files')

    # Boolean switches.
    add = parser.add_argument
    add('-a', '--all', dest='all', action='store_true',
        help='Beautify all files, even unchanged files')
    add('-b', '--beautified', dest='beautified', action='store_true',
        help='Report beautified files individually, even if not written')
    add('-d', '--diff', dest='diff', action='store_true',
        help='show diffs instead of changing files')
    add('-r', '--report', dest='report', action='store_true', help='show summary report')
    add('-w', '--write', dest='write', action='store_true',
        help='write beautifed files (dry-run mode otherwise)')

    # Defaults: EKR's prefs.
    parser.set_defaults(
        all=False,
        beautified=False,
        diff=False,
        report=False,
        write=False,
        tab_width=4,
    )
    args: argparse.Namespace = parser.parse_args()
    files = args.PATHS

    # Create the settings dict, coercing every value to a proper bool.
    settings_dict: SettingsDict = {
        'all': bool(args.all),
        'beautified': bool(args.beautified),
        'diff': bool(args.diff),
        'report': bool(args.report),
        'write': bool(args.write),
    }
    return args, settings_dict, files


# @+node:ekr.20240105140814.52: *5* Classes
# @+node:ekr.20240105140814.51: *6* class InternalBeautifierError(Exception)
class InternalBeautifierError(Exception):
    """
    An internal error in the beautifier.

    Raised by tbo.oops; tbo.beautify catches this exception and
    returns the original contents unchanged.

    Errors in the user's source code may raise standard Python errors
    such as IndentationError or SyntaxError.
    """


# @+node:ekr.20240105140814.53: *6* class InputToken
class InputToken:  # leoTokens.py.
    """A class representing a TBO input token."""

    __slots__ = (
        'context',
        'index',
        'kind',
        'line',
        'line_number',
        'value',
    )

    def __init__(
        self,
        kind: str,
        value: str,
        index: int,
        line: str,
        line_number: int,
    ) -> None:
        # Set later by the pre-scan for ':', '=', '**' and '.' tokens; None otherwise.
        self.context: Optional[str] = None
        self.index = index  # Index of this token in the token list.
        self.kind = kind  # Lowercased tok_name, or a pseudo-kind such as 'ws'.
        self.line = line  # The entire line containing the token.
        self.line_number = line_number  # Line number of the token's first line.
        self.value = value  # The token's text.

    def __repr__(self) -> str:  # pragma: no cover
        s = f"{self.index:<5} {self.kind:>8}"
        return f"Token {s}: {self.show_val(20):22}"

    def __str__(self) -> str:  # pragma: no cover
        # Same format as __repr__.
        s = f"{self.index:<5} {self.kind:>8}"
        return f"Token {s}: {self.show_val(20):22}"

    def to_string(self) -> str:
        """Return the contribution of the token to the source file."""
        return self.value if isinstance(self.value, str) else ''

    @others


# @+node:ekr.20240105140814.54: *7* itoken.brief_dump
def brief_dump(self) -> str:  # pragma: no cover
    """Return a brief one-line dump of this token."""
    kind_and_val = f"{self.kind:>10} : {self.show_val(10):12}"
    return f"<line: {self.line_number} index: {self.index:3} {kind_and_val}>"

# @+node:ekr.20240105140814.55: *7* itoken.dump
def dump(self) -> str:  # pragma: no cover
    """Return a full dump of this token, in the format used by dump_tokens."""
    val = self.show_val(100)
    return f"{self.line_number:4} {self.index:>5} {self.kind:>15} {val}"

# @+node:ekr.20240105140814.56: *7* itoken.dump_header
def dump_header(self) -> None:  # pragma: no cover
    """Print the column headers for token.dump"""
    pad = f"{'':10}"
    print(
        f"\n"
        f"         node    {pad} token {pad}   token\n"
        f"line index class {pad} index {pad} kind value\n"
        f"==== ===== ===== {pad} ===== {pad} ==== =====\n"
    )

# @+node:ekr.20240105140814.57: *7* itoken.error_dump
def error_dump(self) -> str:  # pragma: no cover
    """Return a compact dump of this token, for use in error messages."""
    val_s = self.show_val(20)
    return f"index: {self.index:<3} {self.kind:>12} {val_s:<20}"

# @+node:ekr.20240105140814.58: *7* itoken.show_val
def show_val(self, truncate_n: int = 8) -> str:  # pragma: no cover
    """Return a short display form of the token.value field."""
    kind = self.kind
    if kind in ('dedent', 'indent', 'newline', 'ws'):
        # Whitespace-ish tokens: show the repr so the value is visible.
        return repr(self.value)
    if kind == 'string' or kind.startswith('fstring'):
        # repr would be confusing.
        return g.truncate(self.value, truncate_n)
    return g.truncate(repr(self.value), truncate_n)

# @+node:ekr.20240105143307.1: *6* class Tokenizer
class Tokenizer:
    """
    Use Python's tokenize module to create InputTokens
    See: https://docs.python.org/3/library/tokenize.html
    """

    __slots__ = (
        'contents',
        'fstring_level',
        'fstring_line',
        'fstring_line_number',
        'fstring_values',
        'lines',
        'offsets',
        'prev_offset',
        'token_index',
        'token_list',
    )

    def __init__(self) -> None:
        self.contents: str = ''  # The file's contents; set by create_input_tokens.
        self.offsets: list[int] = [0]  # Index of start of each line.
        self.prev_offset = -1  # Character offset of the end of the previous token.
        self.token_index = 0  # Index to assign to the next InputToken.
        self.token_list: list[InputToken] = []  # The accumulated InputTokens.
        # Describing the scanned f-string...
        self.fstring_level: int = 0  # Nesting depth; > 0 while scanning an f-string.
        self.fstring_line: Optional[str] = None  # Line of the outer 'fstring_start' token.
        self.fstring_line_number: Optional[int] = None  # Its line number.
        self.fstring_values: Optional[list[str]] = None  # Accumulated f-string parts.

    @others


# @+node:ekr.20240105143307.2: *7* Tokenizer.add_token
def add_token(
    self,
    kind: str,
    line: str,
    line_number: int,
    value: str,
) -> None:
    """
    Add an InputToken to the token list.

    Convert (potentially nested!) fstrings to simple strings.
    """
    if kind == 'fstring_start':
        # Enter (or nest deeper into) an f-string.
        self.fstring_level += 1
        if self.fstring_level == 1:
            # Remember where the outermost f-string started.
            self.fstring_values = []
            self.fstring_line = line
            self.fstring_line_number = line_number
        self.fstring_values.append(value)
        return
    if self.fstring_level > 0:
        # Accumulating an f-string.
        self.fstring_values.append(value)
        if kind == 'fstring_end':
            self.fstring_level -= 1
        if self.fstring_level > 0:
            # Still inside a nested f-string: keep accumulating.
            return
        # End of the outer f-string.
        # Create a single 'string' token from the saved values.
        kind = 'string'
        value = ''.join(self.fstring_values)
        # Use the line and line number of the 'string-start' token.
        line = self.fstring_line or ''
        line_number = self.fstring_line_number or 0
        # Clear the saved values.
        assert self.fstring_level == 0
        self.fstring_line = None
        self.fstring_line_number = None
        self.fstring_values = None

    tok = InputToken(kind, value, self.token_index, line, line_number)
    self.token_index += 1
    self.token_list.append(tok)

# @+node:ekr.20240105143214.2: *7* Tokenizer.check_results
def check_results(self, contents: str) -> None:
    """Check that the token list reproduces `contents` and self.lines exactly."""
    # Recreate the source from the tokens.
    result = ''.join(token.to_string() for token in self.token_list)
    result_lines = g.splitLines(result)
    # Check both the raw text and the split lines.
    same_text = result == contents
    same_lines = result_lines == self.lines
    assert same_text and same_lines, (
        f"\n"
        f"      result: {result!r}\n"
        f"    contents: {contents!r}\n"
        f"result_lines: {result_lines}\n"
        f"       lines: {self.lines}"
    )

# @+node:ekr.20240105143214.3: *7* Tokenizer.check_round_trip
def check_round_trip(self, contents: str, tokens: list[InputToken]) -> bool:
    """Return True if `tokens` reproduces `contents` exactly."""
    result = self.tokens_to_string(tokens)
    if result == contents:
        return True
    # pragma: no cover
    print('\nRound-trip check FAILS')
    print('Contents...\n')
    print(contents)
    print('\nResult...\n')
    print(result)
    return False

# @+node:ekr.20240105143214.4: *7* Tokenizer.create_input_tokens
def create_input_tokens(
    self,
    contents: str,
    five_tuples: Generator,
) -> list[InputToken]:
    """
    InputTokenizer.create_input_tokens.

    Return a list of InputTokens created from `five_tuples`,
    the 5-tuples produced by tokenize.tokenize.
    """
    # Remember the contents for debugging.
    self.contents = contents

    # Create the physical lines.
    self.lines = contents.splitlines(True)

    # self.offsets[i] is the character offset of the start of line i.
    offset = 0
    for line in self.lines:
        offset += len(line)
        self.offsets.append(offset)

    # Handle each token, accumulating entries in self.token_list.
    for five_tuple in five_tuples:
        self.do_token(contents, five_tuple)

    # Check the round trip and return.
    self.check_results(contents)
    return self.token_list

# @+node:ekr.20240105143214.5: *7* Tokenizer.do_token (the gem)
def do_token(self, contents: str, five_tuple: tuple) -> None:
    """
    Handle the given token, optionally including between-token whitespace.

    https://docs.python.org/3/library/tokenize.html
    https://docs.python.org/3/library/token.html

    five_tuple is a named tuple with these fields:
    - type:     The token type.
    - string:   The token string.
    - start:    (srow: int, scol: int) The row (line_number!) and column
                where the token begins in the source.
    - end:      (erow: int, ecol: int) The row (line_number!) and column
                where the token ends in the source.
    - line:     The *physical* line on which the token was found.
    """
    import token as token_module

    # Unpack..
    tok_type, val, start, end, line = five_tuple
    s_row, s_col = start  # row/col offsets of start of token.
    e_row, e_col = end  # row/col offsets of end of token.
    line_number = s_row
    kind = token_module.tok_name[tok_type].lower()
    # Calculate the token's start/end offsets: character offsets into contents.
    s_offset = self.offsets[max(0, s_row - 1)] + s_col
    e_offset = self.offsets[max(0, e_row - 1)] + e_col
    # tok_s is the corresponding string in the line.
    tok_s = contents[s_offset:e_offset]
    # Add any preceding between-token whitespace.
    ws = contents[self.prev_offset : s_offset]
    if ws:
        # Create the 'ws' pseudo-token so the token list round-trips exactly.
        self.add_token('ws', line, line_number, ws)
    # Always add token, even if it contributes no text!
    self.add_token(kind, line, line_number, tok_s)
    # Update the ending offset.
    self.prev_offset = e_offset

# @+node:ekr.20240105143214.6: *7* Tokenizer.make_input_tokens (entry)
def make_input_tokens(self, contents: str) -> list[InputToken]:
    """
    Return a list of InputToken objects using tokenize.tokenize.

    Perform consistency checks and handle all exceptions.
    """
    readline = io.BytesIO(contents.encode('utf-8')).readline
    try:
        five_tuples = tokenize.tokenize(readline)
    except Exception as e:  # pragma: no cover
        print(f"make_input_tokens: exception {e!r}")
        return []
    tokens = self.create_input_tokens(contents, five_tuples)
    # The round-trip check is cheap: ~2.9 sec. with it vs ~2.8 sec. without.
    assert self.check_round_trip(contents, tokens)
    return tokens

# @+node:ekr.20240105143214.7: *7* Tokenizer.tokens_to_string
def tokens_to_string(self, tokens: list[InputToken]) -> str:
    """Return the string represented by the list of tokens."""
    if tokens is None:  # pragma: no cover
        # This indicates an internal error.
        print('')
        print('===== No tokens ===== ')
        print('')
        return ''
    parts = [token.to_string() for token in tokens]
    return ''.join(parts)

# @+node:ekr.20240105140814.108: *6* class ParseState
class ParseState:
    """
    A class representing items in the parse state stack.

    The present states:

    'file-start': Ensures the stack stack is never empty.

    'decorator': The last '@' was a decorator.

        do_op():    push_state('decorator')
        do_name():  pops the stack if state.kind == 'decorator'.

    'indent': The indentation level for 'class' and 'def' names.

        do_name():      push_state('indent', self.level)
        do_dendent():   pops the stack once or
                        twice if state.value == self.level.
    """

    __slots__ = ('kind', 'value')

    def __init__(self, kind: str, value: Union[int, str, None]) -> None:
        self.kind = kind
        self.value = value

    def __repr__(self) -> str:
        return f"State: {self.kind} {self.value!r}"  # pragma: no cover

    # str() and repr() use the same format.
    __str__ = __repr__


# @+node:ekr.20240128114842.1: *6* class ScanState
class ScanState:  # leoTokens.py.
    """
    A class representing tbo.pre_scan's scanning state.

    Valid (kind, value) pairs:

       kind  Value
       ====  =====
      'args' None
      'from' None
    'import' None
     'slice' list of colon indices
      'dict' list of colon indices

    """

    __slots__ = ('kind', 'token', 'value')

    def __init__(self, kind: str, token: InputToken) -> None:
        self.kind = kind
        self.token = token
        self.value: list[int] = []  # Used only by the 'slice' and 'dict' kinds.

    def __repr__(self) -> str:  # pragma: no cover
        return f"ScanState: i: {self.token.index:<4} kind: {self.kind} value: {self.value}"

    # str() and repr() use the same format.
    __str__ = __repr__


# @+node:ekr.20240105145241.1: *6* class TokenBasedOrange
class TokenBasedOrange:  # Orange is the new Black.
    # The class body is assembled by Leo from the section references and @others below.
    << TokenBasedOrange: docstring >>
    << TokenBasedOrange: __slots__ >>
    << TokenBasedOrange: python-related constants >>

    @others


# @+node:ekr.20240119062227.1: *7* << TokenBasedOrange: docstring >>
@language rest
@wrap

"""
Leo's token-based beautifier, three times faster than the beautifier in leoAst.py.

**Design**

The *pre_scan* method is the heart of the algorithm. It sets context
for the `:`, `=`, `**` and `.` tokens *without* using the parse tree.
*pre_scan* calls three *finishers*.

Each finisher uses a list of *relevant earlier tokens* to set the
context for one kind of (input) token. Finishers look behind (in the
stream of input tokens) with essentially no cost.

After the pre-scan, *tbo.beautify* (the main loop) calls *visitors*
for each separate type of *input* token.

Visitors call *code generators* to generate strings in the output
list, using *lazy evaluation* to generate whitespace.
"""

# @+node:ekr.20240111035404.1: *7* << TokenBasedOrange: __slots__ >>
# All TokenBasedOrange instance variables, grouped by purpose.
__slots__ = [
    # Command-line arguments.
    'all',
    'beautified',
    'diff',
    'report',
    'write',
    # Global data.
    'contents',
    'filename',
    'input_tokens',
    'output_list',
    'tab_width',
    'insignificant_tokens',  # New.
    # Token-related data for visitors.
    'index',
    'input_token',
    'line_number',
    'pending_lws',
    'pending_ws',
    'prev_output_kind',
    'prev_output_value',
    # Parsing state for visitors.
    'decorator_seen',
    'in_arg_list',
    'in_doc_part',
    'state_stack',
    'verbatim',
    # Whitespace state. Don't even *think* about changing these!
    'curly_brackets_level',
    'indent_level',
    'lws',
    'paren_level',
    'square_brackets_stack',
    # Regular expressions.
    'at_others_pat',
    'beautify_pat',
    'comment_pat',
    'end_doc_pat',
    'nobeautify_pat',
    'nobeautify_sentinel_pat',
    'node_pat',
    'start_doc_pat',
]
# @+node:ekr.20240116040458.1: *7* << TokenBasedOrange: python-related constants >>
# Token kinds that contribute no significant text.
# NOTE(review): usage is not visible here; compare self.insignificant_tokens in tbo.ctor.
insignificant_kinds = (
    'comment',
    'dedent',
    'encoding',
    'endmarker',
    'indent',
    'newline',
    'nl',
    'ws',
)

# 'name' tokens that may appear in expressions.
operator_keywords = (
    'await',  # Debatable.
    'and',
    'in',
    'not',
    'not in',
    'or',  # Operators.
    'True',
    'False',
    'None',  # Values.
)
# @+node:ekr.20240105145241.2: *7* tbo.ctor
def __init__(self, settings: Optional[SettingsDict] = None):
    """Ctor for the TokenBasedOrange class."""

    # Set default settings.
    if settings is None:
        settings = {}

    # Hard-code 4-space tabs.
    self.tab_width = 4

    # Define tokens even for empty files.
    self.input_token: Optional[InputToken] = None  # The token being scanned.
    self.input_tokens: list[InputToken] = []
    self.lws: str = ""  # Set only by Indent/Dedent tokens.
    self.pending_lws: str = ""
    self.pending_ws: str = ""

    # Set by gen_token and all do_* methods that bypass gen_token.
    self.prev_output_kind: Optional[str] = None
    self.prev_output_value: Optional[str] = None

    # Set ivars from the settings dict *without* using setattr.
    self.all = settings.get('all', False)
    self.beautified = settings.get('beautified', False)
    self.diff = settings.get('diff', False)
    self.report = settings.get('report', False)
    self.write = settings.get('write', False)

    # The list of tokens that tbo._next/_prev skip.
    self.insignificant_tokens = (
        'comment',
        'dedent',
        'indent',
        'newline',
        'nl',
        'ws',
    )

    # fmt: off

    # General patterns.
    self.beautify_pat = re.compile(
        r'#\s*pragma:\s*beautify\b|#\s*@@beautify|#\s*@\+node|#\s*@[+-]others|#\s*@[+-]<<')
    self.comment_pat = re.compile(r'^(\s*)#[^@!# \n]')
    self.nobeautify_pat = re.compile(r'\s*#\s*pragma:\s*no\s*beautify\b|#\s*@@nobeautify')
    self.nobeautify_sentinel_pat = re.compile(r'^#\s*@@nobeautify\s*$', re.MULTILINE)

    # Patterns from FastAtRead class, specialized for python delims.
    self.node_pat = re.compile(r'^(\s*)#@\+node:([^:]+): \*(\d+)?(\*?) (.*)$')  # @node
    self.start_doc_pat = re.compile(r'^\s*#@\+(at|doc)?(\s.*?)?$')  # @doc or @
    self.at_others_pat = re.compile(r'^(\s*)#@(\+|-)others\b(.*)$')  # @others

    # Doc parts end with @c or a node sentinel. Specialized for python.
    self.end_doc_pat = re.compile(r"^\s*#@(@(c(ode)?)|([+]node\b.*))$")

    # fmt: on

# @+node:ekr.20240126012433.1: *7* tbo: Checking & dumping
# @+node:ekr.20240106220724.1: *8* tbo.dump_token_range
def dump_token_range(self, i1: int, i2: int, tag: Optional[str] = None) -> None:  # pragma: no cover
    """Dump input tokens i1..i2 (inclusive), preceded by an optional tag line."""
    if tag:
        print(tag)
    for tok in self.input_tokens[i1 : i2 + 1]:
        print(tok.dump())

# @+node:ekr.20240112082350.1: *8* tbo.internal_error_message
def internal_error_message(self) -> None:  # pragma: no cover
    """Print the details of the internal error."""
    # Gather up to five lines of context on each side of the failing line.
    line_number = self.input_token.line_number
    lines = g.splitLines(self.contents)
    first = max(0, line_number - 5)
    last = min(line_number + 5, len(lines))
    context_lines = ['\n']
    for i in range(first, last):
        marker_s = '***' if i + 1 == line_number else '   '
        context_lines.append(f"Line {i + 1:5}:{marker_s}{lines[i]!r}\n")

    # Print the messages.
    print('')
    g.es_print('Error in token-based beautifier!')
    g.es_print(f"At token {self.index}, line: {line_number} file: {self.filename}")
    g.es_print("Please report this message to Leo's developers")
    print('')
    g.es_print(g.objToString(context_lines))

# @+node:ekr.20240226131015.1: *8* tbo.user_error_message
def user_error_message(self, message: str) -> str:  # pragma: no cover
    """Return a full error message describing a user error."""
    # Gather up to five lines of context on each side of the failing line.
    line_number = self.input_token.line_number
    lines = g.splitLines(self.contents)
    first = max(0, line_number - 5)
    last = min(line_number + 5, len(lines))
    context_lines = ['\n']
    for i in range(first, last):
        marker_s = '***' if i + 1 == line_number else '   '
        context_lines.append(f"Line {i + 1:5}:{marker_s}{lines[i]!r}\n")
    context_s = ''.join(context_lines) + '\n'

    # Return the full error message.
    return (
        f"{message.strip()}\n\n"
        f"At token {self.index}, line: {line_number} file: {self.filename}\n"
        f"{context_s}"
    )

# @+node:ekr.20240117053310.1: *8* tbo.oops
def oops(self, message: str) -> None:  # pragma: no cover
    """
    Raise InternalBeautifierError after printing full error context.

    tbo.beautify catches the exception and returns the original contents.
    """
    self.internal_error_message()
    raise InternalBeautifierError(message)

# @+node:ekr.20240105145241.4: *7* tbo: Entries & helpers
# @+node:ekr.20240105145241.5: *8* tbo.beautify (main token loop)
def no_visitor(self) -> None:  # pragma: no cover
    """Fallback visitor: report an input token kind with no do_* method."""
    kind = self.input_token.kind
    self.oops(f"Unknown kind: {kind!r}")

def beautify(
    self,
    contents: str,
    filename: str,
    input_tokens: list[InputToken],
) -> str:
    """
    The main line. Create output tokens and return the result as a string.

    beautify_file and beautify_file_def call this method.

    On any internal error, print a message and return `contents` unchanged.
    """
    << tbo.beautify: init ivars >>

    try:
        # Pre-scan the token list, setting context.
        self.pre_scan()

        # Init ivars first.
        self.input_token = None
        self.pending_lws = ''
        self.pending_ws = ''
        self.prev_output_kind = None
        self.prev_output_value = None

        # Init state.
        self.gen_token('file-start', '')
        self.push_state('file-start')

        # The main loop:
        prev_line_number: int = 0
        for self.index, self.input_token in enumerate(input_tokens):
            # Set global for visitors.
            # NOTE(review): only tracks the previous line number; no other action here.
            if prev_line_number != self.input_token.line_number:
                prev_line_number = self.input_token.line_number
            # Call the proper visitor. Verbatim mode overrides all visitors.
            if self.verbatim:
                self.do_verbatim()
            else:
                # Dispatch on the token kind: do_name, do_op, etc.
                func = getattr(self, f"do_{self.input_token.kind}", self.no_visitor)
                func()

        # Return the result.
        result = ''.join(self.output_list)
        return result

    # Make no change if there is any error.
    except InternalBeautifierError as e:  # pragma: no cover
        # oops calls self.internal_error_message to create e's message.
        print(repr(e))
    except AssertionError as e:  # pragma: no cover
        print(f"AssertionError: {e!r}")
        self.internal_error_message()
    return contents

# @+node:ekr.20240112023403.1: *9* << tbo.beautify: init ivars >>
# Debugging vars...
self.contents = contents
self.filename = filename
self.line_number: Optional[int] = None

# The input and output lists...
self.output_list: list[str] = []
self.input_tokens = input_tokens  # The list of input tokens.

# State vars for whitespace.
self.curly_brackets_level = 0  # Number of unmatched '{' tokens.
self.paren_level = 0  # Number of unmatched '(' tokens.
self.square_brackets_stack: list[bool] = []  # A stack of bools, for self.gen_word().
self.indent_level = 0  # Set only by do_indent and do_dedent.

# Parse state.
self.decorator_seen = False  # Set by do_name for do_op.
self.in_arg_list = 0  # > 0 if in an arg list of a def.
self.in_doc_part = False
self.state_stack: list[ParseState] = []  # Stack of ParseState objects.

# Leo-related state.
self.verbatim = False  # True: don't beautify.

# Ivars describing the present input token...
self.index = 0  # The index within the tokens array of the token being scanned.
self.lws = ''  # Leading whitespace. Required!
# @+node:ekr.20240105145241.6: *8* tbo.beautify_file (entry) (stats & diffs)
def beautify_file(self, filename: str) -> bool:  # pragma: no cover
    """
    TokenBasedOrange: Beautify the given external file.

    Return True if the file was beautified.
    """
    if 0:  # Debug: show the effective settings.
        print(
            f"all: {int(self.all)} "
            f"beautified: {int(self.beautified)} "
            f"diff: {int(self.diff)} "
            f"report: {int(self.report)} "
            f"write: {int(self.write)} "
            f"{g.shortFileName(filename)}"
        )
    self.filename = filename
    contents, tokens = self.init_tokens_from_file(filename)
    if not (contents and tokens):
        return False  # Not an error.
    if self.nobeautify_sentinel_pat.search(contents):
        return False  # Honor @nobeautify sentinel within the file.
    if not isinstance(tokens[0], InputToken):
        self.oops(f"Not an InputToken: {tokens[0]!r}")

    # Beautify the contents, returning the original contents on any error.
    results = self.beautify(contents, filename, tokens)

    # Ignore changes only to newlines.
    if self.regularize_newlines(contents) == self.regularize_newlines(results):
        return False

    # Print reports.
    if self.beautified:  # --beautified.
        print(f"tbo: beautified: {g.shortFileName(filename)}")
    if self.diff:  # --diff.
        # The original f-string had no placeholder; show the file name.
        print(f"Diffs: {g.shortFileName(filename)}")
        self.show_diffs(contents, results)

    # Write the (changed) file.
    if self.write:  # --write.
        self.write_file(filename, results)
    return True

# @+node:ekr.20250508041634.1: *8* tbo.beautify_script_tree (entry) and helper
def beautify_script_tree(self, root: Position) -> None:
    """Undoably beautify root's entire tree."""
    c = root.v.context
    u, undoType = c.undoer, 'beautify-script'
    first_p = c.p
    n_changed = 0
    for p in root.self_and_subtree():
        bunch = u.beforeChangeNodeContents(p)
        changed = self.beautify_script_node(p)
        if changed:
            # #4443: Start the undo group lazily, on the first actual change.
            if n_changed == 0:  # #4443.
                u.beforeChangeGroup(first_p, undoType)
            n_changed += 1
            u.afterChangeNodeContents(p, undoType, bunch)
    if n_changed:
        # Close the undo group and redraw only if anything changed.
        u.afterChangeGroup(root, undoType)
        c.redraw(root)
        if not g.unitTesting:
            g.es_print(f"Beautified {n_changed} node{g.plural(n_changed)}")

# @+node:ekr.20250508030747.1: *9* tbo.beautify_script_node
def beautify_script_node(self, p: Position) -> bool:
    """Beautify a single node. Return True if p.b was changed."""

    # Patterns for lines that must be replaced.
    section_ref_pat = re.compile(r'(\s*)\<\<(.+)\>\>(.*)')
    nobeautify_pat = re.compile(r'(\s*)\@nobeautify(.*)')
    trailing_ws_pat = re.compile(r'(.*)#(.*)')

    # Part 1: Replace @others and section references with 'pass'
    #         This hack is valid!
    indices: list[int] = []  # Indices of replaced lines.
    contents: list[str] = []  # Contents after replacements.
    for i, s in enumerate(g.splitLines(p.b)):
        if m := section_ref_pat.match(s):
            contents.append(f"{m.group(1)}pass\n")
            indices.append(i)
        elif m := nobeautify_pat.match(s):
            # Honor @nobeautify: leave the whole node unchanged.
            return False
        else:
            contents.append(s)

    # Part 2: Beautify.
    self.indent_level = 0
    self.filename = 'beautify-script'
    contents_s = ''.join(contents)
    tokens = Tokenizer().make_input_tokens(contents_s)
    if not tokens:
        return False
    results_s: str = self.beautify(contents_s, self.filename, tokens)

    # Part 3: Undo replacements, regularize comments and clean trailing ws.
    body_lines: list[str] = g.splitLines(p.b)
    results: list[str] = g.splitLines(results_s)
    for i in indices:
        # Restore the original line, with exactly two spaces before a trailing comment.
        old_line = body_lines[i]
        if m := trailing_ws_pat.match(old_line):
            old_line = f"{m.group(1).rstrip()}  #{m.group(2)}"
        results[i] = old_line.rstrip() + '\n'

    # Part 4: Update the body if necessary.
    # NOTE(review): the comparison ignores only *trailing* whitespace of the whole body.
    new_body = ''.join(results)
    changed = p.b.rstrip() != new_body.rstrip()
    if changed:
        p.b = new_body
    return changed

# @+node:ekr.20240105145241.8: *8* tbo.init_tokens_from_file
def init_tokens_from_file(self, filename: str) -> tuple[str, list[InputToken]]:  # pragma: no cover
    """
    Create the list of tokens for the given file.
    Return (contents, tokens).
    """
    self.indent_level = 0
    self.filename = filename
    t1 = time.perf_counter_ns()
    contents = g.readFile(filename)
    t2 = time.perf_counter_ns()
    if not contents:
        # No contents: return empty results.
        self.input_tokens = []
        return '', []
    t3 = time.perf_counter_ns()
    self.input_tokens = input_tokens = Tokenizer().make_input_tokens(contents)
    t4 = time.perf_counter_ns()
    if 0:  # Debug: print timings.
        print(f"       read: {(t2 - t1) / 1000000:6.2f} ms")
        print(f"make_tokens: {(t4 - t3) / 1000000:6.2f} ms")
        print(f"      total: {(t4 - t1) / 1000000:6.2f} ms")
    return contents, input_tokens

# @+node:ekr.20240105140814.12: *8* tbo.regularize_newlines
def regularize_newlines(self, s: str) -> str:
    """Return s with CRLF and bare CR line endings converted to LF."""
    # Convert CRLF first so its CR is not doubled into two newlines.
    crlf_fixed = s.replace('\r\n', '\n')
    return crlf_fixed.replace('\r', '\n')

# @+node:ekr.20240105140814.17: *8* tbo.write_file
def write_file(self, filename: str, s: str) -> None:  # pragma: no cover
    """
    Encode s and write it to the named file.

    Handle all exceptions.

    Before calling this method, the caller should ensure
    that the file actually has been changed.
    """
    try:
        encoded = g.toEncodedString(s)  # May raise exception.
        with open(filename, 'wb') as f:
            f.write(encoded)
    except Exception as e:  # pragma: no cover
        print(f"Error {e!r}: {filename!r}")

# @+node:ekr.20200107040729.1: *8* tbo.show_diffs
def show_diffs(self, s1: str, s2: str) -> None:  # pragma: no cover
    """Print a unified diff between strings s1 and s2."""
    # Fix: `filename` was computed but never used; the diff labels were
    # constant f-strings with no placeholders. Use the file name in the labels.
    filename = self.filename
    lines = list(
        difflib.unified_diff(
            g.splitLines(s1),
            g.splitLines(s2),
            fromfile=f"Old {filename}",
            tofile=f"New {filename}",
        )
    )
    print('')
    print(f"Diffs for {filename}")
    for line in lines:
        print(line)

# @+node:ekr.20240105145241.9: *7* tbo: Visitors & generators
# Visitors (tbo.do_* methods) handle input tokens.
# Generators (tbo.gen_* methods) create zero or more output tokens.
# @+node:ekr.20240105145241.10: *8* tbo.do_comment
def do_comment(self) -> None:
    """
    Handle a comment token.

    Entire-line comments keep their own leading whitespace;
    trailing comments get exactly two spaces before the '#'.
    """
    val = self.input_token.value
    << do_comment: update comment-related state >>

    # Generate the comment.
    self.pending_lws = ''
    self.pending_ws = ''
    entire_line = self.input_token.line.lstrip().startswith('#')
    if entire_line:
        # The comment includes all ws.
        # #1496: No further munging needed.
        val = self.input_token.line.rstrip()
        # #3056: Insure one space after '#' in non-sentinel comments.
        #        Do not change bang lines or '##' comments.
        if m := self.comment_pat.match(val):
            i = len(m.group(1))
            val = val[:i] + '# ' + val[i + 1 :]
    else:
        # Exactly two spaces before trailing comments.
        i = val.find('#')
        if i == -1:
            # Can't happen for a comment token; trace defensively.
            g.trace('OOPS', repr(val), g.callers())
        # Special case for ###.
        elif val[i:].startswith('###'):
            val = val[:i].rstrip() + '  ### ' + val[i + 3 :].strip()
        else:
            val = val[:i].rstrip() + '  # ' + val[i + 1 :].strip()
    self.gen_token('comment', val.rstrip())

# @+node:ekr.20240420034216.1: *9* << do_comment: update comment-related state >>
# Leo-specific code: update comment-related state from the comment's text.
if self.node_pat.match(val):
    # Clear per-node state.
    self.in_doc_part = False
    self.verbatim = False
    self.decorator_seen = False
    # Do *not* clear other state, which may persist across @others.
    # self.curly_brackets_level = 0
    # self.in_arg_list = 0
    # self.indent_level = 0
    # self.lws = ''
    # self.paren_level = 0
    # self.square_brackets_stack = []
    # self.state_stack = []
else:
    # Keep track of verbatim mode.
    if self.beautify_pat.match(val):
        self.verbatim = False
    elif self.nobeautify_pat.match(val):
        self.verbatim = True
    # Keep track of @doc parts, to honor the convention for splitting lines.
    if self.start_doc_pat.match(val):
        self.in_doc_part = True
    if self.end_doc_pat.match(val):
        self.in_doc_part = False
# @+node:ekr.20240111051726.1: *8* tbo.do_dedent
def do_dedent(self) -> None:
    """Handle a dedent token by decreasing the indentation level."""
    # Note: other methods use self.indent_level.
    self.indent_level -= 1
    new_lws = ' ' * (self.tab_width * self.indent_level)
    self.lws = new_lws
    self.pending_lws = new_lws
    self.pending_ws = ''
    self.prev_output_kind = 'dedent'

# @+node:ekr.20240105145241.11: *8* tbo.do_encoding
def do_encoding(self) -> None:
    """Handle the encoding token. It requires no output."""

# @+node:ekr.20240105145241.12: *8* tbo.do_endmarker
def do_endmarker(self) -> None:
    """Handle an endmarker token, ensuring exactly one newline at end of file."""
    at_line_boundary = self.prev_output_kind in (
        'indent', 'dedent', 'line-indent', 'newline')
    if not at_line_boundary:
        self.output_list.append('\n')
    # Defensive: drop any queued whitespace.
    self.pending_lws = ''
    self.pending_ws = ''

# @+node:ekr.20240105145241.14: *8* tbo.do_indent
consider_message = 'consider using python/Tools/scripts/reindent.py'

def do_indent(self) -> None:
    """Handle an indent token, warning (only) about suspicious indentation."""
    value = self.input_token.value
    # Warn about tabs or indentation that is not a multiple of tab_width.
    if '\t' in value:  # pragma: no cover
        print(f"Found tab character in {self.filename}")
        print(self.consider_message)
    elif len(value) % self.tab_width:  # pragma: no cover
        print(f"Indentation error in {self.filename}")
        print(self.consider_message)
    # Adjust the indentation level.
    old_indent = ' ' * (self.indent_level * self.tab_width)
    if value > old_indent:
        self.indent_level += 1
    elif value < old_indent:  # pragma: no cover (defensive)
        print(f"\n===== do_indent: can not happen {value!r}, {old_indent!r}")
    self.lws = value
    self.pending_lws = value
    self.pending_ws = ''
    self.prev_output_kind = 'indent'

# @+node:ekr.20240105145241.16: *8* tbo.do_name & generators
# @+node:ekr.20240418050017.1: *9* tbo.do_name
def do_name(self) -> None:
    """Handle a name token."""
    name = self.input_token.value
    if name == '_':
        # #4420: Special case: no blank between '*' and '_'.
        prev = self.input_tokens[self.index - 1]
        if (prev.kind, prev.value) == ('op', '*'):
            self.pending_ws = ''
            self.gen_token('word', name)
            self.gen_blank()
        else:
            self.gen_word(name)
        return
    if name in self.operator_keywords:
        self.gen_word_op(name)
    else:
        self.gen_word(name)

# @+node:ekr.20240105145241.40: *9* tbo.gen_word
def gen_word(self, s: str) -> None:
    """Generate a 'word' output token surrounded by requested blanks."""
    assert s and isinstance(s, str), repr(s)
    assert s == self.input_token.value
    self.gen_blank()
    self.gen_token('word', s)
    self.gen_blank()

# @+node:ekr.20240107141830.1: *9* tbo.gen_word_op
def gen_word_op(self, s: str) -> None:
    """Generate a 'word-op' output token surrounded by requested blanks."""
    assert s and isinstance(s, str), repr(s)
    assert s == self.input_token.value
    self.gen_blank()
    self.gen_token('word-op', s)
    self.gen_blank()

# @+node:ekr.20240105145241.17: *8* tbo.do_newline & do_nl
# @+node:ekr.20240418043826.1: *9* tbo.do_newline
def do_newline(self) -> None:
    """
    Handle a NEWLINE token, which ends a *logical* line of Python code.

    See https://docs.python.org/3/library/token.html
    """
    # #4349: Drop trailing 'ws' tokens so they cannot produce trailing whitespace.
    while self.input_tokens and self.input_tokens[-1].kind == 'ws':
        self.input_tokens.pop()
    self.output_list.append('\n')
    # Only 'dedent', 'indent' and 'ws' tokens may set pending_lws.
    self.pending_lws = ''
    self.pending_ws = ''
    self.prev_output_kind = 'newline'
    self.prev_output_value = '\n'

# @+node:ekr.20240418043827.1: *9* tbo.do_nl
def do_nl(self) -> None:
    """
    do_nl: Handle a continuation line.

    From https://docs.python.org/3/library/token.html

    NL tokens end *physical* lines. They appear when a logical line of
    code spans multiple physical lines.
    """
    # Continuation lines are handled exactly like logical newlines.
    self.do_newline()

# @+node:ekr.20240105145241.18: *8* tbo.do_number
def do_number(self) -> None:
    """Handle a number token."""
    val = self.input_token.value
    self.gen_blank()
    self.gen_token('number', val)

# @+node:ekr.20240105145241.19: *8* tbo.do_op & generators
# @+node:ekr.20240418045924.1: *9* tbo.do_op
def do_op(self) -> None:
    """Handle an op token, dispatching on its value."""
    val = self.input_token.value
    if val == '.':
        self.gen_dot_op()
        return
    if val == '@':
        # Start a decorator.
        self.gen_token('op-no-blanks', val)
        self.push_state('decorator')
        return
    if val == ':':
        # Treat slices differently.
        self.gen_colon()
        return
    if val in (',', ';'):
        # Pep 8: Avoid extraneous whitespace immediately before
        # comma, semicolon, or colon.
        self.pending_ws = ''
        self.gen_token('op', val)
        self.gen_blank()
        return
    if val in ('(', '[', '{'):
        # Pep 8: Avoid extraneous whitespace immediately inside
        # parentheses, brackets or braces.
        self.gen_lt()
        return
    if val in (')', ']', '}'):
        # Ditto.
        self.gen_rt()
        return
    if val == '=':
        self.gen_equal_op()
        return
    if val in ('~', '+', '-'):
        self.gen_possible_unary_op()
        return
    if val == '*':
        self.gen_star_op()
        return
    if val == '**':
        self.gen_star_star_op()
        return
    # Pep 8: always surround binary operators with a single space.
    # '==','+=','-=','*=','**=','/=','//=','%=','!=','<=','>=','<','>',
    # '^','~','*','**','&','|','/','//',
    self.gen_blank()
    self.gen_token('op', val)
    self.gen_blank()

# @+node:ekr.20240105145241.31: *9* tbo.gen_colon & helper
def gen_colon(self) -> None:
    """Generate code for a ':' token, honoring its pre-scanned context."""
    val = self.input_token.value
    context = self.input_token.context
    self.pending_ws = ''
    if context == 'simple-slice':
        # No blanks around ':' in simple slices.
        self.gen_token('op-no-blanks', val)
        return
    if context == 'complex-slice':
        # Blanks around ':' in complex slices, except after '[' or ':'.
        if self.prev_output_value not in '[:':
            self.gen_blank()
        self.gen_token('op', val)
        self.gen_blank()
        return
    # 'dict' colons and all other colons: blank after, none before.
    self.gen_token('op', val)
    self.gen_blank()

# @+node:ekr.20240109035004.1: *9* tbo.gen_dot_op & _next
def gen_dot_op(self) -> None:
    """
    Handle the '.' input token.

    Inside import statements '.' may need surrounding blanks;
    everywhere else it binds tightly with no blanks.
    """
    context = self.input_token.context
    # Get the next significant **input** token.
    # This is the only call to _next(i) anywhere!
    next_i = self._next(self.index)
    # Bug fix: the original bound the *string* 'None' (not None) when there
    # was no next token, so `next.kind` raised AttributeError. Also avoid
    # shadowing the builtin `next`.
    next_token = None if next_i is None else self.input_tokens[next_i]
    import_is_next = bool(
        next_token and next_token.kind == 'name' and next_token.value == 'import')
    if context == 'import':
        if self.prev_output_kind == 'word' and self.prev_output_value in (
            'from',
            'import',
        ):
            self.gen_blank()
            op = 'op' if import_is_next else 'op-no-blanks'
            self.gen_token(op, '.')
        elif import_is_next:
            self.gen_token('op', '.')
            self.gen_blank()
        else:
            self.pending_ws = ''
            self.gen_token('op-no-blanks', '.')
    else:
        self.pending_ws = ''
        self.gen_token('op-no-blanks', '.')

# @+node:ekr.20240105145241.43: *10* tbo._next
def _next(self, i: int) -> Optional[int]:
    """
    Return the next *significant* input token.

    Ignore insignificant tokens: whitespace, indentation, comments, etc.

    The **Global Token Ratio** is tbo.n_scanned_tokens / len(tbo.tokens),
    where tbo.n_scanned_tokens is the total number of calls calls to
    tbo.next or tbo.prev.

    For Leo's sources, this ratio ranges between 0.48 and 1.51!

    The orange_command function warns if this ratio is greater than 2.5.
    Previous versions of this code suffered much higher ratios.
    """
    i += 1
    while i < len(self.input_tokens):
        token = self.input_tokens[i]
        if token.kind not in self.insignificant_tokens:
            # g.trace(f"token: {token!r}")
            return i
        i += 1
    return None  # pragma: no cover

# @+node:ekr.20240105145241.20: *9* tbo.gen_equal_op
def gen_equal_op(self) -> None:
    """Generate code for an '=' token, honoring its pre-scanned context."""
    val = self.input_token.value
    if self.input_token.context == 'initializer':
        # Pep 8: Don't use spaces around '=' for keyword arguments or
        #        simple default parameter values.
        self.pending_ws = ''
        self.gen_token('op-no-blanks', val)
        return
    # Pep 8: *do* use spaces around '=' elsewhere, including when an
    #        argument annotation is combined with a default value.
    self.gen_blank()
    self.gen_token('op', val)
    self.gen_blank()

# @+node:ekr.20240105145241.35: *9* tbo.gen_lt
def gen_lt(self) -> None:
    """Generate code for '(', '[' or '{'."""
    val = self.input_token.value
    assert val in '([{', repr(val)
    # Update bracket-tracking state.
    if val == '(':
        self.paren_level += 1
    elif val == '[':
        self.square_brackets_stack.append(False)
    else:
        self.curly_brackets_level += 1
    # Decide whether a blank may precede the bracket,
    # updating self.in_arg_list if necessary.
    prev_kind = self.prev_output_kind
    if self.input_token.context == 'import' or prev_kind in ('op', 'word-op'):
        self.gen_blank()
    elif prev_kind == 'word':
        # Suppress blanks before '(' or '[' only after non-keywords.
        if val == '{' or self.prev_output_value in (
            'if', 'else', 'elif', 'return', 'for', 'while',
        ):
            self.gen_blank()
        else:
            if val == '(':
                self.in_arg_list += 1
            self.pending_ws = ''
    elif prev_kind != 'line-indent':
        self.pending_ws = ''
    # Output the token!
    self.gen_token('op-no-blanks', val)

# @+node:ekr.20240105145241.37: *9* tbo.gen_possible_unary_op & helper
def gen_possible_unary_op(self) -> None:
    """Generate a '~', '+' or '-' op as either a unary or binary operator."""
    val = self.input_token.value
    if not self.is_unary_op(self.index, val):
        # Binary: surround with blanks.
        self.gen_blank()
        self.gen_token('op', val)
        self.gen_blank()
        return
    # Unary: no blank follows the op.
    prev = self.input_token
    # NOTE(review): input tokens never appear to have kind 'lt' (that is an
    # *output* kind), so this branch looks unreachable — confirm.
    if prev.kind == 'lt':
        self.gen_token('op-no-blanks', val)
    else:
        self.gen_blank()
        self.gen_token('op-no-blanks', val)

# @+node:ekr.20240109082712.1: *10* tbo.is_unary_op & _prev
def is_unary_op(self, i: int, val: str) -> bool:
    """Return True if the '~', '+' or '-' at input index i is a unary operator."""
    if val == '~':
        # '~' is always unary.
        return True
    if val not in '+-':  # pragma: no cover
        return False
    # Get the previous significant **input** token.
    # This is the only call to _prev(i) anywhere!
    prev_i = self._prev(i)
    prev = None if prev_i is None else self.input_tokens[prev_i]
    kind = prev.kind if prev else ''
    value = prev.value if prev else ''
    if kind in ('number', 'string'):
        return False
    if kind == 'op':
        if value in ')]':
            # Binary after a closing bracket.
            return False
        if value in '{([:':
            # Unary after an opening bracket or ':'.
            return True
    if kind != 'name':
        return True
    # The hard case: prev is a 'name' token.
    # Any Python keyword indicates a unary operator.
    return keyword.iskeyword(value) or keyword.issoftkeyword(value)

# @+node:ekr.20240115233050.1: *11* tbo._prev
def _prev(self, i: int) -> Optional[int]:
    """
    Return the previous *significant* input token.

    Ignore insignificant tokens: whitespace, indentation, comments, etc.
    """
    i -= 1
    while i >= 0:
        token = self.input_tokens[i]
        if token.kind not in self.insignificant_tokens:
            return i
        i -= 1
    return None  # pragma: no cover

# @+node:ekr.20240105145241.36: *9* tbo.gen_rt
def gen_rt(self) -> None:
    """Generate code for ')', ']' or '}'."""
    val = self.input_token.value
    assert val in ')]}', repr(val)
    # Update state vars and suppress a preceding blank,
    # except at the start of a line.
    at_line_start = self.prev_output_kind == 'line-indent'
    if val == ')':
        self.paren_level -= 1
        self.in_arg_list = max(0, self.in_arg_list - 1)
        if not at_line_start:
            self.pending_ws = ''
    elif val == ']':
        self.square_brackets_stack.pop()
        if not at_line_start:
            self.pending_ws = ''
    else:  # '}'
        self.curly_brackets_level -= 1
        if self.output_list[-1] == ',':
            # A hack: put a space before a comma.
            self.pending_ws = ' '
        elif not at_line_start:
            self.pending_ws = ''
    self.gen_token('rt', val)

# @+node:ekr.20240105145241.38: *9* tbo.gen_star_op
def gen_star_op(self) -> None:
    """Generate a '*' op, with a special case for *args."""
    val = self.input_token.value
    self.gen_blank()
    if self.input_token.context == 'arg':
        # Unary '*': no blank after.
        self.gen_token('op-no-blanks', val)
    else:
        self.gen_token('op', val)
        self.gen_blank()

# @+node:ekr.20240105145241.39: *9* tbo.gen_star_star_op
def gen_star_star_op(self) -> None:
    """Generate a '**' op, with a special case for **kwargs."""
    val = self.input_token.value
    self.gen_blank()
    if self.input_token.context == 'arg':
        # Unary '**': no blank after.
        self.gen_token('op-no-blanks', val)
    else:
        self.gen_token('op', val)
        self.gen_blank()

# @+node:ekr.20240105145241.3: *9* tbo.push_state
def push_state(self, kind: str, value: Union[int, str, None] = None) -> None:
    """Create a new ParseState and push it onto the state stack."""
    self.state_stack.append(ParseState(kind, value))

# @+node:ekr.20240105145241.21: *8* tbo.do_string
def do_string(self) -> None:
    """
    Handle a 'string' token.

    The Tokenizer presents every f-string as a single 'string' token.
    """
    # Careful: continued strings may contain '\r'.
    val = self.regularize_newlines(self.input_token.value)
    is_docstring = val.startswith(('"""', "'''"))
    if is_docstring:
        # #4346: Strip trailing ws from every line of a docstring.
        while ' \n' in val:
            val = val.replace(' \n', '\n')
    self.gen_token('string', val)
    self.gen_blank()

# @+node:ekr.20250731204857.1: *8* tbo.do_tstring_start
def do_tstring_start(self) -> None:
    """
    Enter verbatim mode for a tstring_start token.

    do_verbatim handles the following tstring_middle and tstring_end tokens.
    """
    self.verbatim = True
    self.gen_token('verbatim', self.input_token.value)

# @+node:ekr.20240105145241.22: *8* tbo.do_verbatim
def do_verbatim(self) -> None:
    """
    Copy one token through unchanged while in verbatim mode.

    Verbatim mode ends at an @beautify comment or a tstring_end token.
    """
    kind = self.input_token.kind
    # Careful: tokens may contain '\r'.
    val = self.regularize_newlines(self.input_token.value)
    if kind == 'comment':
        if self.beautify_pat.match(val):
            self.verbatim = False
        self.gen_token('comment', val.rstrip())
        return
    if kind == 'tstring_middle':
        self.gen_token('verbatim', val)
        return
    if kind == 'tstring_end':
        # NOTE(review): the output kind here is 'tstring_middle', not
        # 'tstring_end' — presumably intentional; confirm.
        self.gen_token('tstring_middle', val)
        self.verbatim = False
        return
    if kind in ('indent', 'dedent'):
        # Track indentation even while copying verbatim.
        self.indent_level += 1 if kind == 'indent' else -1
        self.lws = self.indent_level * self.tab_width * ' '
    self.gen_token('verbatim', val)

# @+node:ekr.20240105145241.23: *8* tbo.do_ws
def do_ws(self) -> None:
    """
    Handle the "ws" pseudo-token.  See Tokenizer.itok.do_token (the gem).

    Queue the whitespace itself only when it follows a newline or ends
    with a backslash-newline (a continued line).
    """
    val = self.input_token.value
    prev = self.input_tokens[self.index - 1]
    if prev.kind in ('nl', 'newline'):
        # Leading whitespace for the new line.
        self.pending_lws, self.pending_ws = val, ''
    elif '\\\n' in val:
        # Preserve backslash-newline continuations exactly.
        self.pending_lws, self.pending_ws = '', val
    else:
        # #4346: Collapse other whitespace to at most one blank.
        self.pending_ws = ' ' if val else ''

# @+node:ekr.20240105145241.27: *8* tbo.gen_blank
def gen_blank(self) -> None:
    """
    Queue a *request* for a blank.
    Change *neither* prev_output_kind *nor* pending_lws.
    """
    prev_kind = self.prev_output_kind
    if prev_kind in ('op-no-blanks', 'hard-blank'):
        # 'op-no-blanks' demands that no blank follows;
        # 'hard-blank' eats any further blanks.
        self.pending_ws = ''
    elif prev_kind in ('dedent', 'file-start', 'indent', 'line-indent', 'newline'):
        # Suppress the blank, but do *not* change the pending ws.
        pass
    elif not self.pending_ws:
        # Queue a single blank; an existing pending ws takes precedence.
        self.pending_ws = ' '

# @+node:ekr.20240105145241.26: *8* tbo.gen_token
def gen_token(self, kind: str, value: str) -> None:
    """Append an output token, preceded by any queued whitespace."""
    out = self.output_list
    # Leading whitespace takes precedence over ordinary whitespace.
    if self.pending_lws:
        out.append(self.pending_lws)
    elif self.pending_ws:
        out.append(self.pending_ws)
    out.append(value)
    self.pending_lws = self.pending_ws = ''
    self.prev_output_kind = kind
    self.prev_output_value = value

# @+node:ekr.20240110205127.1: *7* tbo: Scanning
# The parser calls scanner methods to move through the list of input tokens.
# @+node:ekr.20240128114622.1: *8* tbo.pre_scan & helpers
def pre_scan(self) -> None:
    """
    Scan the entire file in one iterative pass, adding context to a few
    kinds of tokens as follows:

    Token   Possible Contexts (or None)
    =====   ===========================
    ':'     'annotation', 'dict', 'complex-slice', 'simple-slice'
    '='     'annotation', 'initializer'
    '*'     'arg'
    '**'    'arg'
    '.'     'import'
    """

    # The main loop.
    in_import = False
    scan_stack: list[ScanState] = []
    prev_token: Optional[InputToken] = None
    for i, token in enumerate(self.input_tokens):
        kind, value = token.kind, token.value
        # NOTE(review): 'in' here is a substring test; for the kinds this
        # scanner sees it behaves like kind == 'newline' — consider '=='.
        if kind in 'newline':
            << pre-scan 'newline' tokens >>
        elif kind == 'op':
            << pre-scan 'op' tokens >>
        elif kind == 'name':
            << pre-scan 'name' tokens >>
        # Remember the previous significant token.
        if kind not in self.insignificant_kinds:
            prev_token = token
    # Sanity check.
    if scan_stack:  # pragma: no cover
        print('pre_scan: non-empty scan_stack')
        print(scan_stack)

# @+node:ekr.20240128230812.1: *9* << pre-scan 'newline' tokens >>
# 'import' and 'from x import' statements may span lines.
# 'ws' tokens represent continued lines like this:   ws: ' \\\n    '
# At the outer level (empty scan stack) a newline ends any import statement.
if in_import and not scan_stack:
    in_import = False
# @+node:ekr.20240128123117.1: *9* << pre-scan 'op' tokens >>
top_state: Optional[ScanState] = scan_stack[-1] if scan_stack else None

# Handle '[' and ']'.
if value == '[':
    scan_stack.append(ScanState('slice', token))
elif value == ']':
    assert top_state and top_state.kind == 'slice'
    self.finish_slice(i, top_state)
    scan_stack.pop()

# Handle '{' and '}'.
# NOTE(review): the '(' and ')' cases below are elif-branches of *this*
# chain, so they run only when value is not '{' or '}'.
if value == '{':
    scan_stack.append(ScanState('dict', token))
elif value == '}':
    assert top_state and top_state.kind == 'dict'
    self.finish_dict(i, top_state)
    scan_stack.pop()

# Handle '(' and ')'
elif value == '(':
    # Use state '(' (grouping) after a keyword or non-name token;
    # otherwise state 'arg' (an argument list).
    if self.is_python_keyword(prev_token) or prev_token and prev_token.kind != 'name':
        state_kind = '('
    else:
        state_kind = 'arg'
    scan_stack.append(ScanState(state_kind, token))
elif value == ')':
    assert top_state and top_state.kind in ('(', 'arg'), repr(top_state)
    if top_state.kind == 'arg':
        self.finish_arg(i, top_state)
    else:
        self.finish_paren(i, top_state)
    scan_stack.pop()

# Handle interior tokens in 'arg' and 'slice' states.
if top_state:
    if top_state.kind in ('dict', 'slice') and value == ':':
        top_state.value.append(i)
    if top_state.kind == 'arg' and value in '**=:,':
        top_state.value.append(i)

# Handle '.' and '(' tokens inside 'import' and 'from' statements.
if in_import and value in '(.':
    self.set_context(i, 'import')
# @+node:ekr.20240128231119.1: *9* << pre-scan 'name' tokens >>
# Ignore the 'from' in a 'yield from' expression.
prev_is_yield = prev_token and prev_token.kind == 'name' and prev_token.value == 'yield'
if value in ('from', 'import') and not prev_is_yield:
    # 'import' and 'from x import' statements should be at the outer level.
    assert not scan_stack, scan_stack
    in_import = True
# @+node:ekr.20240129041304.1: *9* tbo.finish_arg
def finish_arg(self, end: int, state: Optional[ScanState]) -> None:
    """Set context for '=', ':', '*', and '**' tokens when scanning from '(' to ')'."""
    # Sanity checks.
    if not state:
        return
    assert state.kind == 'arg', repr(state)
    token = state.token
    assert token.value == '(', repr(token)
    values = state.value
    assert isinstance(values, list), repr(values)
    i1 = token.index
    assert i1 < end, (i1, end)
    if not values:
        return
    # Pass 1: classify each '=' as an initializer or an annotated default.
    equal_context = 'initializer'
    for i in values:
        token = self.input_tokens[i]
        assert token.kind == 'op', repr(token)
        value = token.value
        if value == ',':
            # A ',' resets the classification.
            equal_context = 'initializer'
        elif value == ':':
            # An annotation makes the next '=' an annotated default.
            equal_context = 'annotation'
        elif value == '=':
            self.set_context(i, equal_context)
            equal_context = 'initializer'
    # Pass 2: set the context of all outer-level ':', '*', and '**' tokens.
    prev: Optional[InputToken] = None
    for i in range(i1, end):
        token = self.input_tokens[i]
        if token.kind in self.insignificant_kinds:
            continue
        if token.kind == 'op':
            value = token.value
            if value in ('*', '**'):
                if self.is_unary_op_with_prev(prev, token):
                    self.set_context(i, 'arg')
            elif value == '=':
                # Pass 1 has already set this context.
                assert token.context in ('initializer', 'annotation'), (
                    i,
                    repr(token.context),
                )
            elif value == ':':
                self.set_context(i, 'annotation')
        prev = token

# @+node:ekr.20250507041900.1: *9* tbo.finish_paren
def finish_paren(self, end: int, state: Optional[ScanState]) -> None:
    """Set 'initializer' context for '=' tokens when scanning from '(' to ')'."""
    if not state:
        return
    # Sanity checks.
    assert state.kind == '(', repr(state)
    open_token = state.token
    assert open_token.value == '(', repr(open_token)
    i1 = open_token.index
    assert i1 < end, (i1, end)
    # Mark every '=' token in the parenthesized range.
    for tok in self.input_tokens[i1:end]:
        if (tok.kind, tok.value) == ('op', '='):
            self.set_context(tok.index, 'initializer')

# @+node:ekr.20240128233406.1: *9* tbo.finish_slice
def finish_slice(self, end: int, state: ScanState) -> None:
    """Set context for all ':' when scanning from '[' to ']'."""

    # Sanity checks.
    assert state.kind == 'slice', repr(state)
    token = state.token
    assert token.value == '[', repr(token)
    colons = state.value
    assert isinstance(colons, list), repr(colons)
    i1 = token.index
    assert i1 < end, (i1, end)

    # Do nothing if there are no ':' tokens in the slice.
    if not colons:
        return

    # Compute final context by scanning the tokens.
    # A slice is 'complex' if more than one significant token appears
    # between any pair of colons.
    # NOTE(review): range(i1 + 1, end - 1) skips the token at end - 1, the
    # last interior token before ']' — confirm this is intentional.
    final_context = 'simple-slice'
    inter_colon_tokens = 0
    prev = token
    for i in range(i1 + 1, end - 1):
        token = self.input_tokens[i]
        kind, value = token.kind, token.value
        if kind not in self.insignificant_kinds:
            if kind == 'op':
                if value == ':':
                    inter_colon_tokens = 0
                elif value in '-+':
                    # Ignore unary '-' or '+' tokens.
                    if not self.is_unary_op_with_prev(prev, token):
                        inter_colon_tokens += 1
                        if inter_colon_tokens > 1:
                            final_context = 'complex-slice'
                            break
                elif value == '~':
                    # '~' is always a unary op.
                    pass
                else:
                    # All other ops contribute.
                    inter_colon_tokens += 1
                    if inter_colon_tokens > 1:
                        final_context = 'complex-slice'
                        break
            else:
                inter_colon_tokens += 1
                if inter_colon_tokens > 1:
                    final_context = 'complex-slice'
                    break
            prev = token

    # Set the context of all outer-level ':' tokens.
    for i in colons:
        self.set_context(i, final_context)

# @+node:ekr.20240129040347.1: *9* tbo.finish_dict
def finish_dict(self, end: int, state: Optional[ScanState]) -> None:
    """
    Set 'dict' context for all ':' tokens when scanning from '{' to '}'.

    Strictly speaking this is unnecessary: tbo.gen_colon generates the
    same code for the 'dict' context as for the default case.
    """
    if not state:
        return
    # Sanity checks.
    assert state.kind == 'dict', repr(state)
    open_token = state.token
    assert open_token.value == '{', repr(open_token)
    colons = state.value
    assert isinstance(colons, list), repr(colons)
    assert open_token.index < end, (open_token.index, end)
    # Mark the recorded ':' tokens.
    for i in colons:
        self.set_context(i, 'dict')

# @+node:ekr.20240129034209.1: *8* tbo.is_unary_op_with_prev
def is_unary_op_with_prev(self, prev: Optional[InputToken], token: InputToken) -> bool:
    """
    Return True if token ('**', '*', '-', '+' or '~') is a unary op in the
    context of prev, the previous significant token.
    """
    if token.value == '~':  # pragma: no cover
        # '~' is always unary.
        return True
    if prev is None:
        return True  # pragma: no cover
    assert token.value in '**-+', repr(token.value)
    if prev.kind in ('number', 'string'):
        return False
    if prev.kind == 'op':
        if prev.value in ')]':
            # An unnecessary test?
            return False  # pragma: no cover
        if prev.value in '{([:,':
            return True
    if prev.kind != 'name':
        # An unnecessary test?
        return True  # pragma: no cover
    # prev is a 'name' token: unary only after a Python keyword.
    # Bug fix: test *prev* (as the sibling is_unary_op does), not the
    # operator token itself, which is never a 'name' token and therefore
    # always made this branch return False.
    return self.is_python_keyword(prev)

# @+node:ekr.20240129035336.1: *8* tbo.is_python_keyword
def is_python_keyword(self, token: Optional[InputToken]) -> bool:
    """Return True if token is a 'name' token referring to a Python keyword."""
    if not token or token.kind != 'name':
        return False
    return keyword.iskeyword(token.value) or keyword.issoftkeyword(token.value)

# @+node:ekr.20240106170746.1: *8* tbo.set_context
def set_context(self, i: int, context: str) -> None:
    """
    Set self.input_tokens[i].context, but only if it does not already exist!

    See the docstring for pre_scan for details.
    """
    trace = False  # Do not delete the trace below.
    valid_contexts = (
        'annotation', 'arg', 'complex-slice', 'simple-slice',
        'dict', 'import', 'initializer',
    )
    if context not in valid_contexts:
        self.oops(f"Unexpected context! {context!r}")  # pragma: no cover
    token = self.input_tokens[i]
    if trace:  # pragma: no cover
        token_s = f"<{token.kind}: {token.show_val(12)}>"
        ignore_s = 'Ignore' if token.context else ' ' * 6
        print(f"{i:3} {ignore_s} token: {token_s} context: {context}")
    # First writer wins: never overwrite an existing context.
    if not token.context:
        token.context = context

# @+node:ekr.20240105151507.1: *4* ../unittests/core/test_leoTokens.py
"""Tests of leoTokens.py"""

<< test_leoTokens imports >>
v1, v2, junk1, junk2, junk3 = sys.version_info
py_version = (v1, v2)

@others
# @+node:ekr.20240105151507.2: *5* << test_leoTokens imports >>
import os
import sys
import textwrap
import unittest
import warnings

try:
    # Suppress a warning about imp being deprecated.
    # Bug fix: catch_warnings() only saves and restores the filter state;
    # simplefilter('ignore') inside the context is required to actually
    # suppress warnings raised while importing black.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        import black
except Exception:  # pragma: no cover
    black = None

from leo.core import leoGlobals as g

# Classes to test.
from leo.core.leoTokens import InputToken, Tokenizer, TokenBasedOrange

# Utility functions.
from leo.core.leoTokens import dump_contents, dump_tokens, input_tokens_to_string

warnings.simplefilter("ignore")
# @+node:ekr.20240105153420.2: *5* class BaseTest (TestCase)
# Do *not* use LeoUnitTest as the base class.
# Doing so slows down testing considerably.


class BaseTest(unittest.TestCase):
    """
    The base class of all tests of leoTokens.py.

    This class contains only helpers.
    """

    # Names of enabled dumps; valid values are 'contents', 'debug', 'tokens'.
    # Set per-test by make_data.
    debug_list: list[str] = []

    def setUp(self) -> None:
        # Tell Leo's core that a unit test is running.
        g.unitTesting = True

    @others


# @+node:ekr.20240105153420.13: *6* BaseTest.beautify
def beautify(
    self,
    contents,
    tokens,
    filename=None,
    max_join_line_length=None,  # Unused; retained for signature compatibility.
    max_split_line_length=None,  # Unused; retained for signature compatibility.
):
    """BaseTest.beautify."""
    if not contents:
        return ''  # pragma: no cover
    # Default the filename to the name of the calling test.
    filename = filename or g.callers(2).split(',')[0]

    beautifier = TokenBasedOrange()
    beautified = beautifier.beautify(contents, filename, tokens)
    self.output_list = beautifier.output_list
    return beautified

# @+node:ekr.20240105153420.4: *6* BaseTest.check_roundtrip
def check_roundtrip(self, contents, *, debug_list: list[str] = None):
    """Check that the tokenizer round-trips the given contents."""
    # Several unit tests call this method.
    regularized, tokens = self.make_data(contents, debug_list=debug_list)
    round_tripped = input_tokens_to_string(tokens)
    self.assertEqual(regularized, round_tripped)

# @+node:ekr.20240105153425.44: *6* BaseTest.make_data (test_leoTokens.py)
def make_data(
    self,
    contents: str,
    *,
    description: str = None,  # Unused here; callers pass it for readability only.
    debug_list: list[str] = None,  # Annotation fixed: callers pass a list of strings.
) -> tuple[str, list[InputToken]]:  # pragma: no cover
    """
    BaseTest.make_data. Prepare the data for one unit test:
    - Regularize the contents:
      contents = textwrap.dedent(contents).strip() + '\n'
    - Tokenize the contents using the Tokenizer class in leoTokens.py.
    - Dump the contents or tokens per the debug_list kwarg.
    - Return (contents, tokens)
    """
    assert contents.strip(), g.callers()

    # Set debug flags and counts.
    self.debug_list = debug_list or []
    self.trace_token_method = False

    # Check the debug_list.
    valid = ('contents', 'debug', 'tokens')
    for z in self.debug_list:
        if z not in valid:
            g.trace('Ignoring debug_list value:', z)

    # Ensure all tests start with a non-blank line and end in exactly one newline.
    contents = textwrap.dedent(contents).strip() + '\n'

    # Create the tokens.
    tokens = Tokenizer().make_input_tokens(contents)
    if not tokens:
        self.fail('BaseTest.make_data:Tokenizer().make_input_tokens failed')

    # Dumps.
    if 'contents' in self.debug_list:
        dump_contents(contents)
    if 'tokens' in self.debug_list:
        dump_tokens(tokens)
    return contents, tokens

# @+node:ekr.20240105153420.6: *6* BaseTest.make_file_data
def make_file_data(self, filename: str) -> tuple[str, list[InputToken]]:
    """Return (contents, tokens) from the given file in leo/core."""
    # Docstring fixed: this method returns a 2-tuple, not (contents, tokens, tree).
    directory = os.path.dirname(__file__)
    filename = g.finalize_join(directory, '..', '..', 'core', filename)
    assert os.path.exists(filename), repr(filename)
    contents = g.readFileIntoUnicodeString(filename)
    contents, tokens = self.make_data(contents, description=filename)
    return contents, tokens

# @+node:ekr.20240205025458.1: *6* BaseTest.prep
def prep(self, s: str) -> str:
    """
    Return the "prepped" version of s.

    This should eliminate the need for backslashes in tests.
    """
    # Dedent first, then normalize to exactly one trailing newline.
    dedented = textwrap.dedent(s)
    return dedented.strip() + '\n'

# @+node:ekr.20240105153425.2: *5* class Optional_TestFiles (BaseTest)
class Optional_TestFiles(BaseTest):
    """
    Tests for the TokenOrderGenerator class that act on files.

    These are optional tests. They take a long time and are not needed
    for 100% coverage.

    All of these tests failed at one time.
    """

    # The test methods are defined in child nodes via @others.
    @others


# @+node:ekr.20240105153425.3: *6* TestFiles.test_leoApp
def test_leoApp(self):
    """Exercise make_file_data (tokenize + round-trip) on leoApp.py."""
    self.make_file_data('leoApp.py')

# @+node:ekr.20240105153425.4: *6* TestFiles.test_leoAst
def test_leoAst(self):
    """Exercise make_file_data on leoAst.py."""
    self.make_file_data('leoAst.py')

# @+node:ekr.20240105153425.5: *6* TestFiles.test_leoDebugger
def test_leoDebugger(self):
    """Exercise make_file_data on leoDebugger.py."""
    self.make_file_data('leoDebugger.py')

# @+node:ekr.20240105153425.6: *6* TestFiles.test_leoFind
def test_leoFind(self):
    """Exercise make_file_data on leoFind.py."""
    self.make_file_data('leoFind.py')

# @+node:ekr.20240105153425.7: *6* TestFiles.test_leoGlobals
def test_leoGlobals(self):
    """Exercise make_file_data on leoGlobals.py."""
    self.make_file_data('leoGlobals.py')

# @+node:ekr.20240105153425.8: *6* TestFiles.test_leoTips
def test_leoTips(self):
    """Exercise make_file_data on leoTips.py."""
    self.make_file_data('leoTips.py')

# @+node:ekr.20240105153425.9: *6* TestFiles.test_runLeo
def test_runLeo(self):
    """Exercise make_file_data on runLeo.py."""
    self.make_file_data('runLeo.py')

# @+node:ekr.20240105153425.42: *5* class TestTokenBasedOrange (BaseTest)
class TestTokenBasedOrange(BaseTest):
    """
    Tests for the TokenBasedOrange class.

    Note: TokenBasedOrange never inserts or deletes lines.
    """

    # The test methods are defined in child nodes via @others.
    @others


# @+node:ekr.20240105153425.43: *6* TestTBO.blacken
def blacken(self, contents):
    """Return the results of running black on contents"""
    if not black:
        self.skipTest('Requires Black')  # pragma: no cover
    try:
        # Suppress string normalization!
        black_mode = black.FileMode()
        black_mode.string_normalization = False
        # black_mode.line_length = line_length
    except TypeError:  # pragma: no cover
        self.skipTest('Requires newer version of Black')
    return black.format_str(contents, mode=black_mode)

# @+node:ekr.20240116104552.1: *6* TestTBO.slow_test_leoColorizer
def slow_test_leoApp(self) -> None:  # pragma: no cover
    # This test is no longer needed.
    # The tbo command beautifies all files.
    # NOTE(review): despite the method's name (and the node header's
    # 'slow_test_leoColorizer'), the target file is leoColorizer.py.
    filename = 'leoColorizer.py'
    test_dir = os.path.dirname(__file__)
    path = g.os_path_finalize_join(test_dir, '..', '..', 'core', filename)
    assert os.path.exists(path), repr(path)

    tbo = TokenBasedOrange()
    tbo.filename = path

    if 0:  # Diff only.
        tbo.beautify_file(path)
    else:
        # Beautifying leoColorizer.py should be an exact no-op.
        contents, tokens = self.make_file_data(path)
        expected = contents
        results = self.beautify(contents, tokens)

        regularized_expected = tbo.regularize_newlines(expected)
        regularized_results = tbo.regularize_newlines(results)

        if regularized_expected != regularized_results:
            tbo.show_diffs(regularized_expected, regularized_results)

        assert regularized_expected == regularized_results

# @+node:ekr.20240105153425.45: *6* TestTBO.test_annotations
def test_annotations(self):  # Required for full coverage.
    """Beautified annotations must match black's output."""
    table = (
        # Case 0.
        """s: str = None\n""",
        # Case 1.
        (
            """
            def annotated_f(s: str = None, x=None) -> None:
                pass
        """
        ),
        # Case 2.
        (
            """
            def f1():
                self.rulesetName : str = ''
        """
        ),
    )
    for i, contents in enumerate(table):
        contents, tokens = self.make_data(contents)
        expected = self.blacken(contents).rstrip() + '\n'
        results = self.beautify(contents, tokens)
        if results != expected:  # pragma: no cover
            g.printObj(contents, tag='Contents')
            g.printObj(expected, tag='Expected (blackened)')
            g.printObj(results, tag='Results')
        self.assertEqual(results, expected)

# @+node:ekr.20240105153425.46: *6* TestTBO.test_at_doc_part
def test_at_doc_part(self):
    """Leo @doc parts must pass through the beautifier unchanged."""
    contents = """
@ Line 1
Line 2
@c

print('hi')
"""
    contents, tokens = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.47: *6* TestTBO.test_backslash_newline
def test_backslash_newline(self):
    """
    This test is necessarily different from black, because orange doesn't
    delete semicolon tokens.
    """
    contents = r"""
print(a);\
print(b)
print(c); \
print(d)
"""
    contents, tokens = self.make_data(contents)
    # Backslash continuations must pass through unchanged.
    expected = contents.rstrip() + '\n'
    # expected = self.blacken(contents).rstrip() + '\n'
    results = self.beautify(contents, tokens)
    # g.printObj(tokens, tag='Tokens')
    # g.printObj(results, tag='Results')
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.48: *6* TestTBO.test_blank_lines_after_function
def test_blank_lines_after_function(self):
    """Blank lines and comments around a def must be preserved."""
    contents = """
# Comment line 1.
# Comment line 2.

def spam():
    pass
    # Properly indented comment.

# Comment line3.
# Comment line4.
a = 2
"""
    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    if results != expected:
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.49: *6* TestTBO.test_blank_lines_after_function_2
def test_blank_lines_after_function_2(self):
    """Blank lines between a def and a trailing comment must be preserved."""
    contents = """
# Leading comment line 1.
# Leading comment lines 2.

def spam():
    pass

# Trailing comment line.
a = 2
"""
    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.50: *6* TestTBO.test_blank_lines_after_function_3
def test_blank_lines_after_function_3(self):
    """Blank lines around a nested def must be preserved."""
    # From leoAtFile.py.
    contents = """
def writeAsisNode(self, p):
    print('1')

    def put(s):
        print('2')

    # Trailing comment 1.
    # Trailing comment 2.
    print('3')
"""

    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    if results != expected:
        g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.53: *6* TestTBO.test_comment_indented
def test_comment_indented(self):
    """
    Over-indented and table-interior comments must pass through unchanged.

    Each table entry is both the input and the expected output.
    """
    table = (
        """
        if 1:
            pass
                # An indented comment.
        """,
        """
        table = (
            # Regular comment.
        )
        """,
    )
    fails = 0
    for contents in table:
        contents, tokens = self.make_data(contents)
        expected = contents
        results = self.beautify(contents, tokens)
        if results != expected:  # pragma: no cover
            fails += 1
            print(f"Fail: {fails}")
            g.printObj(results, tag='Results')
            g.printObj(expected, tag='Expected')
    # Bug fix: use a unittest assertion, not a bare assert,
    # because bare asserts are stripped under `python -O`.
    self.assertEqual(fails, 0)

# @+node:ekr.20240105153425.54: *6* TestTBO.test_comment_space_after_delim
def test_comment_space_after_delim(self):
    """
    A space must be inserted after a comment delim, except for
    bang lines and '###' comments.
    """
    table = (
        # Test 1.
        (
            """#No space after delim.\n""",
            """# No space after delim.\n""",
        ),
        # Test 2.  Don't change bang lines.
        (
            """#! /usr/bin/env python\n""",
            """#! /usr/bin/env python\n""",
        ),
        # Test 3.  Don't change ### comments.
        (
            """### To do.\n""",
            """### To do.\n""",
        ),
    )
    fails = 0
    for contents, expected in table:
        contents, tokens = self.make_data(contents)
        results = self.beautify(contents, tokens)
        message = f"\n  contents: {contents!r}\n  expected: {expected!r}\n       got: {results!r}"
        if results != expected:  # pragma: no cover
            fails += 1
            print(f"Fail: {fails}\n{message}")
    # Bug fix: use a unittest assertion, not a bare assert,
    # because bare asserts are stripped under `python -O`.
    self.assertEqual(fails, 0)

# @+node:ekr.20240105153425.55: *6* TestTBO.test_decorators
def test_decorators(self):
    """Decorated defs (top-level and nested) must pass through unchanged."""
    table = (
        # Case 0.
        """
@my_decorator(1)
def func():
    pass
""",
        # Case 1.
        """
if 1:
    @my_decorator
    def func():
        pass
""",
        # Case 2.
        '''
@g.commander_command('promote')
def promote(self, event=None, undoFlag=True):
    """Make all children of the selected nodes siblings of the selected node."""
''',
    )
    for i, contents in enumerate(table):
        contents, tokens = self.make_data(contents)
        expected = contents
        results = self.beautify(contents, tokens)
        if results != expected:
            g.trace('Fail:', i)  # pragma: no cover
            g.printObj(contents, tag='Contents')
            g.printObj(results, tag='Results')
        self.assertEqual(results, expected)

# @+node:ekr.20240105153425.56: *6* TestTBO.test_dont_delete_blank_lines
def test_dont_delete_blank_lines(self):
    """TBO must not delete blank lines inside classes and functions."""
    contents = """
class Test:

    def test_func():

        pass

    a = 2
"""
    contents, tokens = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20250506092303.1: *6* TestTBO.test_flake8_errors
def test_flake8_errors(self):
    """Cases related to flake8 complaints: part 1 round-trips, part 2 compares with black."""
    tag = 'test_flake8_errors'

    # Part 1: tests w/o black.
    non_black_table = (
        # leoAbbrevCommands.py: line 160.
        """c.abbrev_subst_env = {'c': c, 'g': g, '_values': {}, }\n""",
    )
    for i, contents in enumerate(non_black_table):
        description = f"{tag} (w/o black) part {i}"
        contents, tokens = self.make_data(contents, description=description)
        expected = contents
        results = self.beautify(contents, tokens, filename=description)
        if 0:  # pragma: no cover
            g.printObj(tokens, tag=description)
            g.printObj(expected, tag='Expected')
            g.printObj(results, tag='Results')
        if results != expected:  # pragma: no cover
            print('')
            print(
                f"TestTokenBasedOrange.{tag}: FAIL\n"
                f"  contents: {contents.rstrip()}\n"
                f"     black: {expected.rstrip()}\n"
                f"    orange: {results.rstrip() if results else 'None'}"
            )
        self.assertEqual(results, expected, msg=description)

    # All entries are expected values...
    black_table = (
        # leoserver.py: line 1575.
        """s = s[len('@nocolor') :]\n""",
        # leoGlobals.py.
        """g.pr(f"{self.calledDict.get(key,0):d}", key)""",  # line 1805
        """tracing_tags [id(self)] = tag""",  # line 1913.
        # make_stub_files.py.
        """def match(self, s, trace=False):\n    pass""",
    )
    for i, contents in enumerate(black_table):
        description = f"{tag} (black) part {i}"
        contents, tokens = self.make_data(contents, description=description)
        expected = self.blacken(contents)
        results = self.beautify(contents, tokens, filename=description)
        if results != expected:  # pragma: no cover
            print('')
            # g.printObj(tokens, tag='Tokens')
            g.printObj(expected, tag='Expected')
            g.printObj(results, tag='Results')
        self.assertEqual(results, expected, msg=description)

# @+node:ekr.20240105153425.65: *6* TestTBO.test_leo_sentinels
def test_leo_sentinels_1(self):
    """A Leo sentinel before a def must pass through unchanged."""
    # Careful: don't put a sentinel into the file directly.
    # That would corrupt leoAst.py.
    sentinel = '#@+node:ekr.20200105143308.54: ** test'
    contents = f"""
{sentinel}
def spam():
    pass
"""
    contents, tokens = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.66: *6* TestTBO.test_leo_sentinels_2
def test_leo_sentinels_2(self):
    """A Leo sentinel before a class must pass through unchanged."""
    # Careful: don't put a sentinel into the file directly.
    # That would corrupt leoAst.py.
    sentinel = '#@+node:ekr.20200105143308.54: ** test'
    contents = f"""
{sentinel}
class TestClass:
    pass
"""
    contents, tokens = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.67: *6* TestTBO.test_lines_before_class
def test_lines_before_class(self):
    """Lines immediately before a class statement must be preserved."""
    contents = """
a = 2
class aClass:
    pass
"""
    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20240128181802.1: *6* TestTBO.test_multi_line_imports
def test_multi_line_imports(self):
    """Multi-line and backslash-continued imports must pass through unchanged."""
    # The space between 'import' and '(' is correct.
    contents = """
        from .module1 import (
            w1,
            w2,
        )
        from .module1 import \\
            w
        from .module1 import (
            w1,
            w2,
        )
        import leo.core.leoGlobals \\
            as g
    """
    contents, tokens = self.make_data(contents)
    expected = contents.strip() + '\n'
    results = self.beautify(contents, tokens)
    if results != expected:
        g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.68: *6* TestTBO.test_multi_line_pet_peeves
def test_multi_line_pet_peeves(self):
    """Multi-line pet peeves: compare against hard-coded expected results."""
    contents = """
        if x == 4: pass
        if x == 4 : pass
        print (x, y); x, y = y, x
        print (x , y) ; x , y = y , x
        if(1):
            pass
        elif(2):
            pass
        while(3):
            pass
    """
    # At present Orange doesn't split lines...
    expected = self.prep(
        """
            if x == 4: pass
            if x == 4: pass
            print(x, y); x, y = y, x
            print(x, y); x, y = y, x
            if (1):
                pass
            elif (2):
                pass
            while (3):
                pass
        """
    )
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20240420050005.1: *6* TestTBO.test_multi_line_statement
def test_multi_line_statements(self):
    """Multi-line defs and return statements must pass through unchanged."""
    contents = """
        if 1:  # Simulate indent.
            def orange_command(
                s: str,
            ) -> None:
                pass
        if 1:  # The trailing ]) causes the problem.
            return ''.join(
                ['%s%s' % (sep, self.dump_ast(z, level + 1)) for z in node])
    """

    expected = self.prep(
        """
        if 1:  # Simulate indent.
            def orange_command(
                s: str,
            ) -> None:
                pass
        if 1:  # The trailing ]) causes the problem.
            return ''.join(
                ['%s%s' % (sep, self.dump_ast(z, level + 1)) for z in node])
        """
    )
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    if results != expected:
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    self.assertEqual(results, expected)

# @+node:ekr.20250202043822.1: *6* TestTBO.test_nested_fstrings
def test_nested_fstrings(self):
    """An f-string nested inside an f-string must pass through unchanged."""
    # https://github.com/leo-editor/leo-editor/issues/4289
    contents = """
        diff_str = f"{CMPS[cmp(old, new)]}{(diff and f'{diff:.2f}') or ''}"
    """

    contents, tokens = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(contents, tokens)
    # g.printObj(tokens, tag='Tokens')
    # g.printObj(results, tag='Results')
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.69: *6* TestTBO.test_one_line_pet_peeves
def test_one_line_pet_peeves(self):
    """Compare TBO's results with black's for one-line pet peeves."""
    # One-line pet peeves, except those involving slices and unary ops.

    # See https://peps.python.org/pep-0008/#pet-peeves
    # See https://peps.python.org/pep-0008/#other-recommendations

    tag = 'test_one_line_pet_peeves'

    # Except where noted, all entries are expected values...
    table = (
        # #4420
        """default, *_ = node.infer()""",  # From pylint.
        # Assignments...
        """a = b * c""",
        """a = b + c""",
        """a = b - c""",
        # * and **, inside and outside function calls.
        """f(*args)""",
        """f(**kwargs)""",
        """f(*args, **kwargs)""",
        """f(a, *args)""",
        """f(a=2, *args)""",
        # Calls...
        """f(-1)""",
        """f(-1 < 2)""",
        """f(1)""",
        """f(2 * 3)""",
        """f(2 + name)""",
        """f(a)""",
        """f(a.b)""",
        """f(a=2 + 3, b=4 - 5, c= 6 * 7, d=8 / 9, e=10 // 11)""",
        """f(a[1 + 2])""",
        """f({key: 1})""",
        """t = (0,)""",
        """x, y = y, x""",
        # Dicts...
        """d = {key: 1}""",
        """d['key'] = a[i]""",
        # Trailing comments: expect two spaces.
        """whatever # comment""",
        """whatever  # comment""",
        """whatever   # comment""",
        # Word ops...
        """v1 = v2 and v3 if v3 not in v4 or v5 in v6 else v7""",
        """print(v7 for v8 in v9)""",
        # Returns...
        """return -1""",
    )
    for i, contents in enumerate(table):
        description = f"{tag} part {i}"
        contents, tokens = self.make_data(contents, description=description)
        expected = self.blacken(contents)
        results = self.beautify(contents, tokens, filename=description)
        if results != expected:  # pragma: no cover
            print('')
            print(
                f"TestTokenBasedOrange.{tag}: FAIL\n"
                f"  contents: {contents.rstrip()}\n"
                f"     black: {expected.rstrip()}\n"
                f"    orange: {results.rstrip() if results else 'None'}"
            )
        self.assertEqual(results, expected, msg=description)

# @+node:ekr.20240128002403.1: *6* TestTBO.test_percent_op
def test_percent_op(self):
    """The % operator must be spaced as black spaces it."""
    # leo/plugins/writers/basewriter.py, line 38
    contents = """at.os('%s@+node:%s%s' % (delim, s, delim2))"""
    contents, tokens = self.make_data(contents)
    expected = self.blacken(contents).rstrip() + '\n'
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.70: *6* TestTBO.test_relative_imports
def test_relative_imports(self):
    """Relative imports: expected results (spacing of dots) are hard-coded."""
    # #2533.
    contents = """
        from .module1 import w
        from . module2 import x
        from ..module1 import y
        from .. module2 import z
        from . import a
        from.import b
        from .. import c
        from..import d
        from leo.core import leoExternalFiles
        import leo.core.leoGlobals as g
    """
    expected = self.prep(
        """
        from .module1 import w
        from .module2 import x
        from ..module1 import y
        from ..module2 import z
        from . import a
        from . import b
        from .. import c
        from .. import d
        from leo.core import leoExternalFiles
        import leo.core.leoGlobals as g
    """
    )
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        # g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    self.assertEqual(expected, results)

# @+node:ekr.20240109090653.1: *6* TestTBO.test_slice
def test_slice(self):
    """Compare TBO's results with black's for one-line slices."""
    # Test one-line pet peeves involving slices.

    # See https://peps.python.org/pep-0008/#pet-peeves
    # See https://peps.python.org/pep-0008/#other-recommendations

    tag = 'test_slice'

    # Except where noted, all entries are expected values....
    table = (
        # Differences between leoAst.py and leoTokens.py.
        # tbo.cmd changes several files, including these.
        # In all cases, the differences make leoTokens.py *more*
        # compatible with Black than leoAst.py!
        # #4420: From pylint.
        """base_name = ".".join(package.split(".", self.depth)[: self.depth])""",
        """ws = contents[self.prev_offset : s_offset]""",  # Follow-on test.
        # From leoAst.py.
        """val = val[:i] + '# ' + val[i + 1 :]\n""",
        # From leoApp.py.
        """
                for name in rf.getRecentFiles()[:n]:
                    pass
            """,
        # From leoUndo.py.
        """s.extend(body_lines[-trailing:])\n""",
        # From leoTokens.py.
        # The expected value of the two last lines is the last line.
        """
                if line1.startswith(tag) and line1.endswith(tag2):
                    e = line1[n1 : -n2].strip()
                    e = line1[n1:-n2].strip()
            """,
        # Legacy tests...
        """a[:-1]""",
        """a[: 1 if True else 2 :]""",
        """a[1 : 1 + 2]""",
        """a[lower:]""",
        """a[lower::]""",
        """a[:upper]""",
        """a[:upper:]""",
        """a[::step]""",
        """a[lower:upper:]""",
        """a[lower:upper:step]""",
        """a[lower + offset : upper + offset]""",
        """a[: upper_fn(x) :]""",
        """a[: upper_fn(x) : step_fn(x)]""",
        """a[:: step_fn(x)]""",
        """a[: upper_fn(x) :]""",
        """a[: upper_fn(x) : 2 + 1]""",
        """a[:]""",
        """a[::]""",
        """a[1:]""",
        """a[1::]""",
        """a[:2]""",
        """a[:2:]""",
        """a[::3]""",
        """a[1:2]""",
        """a[1:2:]""",
        """a[:2:3]""",
        """a[1:2:3]""",
    )

    for i, contents in enumerate(table):
        description = f"{tag} part {i}"
        contents, tokens = self.make_data(contents, description=description)
        expected = self.blacken(contents)
        results = self.beautify(contents, tokens, filename=description)
        if results != expected:  # pragma: no cover
            print('')
            print('TestTokenBasedOrange')
            # g.printObj(contents, tag='Contents')
            g.printObj(expected, tag='Expected (Black)')
            g.printObj(results, tag='Results')
        self.assertEqual(expected, results)

# @+node:ekr.20250505153051.1: *6* TestTBO.test_spaces_after_keyword
def test_spaces_after_keyword(self):
    """Multiple spaces after a keyword must collapse to one space."""
    contents = """return  ''\n"""
    expected = """return ''\n"""
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    # Bug fix: use a unittest assertion, not a bare assert,
    # because bare asserts are stripped under `python -O`.
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.76: *6* TestTBO.test_star_star_operator
def test_star_star_operator(self):
    """The ** operator in a binary context must keep surrounding spaces."""
    # Don't rely on black for this test.
    contents = """a = b ** c"""
    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20250505152406.1: *6* TestTBO.test_trailing_ws_in_docstring
def test_trailing_ws_in_docstring(self):
    """Trailing whitespace inside a docstring must be removed."""
    expected = textwrap.dedent('''\
"""
This line contains trailing ws
"""
''')
    # Create the trailing ws by hand so flake8 doesn't complain!
    contents = expected.replace('ws', 'ws  ')
    assert contents != expected
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    # Bug fix: use a unittest assertion, not a bare assert,
    # because bare asserts are stripped under `python -O`.
    self.assertEqual(results, expected)

# @+node:ekr.20240109070553.1: *6* TestTBO.test_unary_ops
def test_unary_ops(self):
    """Compare TBO's results with black's for one-line unary-op cases."""
    # One-line pet peeves involving unary ops but *not* slices.

    # See https://peps.python.org/pep-0008/#pet-peeves
    # See https://peps.python.org/pep-0008/#other-recommendations

    tag = 'test_unary_op'

    # All entries are expected values....
    table = (
        # Calls...
        """f(-1)""",
        """f(-1 < 2)""",
        # Dicts...
        """d = {key: -3}""",
        """d['key'] = a[-i]""",
        # Unary minus...
        """v = -1 if a < b else -2""",
        # ~
        """a = ~b""",
        """c = a[:~e:]""",
        # """d = a[f() - 1 :]""",
        # """e = a[2 - 1 :]""",
        # """e = a[b[2] - 1 :]""",
    )
    for i, contents in enumerate(table):
        description = f"{tag} part {i}"
        contents, tokens = self.make_data(contents, description=description)
        expected = self.blacken(contents)
        results = self.beautify(contents, tokens, filename=description)
        if results != expected:  # pragma: no cover
            print('')
            print(
                f"TestTokenBasedOrange.{tag}:\n"
                f"  contents: {contents.rstrip()}\n"
                f"     black: {expected.rstrip()}\n"
                f"    orange: {results.rstrip() if results else 'None'}"
            )
        self.assertEqual(results, expected, msg=description)

# @+node:ekr.20240105153425.79: *6* TestTBO.test_verbatim
def test_verbatim(self):
    """Code following @nobeautify must pass through unchanged."""
    contents = """
@nobeautify

def addOptionsToParser(self, parser, trace_m):

    add = parser.add_option

    def add_bool(option, help, dest=None):
        add(option, action='store_true', dest=dest, help=help)

    add_bool('--diff',          'use Leo as an external git diff')
    # add_bool('--dock',          'use a Qt dock')
    add_bool('--fullscreen',    'start fullscreen')
    add_bool('--init-docks',    'put docks in default positions')
    # Multiple bool values.
    add('-v', '--version', action='store_true',
        help='print version number and exit')

# From leoAtFile.py
noDirective     =  1 # not an at-directive.
allDirective    =  2 # at-all (4.2)
docDirective    =  3 # @doc.


"""
    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected, msg=contents)

# @+node:ekr.20240105153425.80: *6* TestTBO.test_verbatim_with_pragma
def test_verbatim_with_pragma(self):
    """Code between 'pragma: no beautify' and 'pragma: beautify' must pass through unchanged."""
    contents = """
# pragma: no beautify

def addOptionsToParser(self, parser, trace_m):

    add = parser.add_option

    def add_bool(option, help, dest=None):
        add(option, action='store_true', dest=dest, help=help)

    add_bool('--diff',          'use Leo as an external git diff')
    # add_bool('--dock',          'use a Qt dock')
    add_bool('--fullscreen',    'start fullscreen')
    add_other('--window-size',  'initial window size (height x width)', m='SIZE')
    add_other('--window-spot',  'initial window position (top x left)', m='SPOT')
    # Multiple bool values.
    add('-v', '--version', action='store_true',
        help='print version number and exit')

# pragma: beautify
"""
    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected, msg=contents)

# @+node:ekr.20250505231955.1: *6* TestTBO.test_ws_in_blank_line
def test_ws_in_blank_line(self):
    """Whitespace-only lines must become truly blank lines."""
    contents = 'line 1\n   \nline 2\n'
    expected = 'line 1\n\nline 2\n'
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    # Bug fix: use a unittest assertion, not a bare assert,
    # because bare asserts are stripped under `python -O`.
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.81: *6* TestTBO.verbatim2
def test_verbatim2(self):
    """@nobeautify inside a comment plus a doc part must pass through unchanged."""
    # We *do* want this test to contain verbatim sentinels.
    contents = """
@beautify
###@nobeautify
@ Starts doc part
More doc part.
The @c ends the doc part.
@c
    """
    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    # g.printObj(results, tag='Results')
    # g.printObj(expected, tag='Expected')
    self.assertEqual(results, expected, msg=contents)

# @+node:ekr.20240105153425.85: *5* class TestTokens (BaseTest)
class TestTokens(BaseTest):
    """
    Unit tests for tokenizing: each test checks that the generated token
    list round-trips back to the original source text.
    """

    @others


# @+node:ekr.20240105153425.93: *6* TT.show_example_dump
def show_example_dump(self):  # pragma: no cover
    """Dump the tokens of a small example. Run only when enabled explicitly."""
    example = """
print('line 1')
print('line 2')
print('line 3')
"""
    contents, token_list = self.make_data(example)
    dump_contents(contents)
    dump_tokens(token_list)

# @+node:ekr.20240105153425.94: *6* TT.test_bs_nl_tokens
def test_bs_nl_tokens(self):
    """Round-trip a backslash-newline. See https://bugs.python.org/issue38663."""
    contents = """
print \
    ('abc')
"""
    self.check_roundtrip(contents)

# @+node:ekr.20240105153425.95: *6* TT.test_continuation_1
def test_continuation_1(self):
    """Round-trip lines implicitly continued inside (), [] and {}."""
    contents = """
a = (3,4,
    5,6)
y = [3, 4,
    5]
z = {'a': 5,
    'b':15, 'c':True}
x = len(y) + 5 - a[
    3] - a[2] + len(z) - z[
    'b']
"""
    self.check_roundtrip(contents)

# @+node:ekr.20240105153425.96: *6* TT.test_continuation_2
def test_continuation_2(self):
    """A backslash means line continuation, except inside comments."""
    contents = 'x=1+\\\n    2# This is a comment\\\n    # This also'
    self.check_roundtrip(contents)

# @+node:ekr.20240105153425.97: *6* TT.test_continuation_3
def test_continuation_3(self):
    """Round-trip a backslash at the end of a comment line."""
    contents = """
# Comment \\\n
x = 0
"""
    self.check_roundtrip(contents)

# @+node:ekr.20240105153425.98: *6* TT.test_string_concatenation_1
def test_string_concatentation_1(self):
    """Round-trip two *plain* string literals on the same line."""
    # NOTE(review): the name misspells 'concatenation'; renaming would
    # change the public interface, so it is left as-is.
    contents = """'abc' 'xyz'"""
    self.check_roundtrip(contents)

# @+node:ekr.20240105153425.99: *6* TT.test_string_concatenation_2
def test_string_concatentation_2(self):
    """Round-trip an f-string followed by a plain string on the same line."""
    contents = """f'abc' 'xyz'"""
    self.check_roundtrip(contents)

# @+node:ekr.20240105153425.100: *6* TT.test_string_concatenation_3
def test_string_concatentation_3(self):
    """Round-trip a plain string followed by an f-string on the same line."""
    contents = """'abc' f'xyz'"""
    self.check_roundtrip(contents)

# @+node:ekr.20260110075329.1: *3* from leoAst and leoBeautify and related tests
# @+node:ekr.20200107175223.1: *4* 2.2: BaseTest.beautify
def beautify(
    self,
    contents,
    tokens,
    tree,
    filename=None,
    max_join_line_length=None,
    max_split_line_length=None,
):
    """
    BaseTest.beautify: beautify contents with the (legacy) Orange class.

    Record timing stats and remember the beautifier's code list.
    """
    t1 = get_time()
    if not contents:
        return ''  # pragma: no cover
    if not filename:
        # Default to the name of the calling test method.
        filename = g.callers(2).split(',')[0]
    orange = Orange()
    result_s = orange.beautify(
        contents, filename, tokens, tree,
        max_join_line_length=max_join_line_length,
        max_split_line_length=max_split_line_length)
    self.update_times('22: beautify', get_time() - t1)
    self.code_list = orange.code_list
    return result_s

# @+node:ekr.20240105153420.13: *4* BaseTest.beautify
def beautify(
    self,
    contents,
    tokens,
    filename=None,
    max_join_line_length=None,
    max_split_line_length=None,
):
    """
    BaseTest.beautify: beautify contents with TokenBasedOrange and
    remember the beautifier's output list.
    """
    if not contents:
        return ''  # pragma: no cover
    if not filename:
        # Default to the name of the calling test method.
        filename = g.callers(2).split(',')[0]
    orange = TokenBasedOrange()
    result_s = orange.beautify(contents, filename, tokens)
    self.output_list = orange.output_list
    return result_s

# @+node:ekr.20200107165628.1: *4* beautify-file-diff
@g.command('diff-beautify-files')
@g.command('beautify-files-diff')
def orange_diff_files(event: LeoKeyEvent) -> None:
    """
    Show the diffs that would result from beautifying the external files at
    c.p.
    """
    c = event.get('c')
    if not c or not c.p:
        return
    t1 = time.process_time()
    tag = 'beautify-files-diff'
    g.es(f"{tag}...")
    settings = orange_settings(c)
    roots = g.findRootsWithPredicate(c, c.p)
    for root in roots:
        filename = c.fullPath(root)
        if os.path.exists(filename):
            print('')
            print(f"{tag}: {g.shortFileName(filename)}")
            changed = leoAst.Orange(settings=settings).beautify_file_diff(filename)
            changed_s = 'changed' if changed else 'unchanged'
            g.es(f"{changed_s:>9}: {g.shortFileName(filename)}")
        else:
            print('')
            # Fix: report the missing path (the f-strings had no placeholder).
            print(f"{tag}: file not found: {filename}")
            g.es(f"file not found:\n{filename}")
    t2 = time.process_time()
    print('')
    g.es_print(f"{tag}: {len(roots)} file{g.plural(len(roots))} in {t2 - t1:5.2f} sec.")


# @+node:ekr.20200107165603.1: *4* beautify-files
@g.command('beautify-files')
def orange_files(event: LeoKeyEvent) -> None:
    """beautify one or more files at c.p."""
    c = event.get('c')
    if not c or not c.p:
        return
    t1 = time.process_time()
    tag = 'beautify-files'
    g.es(f"{tag}...")
    settings = orange_settings(c)
    roots = g.findRootsWithPredicate(c, c.p)
    n_changed = 0
    for root in roots:
        filename = c.fullPath(root)
        if os.path.exists(filename):
            changed = leoAst.Orange(settings=settings).beautify_file(filename)
            if changed:
                n_changed += 1
        else:
            # Fix: report the missing path (the f-string had no placeholder).
            g.es_print(f"file not found: {filename}")
    t2 = time.process_time()
    print('')
    g.es_print(f"total files: {len(roots)}, changed files: {n_changed}, in {t2 - t1:5.2f} sec.")


# @+node:ekr.20200107165250.1: *4* class Orange
class Orange:  # Orange is the new Black.
    """
    This class is deprecated. Use the TokenBasedOrange class in leoTokens.py

    This class is a demo of the TokenOrderGenerator class.

    This is predominantly a *token-based* beautifier. However,
    orange.do_op, orange.colon, and orange.possible_unary_op use the parse
    tree to provide context that would otherwise be difficult to deduce.
    """

    # This switch is really a comment. It will always be false.
    # It marks the code that simulates the operation of the black tool.
    black_mode = False

    # Patterns...
    # Matches '# pragma: no beautify' or '#@@nobeautify': starts verbatim mode.
    nobeautify_pat = re.compile(r'\s*#\s*pragma:\s*no\s*beautify\b|#\s*@@nobeautify')

    # Patterns from FastAtRead class, specialized for python delims.
    node_pat = re.compile(r'^(\s*)#@\+node:([^:]+): \*(\d+)?(\*?) (.*)$')  # @node
    start_doc_pat = re.compile(r'^\s*#@\+(at|doc)?(\s.*?)?$')  # @doc or @
    at_others_pat = re.compile(r'^(\s*)#@(\+|-)others\b(.*)$')  # @others

    # Doc parts end with @c or a node sentinel. Specialized for python.
    end_doc_pat = re.compile(r"^\s*#@(@(c(ode)?)|([+]node\b.*))$")

    @others


# @+node:ekr.20200107165250.2: *5* orange.ctor
def __init__(self, settings: Settings = None):
    """Ctor for the Orange class: set defaults, then apply overrides."""
    if settings is None:
        settings = {}
    valid_keys = (
        'allow_joined_strings', 'force', 'max_join_line_length',
        'max_split_line_length', 'orange', 'tab_width', 'verbose',
    )
    self.kind: str = ''  # For mypy.
    # Default settings...
    self.allow_joined_strings = False  # EKR's preference.
    self.force = False
    self.max_join_line_length = 88
    self.max_split_line_length = 88
    self.tab_width = 4
    self.verbose = False
    # Apply overrides from the settings dict...
    for key, value in settings.items():  # pragma: no cover
        if key in valid_keys and value is not None:
            setattr(self, key, value)
        else:
            g.trace(f"Unexpected setting: {key} = {value!r}")

# @+node:ekr.20200107165250.51: *5* orange.push_state
def push_state(self, kind: str, value: Union[int, str] = None) -> None:
    """Push a new ParseState onto the state stack."""
    self.state_stack.append(ParseState(kind, value))

# @+node:ekr.20200107165250.8: *5* orange: Entries & helpers
# @+node:ekr.20200107173542.1: *6* orange.beautify (main token loop)
def oops(self) -> None:  # pragma: no cover
    """Default handler: report an input token kind with no do_* method."""
    g.trace(f"Unknown kind: {self.kind}")

def beautify(
    self,
    contents: str,
    filename: str,
    tokens: list[Token],
    tree: Optional[Node] = None,
    max_join_line_length: Optional[int] = None,
    max_split_line_length: Optional[int] = None,
) -> str:
    """
    The main line. Create output tokens and return the result as a string.

    beautify_file and beautify_file_def call this method.
    """
    # Config overrides
    if max_join_line_length is not None:
        self.max_join_line_length = max_join_line_length
    if max_split_line_length is not None:
        self.max_split_line_length = max_split_line_length
    # State vars...
    self.curly_brackets_level = 0  # Number of unmatched '{' tokens.
    self.decorator_seen = False  # Set by do_name for do_op.
    self.in_arg_list = 0  # > 0 if in an arg list of a def.
    self.in_fstring = False  # True: scanning an f-string.
    self.level = 0  # Set only by do_indent and do_dedent.
    self.lws = ''  # Leading whitespace.
    self.paren_level = 0  # Number of unmatched '(' tokens.
    self.square_brackets_stack: list[bool] = []  # A stack of bools, for self.word().
    self.state_stack: list["ParseState"] = []  # Stack of ParseState objects.
    self.val = None  # The input token's value (a string).
    self.verbatim = False  # True: don't beautify.
    #
    # Init output list and state...
    self.code_list: list[OutputToken] = []  # The list of output tokens.
    self.tokens = tokens  # The list of input tokens.
    self.tree = tree
    self.add_token('file-start', '')
    self.push_state('file-start')
    # The main loop: dispatch each input token to a handler.
    for token in tokens:
        self.token = token
        self.kind, self.val, self.line = token.kind, token.value, token.line
        if self.verbatim:
            # Verbatim mode: copy tokens unchanged until do_verbatim ends the mode.
            self.do_verbatim()
        elif self.in_fstring:
            # F-string mode: copy tokens until an 'fstring_end' token.
            self.continue_fstring()
        else:
            # Dispatch to do_<kind>, falling back to self.oops for unknown kinds.
            func = getattr(self, f"do_{token.kind}", self.oops)
            func()
    # Any post pass would go here.
    return output_tokens_to_string(self.code_list)

# @+node:ekr.20200107172450.1: *6* orange.beautify_file (entry)
def beautify_file(self, filename: str) -> bool:  # pragma: no cover
    """
    Orange: Beautify the given external file.

    Return True if the file was changed.
    """
    self.filename = filename
    # Annotate the tokens.
    tog = TokenOrderGenerator()
    contents, encoding, tokens, tree = tog.init_from_file(filename)
    if not contents or not tokens or not tree:
        return False  # Not an error.
    # Beautify.
    try:
        results = self.beautify(contents, filename, tokens, tree)  # type:ignore
    except BeautifyError:
        return False  # #2578.
    # Something besides newlines must change.
    if regularize_nls(contents) == regularize_nls(results):
        return False
    if 0:  # This obscures more import error messages.
        show_diffs(contents, results, filename=filename)
    # Write the results
    print(f"Beautified: {g.shortFileName(filename)}")
    write_file(filename, results, encoding=encoding)
    return True

# @+node:ekr.20200107172512.1: *6* orange.beautify_file_diff (entry)
def beautify_file_diff(self, filename: str) -> bool:  # pragma: no cover
    """
    Orange: Print the diffs that would result from the orange-file command.

    Return True if the file would be changed.
    """
    tag = 'diff-beautify-file'
    self.filename = filename
    # Legacy: use parse trees. (Flattened an always-true 'if 1:' branch.)
    tog = TokenOrderGenerator()
    contents, encoding, tokens, tree = tog.init_from_file(filename)
    if not contents or not tokens or not tree:
        # Fix: report the file name (the f-string had no placeholder).
        print(f"{tag}: Can not beautify: {filename}")
        return False
    # Beautify.
    results = self.beautify(contents, filename, tokens, tree)
    # Something besides newlines must change.
    if regularize_nls(contents) == regularize_nls(results):
        # Fix: report the file name (the f-string had no placeholder).
        print(f"{tag}: Unchanged: {filename}")
        return False
    # Show the diffs.
    show_diffs(contents, results, filename=filename)
    return True

# @+node:ekr.20240104093833.1: *6* orange.init_tokens_from_file
def init_tokens_from_file(self, filename: str) -> tuple[str, str, list[Token]]:  # pragma: no cover
    """
    Create the list of tokens for the given file.
    Return (contents, encoding, tokens), or (None, None, None) on failure.
    """
    self.level = 0
    self.filename = filename
    contents, encoding = g.readFileIntoString(filename)
    if not contents:
        return None, None, None
    tokens = self.make_tokens(contents)
    self.tokens = tokens
    return contents, encoding, tokens

# @+node:ekr.20240104123947.1: *6* orange.make_tokens
def make_tokens(self, contents: str) -> list[Token]:
    """
    Return a list (not a generator) of Token objects corresponding to the
    list of 5-tuples generated by tokenize.tokenize.

    Perform consistency checks and handle all exceptions.
    """

    def check(contents: str, tokens: list[Token]) -> bool:
        """True if the tokens round-trip back to contents."""
        result = tokens_to_string(tokens)
        if result != contents:
            print('\nRound-trip check FAILS')
            print('Contents...\n')
            g.printObj(contents)
            print('\nResult...\n')
            g.printObj(result)
            return False
        return True

    try:
        readline = io.BytesIO(contents.encode('utf-8')).readline
        five_tuples = tokenize.tokenize(readline)
    except Exception:
        print('make_tokens: exception in tokenize.tokenize')
        g.es_exception()
        return None
    tokens = Tokenizer().create_tokens(contents, five_tuples)
    assert check(contents, tokens)
    return tokens

# @+node:ekr.20200107165250.13: *5* orange: Input token handlers
# @+node:ekr.20200107165250.14: *6* orange.do_comment
# Per-node state: True while inside an @doc part.
in_doc_part = False

# Matches ordinary comments: excludes sentinels, bang lines, and '##' comments.
comment_pat = re.compile(r'^(\s*)#[^@!# \n]')

def do_comment(self) -> None:
    """Handle a comment token."""
    val = self.val
    #
    # Leo-specific code...
    if self.node_pat.match(val):
        # Clear per-node state.
        self.in_doc_part = False
        self.verbatim = False
        self.decorator_seen = False
        # Do *not* clear other state, which may persist across @others.
        # self.curly_brackets_level = 0
        # self.in_arg_list = 0
        # self.level = 0
        # self.lws = ''
        # self.paren_level = 0
        # self.square_brackets_stack = []
        # self.state_stack = []
    else:
        # Keep track of verbatim mode.
        if self.beautify_pat.match(val):
            self.verbatim = False
        elif self.nobeautify_pat.match(val):
            self.verbatim = True
        # Keep track of @doc parts, to honor the convention for splitting lines.
        if self.start_doc_pat.match(val):
            self.in_doc_part = True
        if self.end_doc_pat.match(val):
            self.in_doc_part = False
    #
    # General code: Generate the comment.
    self.clean('blank')
    entire_line = self.line.lstrip().startswith('#')
    if entire_line:
        self.clean('hard-blank')
        self.clean('line-indent')
        # #1496: No further munging needed.
        val = self.line.rstrip()
        # #3056: Insure one space after '#' in non-sentinel comments.
        #        Do not change bang lines or '##' comments.
        if m := self.comment_pat.match(val):
            i = len(m.group(1))
            val = val[:i] + '# ' + val[i + 1 :]
    else:
        # Exactly two spaces before trailing comments.
        val = '  ' + self.val.rstrip()
    self.add_token('comment', val)

# @+node:ekr.20200107165250.15: *6* orange.do_encoding
def do_encoding(self) -> None:
    """Ignore the 'encoding' token: it produces no output."""
    # Nothing to do.

# @+node:ekr.20200107165250.16: *6* orange.do_endmarker
def do_endmarker(self) -> None:
    """Handle the endmarker token: end the file with exactly one newline."""
    self.clean_blank_lines()
    self.add_token('line-end', '\n')

# @+node:ekr.20231215212951.1: *6* orange.do_fstring_start & continue_fstring
def do_fstring_start(self) -> None:
    """Enter f-string mode on an 'fstring_start' token."""
    self.in_fstring = True
    self.add_token('verbatim', self.val)

def continue_fstring(self) -> None:
    """
    Copy one token verbatim while in f-string mode.
    Leave f-string mode when an 'fstring_end' token arrives.
    """
    self.add_token('verbatim', self.val)
    if self.kind == 'fstring_end':
        self.in_fstring = False

# @+node:ekr.20200107165250.18: *6* orange.do_indent & do_dedent & helper
# Note: other methods use self.level.

def do_dedent(self) -> None:
    """Handle dedent token: decrease self.level and re-indent."""
    self.level -= 1
    self.lws = self.level * self.tab_width * ' '
    self.line_indent()
    if self.black_mode:  # pragma: no cover (black)
        # Pop a matching 'indent' state and then any 'class'/'def' state,
        # inserting blank lines after the class or def.
        state = self.state_stack[-1]
        if state.kind == 'indent' and state.value == self.level:
            self.state_stack.pop()
            state = self.state_stack[-1]
            if state.kind in ('class', 'def'):
                self.state_stack.pop()
                self.handle_dedent_after_class_or_def(state.kind)

def do_indent(self) -> None:
    """
    Handle indent token: increase self.level and re-indent.

    Raises BeautifyError for leading tabs or irregular indentation (#2578).
    """
    # #2578: Refuse to beautify files containing leading tabs or unusual indentation.
    consider_message = 'consider using python/Tools/scripts/reindent.py'
    if '\t' in self.val:  # pragma: no cover
        message = f"Leading tabs found: {self.filename}"
        print(message)
        print(consider_message)
        raise BeautifyError(message)
    if (len(self.val) % self.tab_width) != 0:  # pragma: no cover
        message = f" Indentation error: {self.filename}"
        print(message)
        print(consider_message)
        raise BeautifyError(message)
    new_indent = self.val
    old_indent = self.level * self.tab_width * ' '
    if new_indent > old_indent:
        self.level += 1
    elif new_indent < old_indent:  # pragma: no cover (defensive)
        g.trace('\n===== can not happen', repr(new_indent), repr(old_indent))
    self.lws = new_indent
    self.line_indent()

# @+node:ekr.20200220054928.1: *7* orange.handle_dedent_after_class_or_def
def handle_dedent_after_class_or_def(self, kind: str) -> None:  # pragma: no cover (black)
    """
    Insert blank lines after a class or def as the result of a 'dedent' token.

    Normal comment lines may precede the 'dedent'.
    Insert the blank lines *before* such comment lines.
    """
    #
    # Compute the tail: tokens that should appear *after* the blank lines.
    i = len(self.code_list) - 1
    tail: list[OutputToken] = []
    while i > 0:
        t = self.code_list.pop()
        i -= 1
        if t.kind == 'line-indent':
            pass  # Drop line-indent tokens; line_indent() re-adds one below.
        elif t.kind == 'line-end':
            tail.insert(0, t)
        elif t.kind == 'comment':
            # Only underindented single-line comments belong in the tail.
            # @verbatim
            # @+node comments must never be in the tail.
            single_line = self.code_list[i].kind in ('line-end', 'line-indent')
            lws = len(t.value) - len(t.value.lstrip())
            underindent = lws <= len(self.lws)
            if underindent and single_line and not self.node_pat.match(t.value):
                # A single-line comment.
                tail.insert(0, t)
            else:
                self.code_list.append(t)
                break
        else:
            # Any other token ends the scan; put it back.
            self.code_list.append(t)
            break
    #
    # Remove leading 'line-end' tokens from the tail.
    while tail and tail[0].kind == 'line-end':
        tail = tail[1:]
    #
    # Put the newlines *before* the tail.
    # For Leo, always use 1 blank lines.
    n = 1  # n = 2 if kind == 'class' else 1
    # Retain the token (intention) for debugging.
    self.add_token('blank-lines', n)
    for _i in range(0, n + 1):
        self.add_token('line-end', '\n')
    if tail:
        self.code_list.extend(tail)
    self.line_indent()

# @+node:ekr.20200107165250.20: *6* orange.do_name
def do_name(self) -> None:
    """Handle a name token, treating keyword-like names as word operators."""
    name = self.val
    if self.black_mode and name in ('class', 'def'):  # pragma: no cover (black)
        # Black mode: manage newlines before and after 'class' or 'def'.
        self.decorator_seen = False
        state = self.state_stack[-1]
        if state.kind == 'decorator':
            # Always do this, regardless of @bool clean-blank-lines.
            self.clean_blank_lines()
            # Suppress split/join.
            self.add_token('hard-newline', '\n')
            self.add_token('line-indent', self.lws)
            self.state_stack.pop()
        else:
            # Always do this, regardless of @bool clean-blank-lines.
            self.blank_lines(2 if name == 'class' else 1)
        self.push_state(name)
        # For trailing lines after inner classes/defs.
        self.push_state('indent', self.level)
        self.word(name)
        return
    # Leo mode: keyword-like operators get word_op; all other names get word.
    word_ops = (
        'and', 'elif', 'else', 'for', 'if',
        'in', 'not', 'not in', 'or', 'while',
    )
    if name in word_ops:
        self.word_op(name)
    else:
        self.word(name)

# @+node:ekr.20200107165250.21: *6* orange.do_newline & do_nl
def do_newline(self) -> None:
    """Handle a 'newline' token: end the logical line."""
    self.line_end()

def do_nl(self) -> None:
    """Handle an 'nl' token: end a continuation line."""
    self.line_end()

# @+node:ekr.20200107165250.22: *6* orange.do_number
def do_number(self) -> None:
    """Handle a number token: a blank request, then the number itself."""
    self.blank()
    self.add_token('number', self.val)

# @+node:ekr.20200107165250.23: *6* orange.do_op & helper
def do_op(self) -> None:
    """Handle an op token, dispatching on the operator's value."""
    val = self.val
    if val == '.':
        self.clean('blank')
        prev = self.code_list[-1]
        # #2495 & #2533: Special case for 'from .'
        if prev.kind == 'word' and prev.value == 'from':
            self.blank()
        self.add_token('op-no-blanks', val)
    elif val == '@':
        # A decorator.
        if self.black_mode:  # pragma: no cover (black)
            if not self.decorator_seen:
                self.blank_lines(1)
                self.decorator_seen = True
        self.clean('blank')
        self.add_token('op-no-blanks', val)
        self.push_state('decorator')
    elif val == ':':
        # Treat slices differently.
        self.colon(val)
    elif val in ',;':
        # Pep 8: Avoid extraneous whitespace immediately before
        # comma, semicolon, or colon.
        self.clean('blank')
        self.add_token('op', val)
        self.blank()
    elif val in '([{':
        # Pep 8: Avoid extraneous whitespace immediately inside
        # parentheses, brackets or braces.
        self.lt(val)
    elif val in ')]}':
        # Ditto.
        self.rt(val)
    elif val == '=':
        # Spacing depends on keyword-argument context.
        self.do_equal_op(val)
    elif val in '~+-':
        # May be unary or binary; the parse tree decides.
        self.possible_unary_op(val)
    elif val == '*':
        self.star_op()
    elif val == '**':
        self.star_star_op()
    else:
        # Pep 8: always surround binary operators with a single space.
        # '==','+=','-=','*=','**=','/=','//=','%=','!=','<=','>=','<','>',
        # '^','~','*','**','&','|','/','//',
        # Pep 8: If operators with different priorities are used,
        # consider adding whitespace around the operators with the lowest priorities.
        self.blank()
        self.add_token('op', val)
        self.blank()

# @+node:ekr.20230115141629.1: *7* orange.do_equal_op
# Keys: token.index of '=' token. Values: count of ???s
arg_dict: dict[int, int] = {}

dump_flag = True

def do_equal_op(self, val: str) -> None:
    if 0:
        token = self.token
        g.trace(
            f"token.index: {token.index:2} paren_level: {self.paren_level} "
            f"token.equal_sign_spaces: {int(token.equal_sign_spaces)} "
            # f"{token.node.__class__.__name__}"
        )
        # dump_tree(self.tokens, self.tree)
    if self.token.equal_sign_spaces:
        self.blank()
        self.add_token('op', val)
        self.blank()
    else:
        # Pep 8: Don't use spaces around the = sign when used to indicate
        #        a keyword argument or a default parameter value.
        #        However, hen combining an argument annotation with a default value,
        #        *do* use spaces around the = sign
        self.clean('blank')
        self.add_token('op-no-blanks', val)

# @+node:ekr.20200107165250.24: *6* orange.do_string
def do_string(self) -> None:
    """Handle a 'string' token."""
    # Careful: continued strings may contain '\r'.
    self.add_token('string', regularize_nls(self.val))
    self.blank()

# @+node:ekr.20200210175117.1: *6* orange.do_verbatim
# Matches comments that end verbatim mode: 'pragma: beautify', '@@beautify',
# or a Leo sentinel (@+node, @+/-others, @+/-<<).
beautify_pat = re.compile(r'#\s*pragma:\s*beautify\b|#\s*@@beautify|#\s*@\+node|#\s*@[+-]others|#\s*@[+-]<<')

def do_verbatim(self) -> None:
    """
    Handle one token in verbatim mode.
    End verbatim mode when the appropriate comment is seen.
    """
    kind = self.kind
    #
    # Careful: tokens may contain '\r'
    val = regularize_nls(self.val)
    if kind == 'comment':
        if self.beautify_pat.match(val):
            self.verbatim = False
        val = val.rstrip()
        self.add_token('comment', val)
        return
    # Track indentation level even while in verbatim mode.
    if kind == 'indent':
        self.level += 1
        self.lws = self.level * self.tab_width * ' '
    if kind == 'dedent':
        self.level -= 1
        self.lws = self.level * self.tab_width * ' '
    self.add_token('verbatim', val)

# @+node:ekr.20200107165250.25: *6* orange.do_ws
def do_ws(self) -> None:
    """
    Handle the "ws" pseudo-token.

    Emit the whitespace only when it ends with backslash-newline, or when
    it is significant start-of-line whitespace inside brackets.
    """
    val = self.val
    if '\\\n' in val:
        # Backslash-newline: keep the whitespace verbatim.
        self.clean('blank')
        self.add_token('op-no-blanks', val)
        return
    # Start-of-line whitespace.
    prev = self.code_list[-1]
    in_brackets = (
        self.paren_level or self.square_brackets_stack or self.curly_brackets_level)
    if in_brackets and prev.kind == 'line-indent':
        # Retain the indent that won't be cleaned away.
        self.clean('line-indent')
        self.add_token('hard-blank', val)

# @+node:ekr.20200107165250.26: *5* orange: Output token generators
# @+node:ekr.20200118145044.1: *6* orange.add_line_end
def add_line_end(self) -> OutputToken:
    """
    Add a 'line-end' token to the code list and return it.

    May be called from do_name as well as do_newline and do_nl.
    """
    assert self.token.kind in ('newline', 'nl'), self.token.kind
    self.clean('blank')  # Important!
    self.clean('line-indent')
    tok = self.add_token('line-end', '\n')
    # Record which input token kind produced this line end.
    tok.newline_kind = self.token.kind
    return tok

# @+node:ekr.20200107170523.1: *6* orange.add_token
def add_token(self, kind: str, value: Value) -> OutputToken:
    """Create an OutputToken, append it to the code list, and return it."""
    index = len(self.code_list)
    tok = OutputToken(kind, value)
    tok.index = index
    self.code_list.append(tok)
    return tok

# @+node:ekr.20200107165250.27: *6* orange.blank
def blank(self) -> None:
    """Add a 'blank' output token unless the previous token forbids it."""
    no_blank_after = (
        'blank',
        'blank-lines',
        'file-start',
        'hard-blank',  # Unique to orange.
        'line-end',
        'line-indent',
        'lt',
        'op-no-blanks',
        'unary-op',
    )
    if self.code_list[-1].kind not in no_blank_after:
        self.add_token('blank', ' ')

# @+node:ekr.20200107165250.29: *6* orange.blank_lines (black only)
def blank_lines(self, n: int) -> None:  # pragma: no cover (black)
    """
    Add a request for n blank lines to the code list.
    Multiple blank-lines request yield at least the maximum of all requests.
    """
    self.clean_blank_lines()
    prev = self.code_list[-1]
    if prev.kind == 'file-start':
        # Never add blank lines at the start of the file.
        self.add_token('blank-lines', n)
        return
    # n blank lines require n + 1 newlines.
    for _i in range(0, n + 1):
        self.add_token('line-end', '\n')
    # Retain the token (intention) for debugging.
    self.add_token('blank-lines', n)
    self.line_indent()

# @+node:ekr.20200107165250.30: *6* orange.clean
def clean(self, kind: str) -> None:
    """Pop the last output token if its kind matches the given kind."""
    if self.code_list[-1].kind == kind:
        self.code_list.pop()

# @+node:ekr.20200107165250.31: *6* orange.clean_blank_lines
def clean_blank_lines(self) -> bool:
    """
    Remove all vestiges of previous blank lines.

    Return True if any removed 'line-end' token represented a "hard" newline.
    """
    removed_hard_newline = False
    while self.code_list[-1].kind in ('blank-lines', 'line-end', 'line-indent'):
        tok = self.code_list.pop()
        if tok.kind == 'line-end' and getattr(tok, 'newline_kind', None) != 'nl':
            removed_hard_newline = True
    return removed_hard_newline

# @+node:ekr.20200107165250.32: *6* orange.colon
def colon(self, val: str) -> None:
    """Handle a colon, suppressing blanks inside simple slices."""

    def is_expr(node: Node) -> bool:
        """True if node is any expression other than += number."""
        if isinstance(node, (ast.BinOp, ast.Call, ast.IfExp)):
            return True
        # ast.Num is deprecated in favor of ast.Constant on Python 3.12+.
        num_node = ast.Num if g.python_version_tuple < (3, 12, 0) else ast.Constant  # pylint: disable=no-member
        return isinstance(node, ast.UnaryOp) and not isinstance(node.operand, num_node)

    node = self.token.node
    self.clean('blank')
    if not isinstance(node, ast.Slice):
        # Not a slice: ':' followed by a blank.
        self.add_token('op', val)
        self.blank()
        return
    # A slice.
    lower = getattr(node, 'lower', None)
    upper = getattr(node, 'upper', None)
    step = getattr(node, 'step', None)
    if any(is_expr(z) for z in (lower, upper, step)):
        # A complex slice: surround ':' with blanks.
        prev = self.code_list[-1]
        if prev.value not in '[:':
            self.blank()
        self.add_token('op', val)
        self.blank()
    else:
        # A simple slice: no blanks around ':'.
        self.add_token('op-no-blanks', val)

# @+node:ekr.20200107165250.33: *6* orange.line_end
def line_end(self) -> None:
    """Add a line-end request to the code list."""
    # This should be called only be do_newline and do_nl.
    node, token = self.token.statement_node, self.token
    assert token.kind in ('newline', 'nl'), (token.kind, g.callers())
    # Create the 'line-end' output token.
    self.add_line_end()
    # Attempt to split the line.
    was_split = self.split_line(node, token)
    # Attempt to join the line only if it has not just been split.
    if not was_split and self.max_join_line_length > 0:
        self.join_lines(node, token)
    # Add the indentation for all lines
    # until the next indent or unindent token.
    self.line_indent()

# @+node:ekr.20200107165250.40: *6* orange.line_indent
def line_indent(self) -> None:
    """Add a 'line-indent' token carrying the current leading whitespace."""
    self.clean('line-indent')  # Defensive: should never find one.
    self.add_token('line-indent', self.lws)

# @+node:ekr.20200107165250.41: *6* orange.lt & rt
# @+node:ekr.20200107165250.42: *7* orange.lt
def lt(self, val: str) -> None:
    """Generate output for a left paren, square bracket, or curly brace."""
    assert val in '([{', repr(val)
    # Update bracket-nesting state.
    if val == '(':
        self.paren_level += 1
    elif val == '[':
        self.square_brackets_stack.append(False)
    else:  # '{'
        self.curly_brackets_level += 1
    self.clean('blank')
    prev = self.code_list[-1]
    if prev.kind in ('op', 'word-op'):
        self.blank()
        self.add_token('lt', val)
    elif prev.kind == 'word':
        # Only suppress blanks before '(' or '[' for non-keywords.
        if val == '{' or prev.value in ('if', 'else', 'return', 'for'):
            self.blank()
        elif val == '(':
            self.in_arg_list += 1
        self.add_token('lt', val)
    else:
        self.clean('blank')
        self.add_token('op-no-blanks', val)

# @+node:ekr.20200107165250.43: *7* orange.rt
def rt(self, val: str) -> None:
    """Generate output for a right paren, square bracket, or curly brace."""
    assert val in ')]}', repr(val)
    # Update bracket-nesting state.
    if val == ')':
        self.paren_level -= 1
        self.in_arg_list = max(0, self.in_arg_list - 1)
    elif val == ']':
        self.square_brackets_stack.pop()
    else:  # '}'
        self.curly_brackets_level -= 1
    self.clean('blank')
    self.add_token('rt', val)

# @+node:ekr.20200107165250.45: *6* orange.possible_unary_op & unary_op
def possible_unary_op(self, s: str) -> None:
    """Add s as a unary op when its node is an ast.UnaryOp, else as a binary op."""
    node = self.token.node
    self.clean('blank')
    if isinstance(node, ast.UnaryOp):
        self.unary_op(s)
        return
    # Binary operator: surround it with blanks.
    self.blank()
    self.add_token('op', s)
    self.blank()

def unary_op(self, s: str) -> None:
    """Append a 'unary-op' token, with a leading blank unless it follows an open bracket."""
    assert s and isinstance(s, str), repr(s)
    self.clean('blank')
    # No blank after '(' / '[' / '{'; a blank everywhere else.
    if self.code_list[-1].kind != 'lt':
        self.blank()
    self.add_token('unary-op', s)

# @+node:ekr.20200107165250.46: *6* orange.star_op
def star_op(self) -> None:
    """Put a '*' op, with special cases for *args."""
    node = self.token.node
    self.clean('blank')
    if isinstance(node, ast.arguments):
        # '*args' in a def: blank before, none after.  #2533
        self.blank()
        self.add_token('op', '*')
        return
    if self.paren_level > 0:
        # Inside parens: treat '*' after '(' / ',' as an unpacking star.
        prev = self.code_list[-1]
        if prev.kind == 'lt' or (prev.kind, prev.value) == ('op', ','):
            self.blank()
            self.add_token('op', '*')
            return
    # Binary multiplication: blanks on both sides.
    self.blank()
    self.add_token('op', '*')
    self.blank()

# @+node:ekr.20200107165250.47: *6* orange.star_star_op
def star_star_op(self) -> None:
    """Put a '**' operator, with a special case for **kwargs."""
    node = self.token.node
    self.clean('blank')
    if isinstance(node, ast.arguments):
        # '**kwargs' in a def: blank before, none after.  #2533
        self.blank()
        self.add_token('op', '**')
        return
    if self.paren_level > 0:
        # Inside parens: treat '**' after '(' / ',' as a dict-unpacking star.
        prev = self.code_list[-1]
        if prev.kind == 'lt' or (prev.kind, prev.value) == ('op', ','):
            self.blank()
            self.add_token('op', '**')
            return
    # Binary exponentiation: blanks on both sides.
    self.blank()
    self.add_token('op', '**')
    self.blank()

# @+node:ekr.20200107165250.48: *6* orange.word & word_op
def word(self, s: str) -> None:
    """Add a word request to the code list."""
    assert s and isinstance(s, str), repr(s)
    node = self.token.node
    if isinstance(node, ast.ImportFrom) and s == 'import':  # #2533
        # Force exactly one leading blank before 'import'.
        self.clean('blank')
        self.add_token('blank', ' ')
        self.add_token('word', s)
        return
    if self.square_brackets_stack:
        # In a subscript: a previous 'op-no-blanks' token may cancel this blank.
        self.blank()
        self.add_token('word', s)
        return
    if self.in_arg_list > 0:
        # In an argument list: no leading blank.
        self.add_token('word', s)
        self.blank()
        return
    # Default: blanks on both sides.
    self.blank()
    self.add_token('word', s)
    self.blank()

def word_op(self, s: str) -> None:
    """Add a word-op request (e.g. 'and', 'not', 'in') surrounded by blanks."""
    assert s and isinstance(s, str), repr(s)
    self.blank()
    self.add_token('word-op', s)
    self.blank()

# @+node:ekr.20200118120049.1: *5* orange: Split/join
# @+node:ekr.20200107165250.34: *6* orange.split_line & helpers
def split_line(self, node: Node, token: Token) -> bool:
    """
    Split token's line, if possible and enabled.

    Return True if the line was broken into two or more lines.
    """
    assert token.kind in ('newline', 'nl'), repr(token)
    # Guard: splitting is disabled.
    if self.max_split_line_length <= 0:  # pragma: no cover (user option)
        return False
    # Guard: the node can't be split.
    if not is_long_statement(node):
        return False
    # Gather the *output* tokens of the previous line.
    prev_line = self.find_prev_line()
    prev_line_s = ''.join(z.to_string() for z in prev_line)
    # Guard: the line is already short enough.
    if len(prev_line_s) < self.max_split_line_length:
        return False
    # Guard: no opening delim: (, [ or {.
    if not any(z.kind == 'lt' for z in prev_line):  # pragma: no cover (defensive)
        return False
    prefix = self.find_line_prefix(prev_line)
    # Compute the tail before cutting back the code list.
    tail = prev_line[len(prefix):]
    # Cut back the token list: subtract 1 for the trailing line-end.
    self.code_list = self.code_list[: len(self.code_list) - len(prev_line) - 1]
    # Append the tail, splitting it further as needed.
    self.append_tail(prefix, tail)
    # Restore the line-end token deleted by find_line_prefix.
    self.add_token('line-end', '\n')
    return True

# @+node:ekr.20200107165250.35: *7* orange.append_tail
def append_tail(self, prefix: list[OutputToken], tail: list[OutputToken]) -> None:
    """Append the tail tokens, splitting the line further as necessary."""
    tail_s = ''.join([z.to_string() for z in tail])
    if len(tail_s) < self.max_split_line_length:
        # The tail fits on a single continuation line.
        # Add the prefix.
        self.code_list.extend(prefix)
        # Start a new line and increase the indentation.
        self.add_token('line-end', '\n')
        self.add_token('line-indent', self.lws + ' ' * 4)
        self.code_list.extend(tail)
        return
    # Still too long.  Split the line at commas.
    self.code_list.extend(prefix)
    # Start a new line and increase the indentation.
    self.add_token('line-end', '\n')
    self.add_token('line-indent', self.lws + ' ' * 4)
    # The prefix ends with the open delim; derive the matching close delim.
    open_delim = Token(kind='lt', value=prefix[-1].value)
    value = open_delim.value.replace('(', ')').replace('[', ']').replace('{', '}')
    close_delim = Token(kind='rt', value=value)
    delim_count = 1
    lws = self.lws + ' ' * 4
    for i, t in enumerate(tail):
        if t.kind == 'op' and t.value == ',':
            if delim_count == 1:
                # A top-level comma: start a new line.
                self.add_token('op-no-blanks', ',')
                self.add_token('line-end', '\n')
                self.add_token('line-indent', lws)
                # Kill a following blank.
                if i + 1 < len(tail):
                    next_t = tail[i + 1]
                    if next_t.kind == 'blank':
                        next_t.kind = 'no-op'
                        next_t.value = ''
            else:
                # A nested comma: copy it unchanged.
                self.code_list.append(t)
        elif t.kind == close_delim.kind and t.value == close_delim.value:
            # Done if the delims match.
            delim_count -= 1
            if delim_count == 0:
                # Start a new line
                # NOTE(review): this emits a trailing comma before the close delim — confirm intended.
                self.add_token('op-no-blanks', ',')
                self.add_token('line-end', '\n')
                self.add_token('line-indent', self.lws)
                self.code_list.extend(tail[i:])
                return
            # Closing a nested delim: dedent one level.
            lws = lws[:-4]
            self.code_list.append(t)
        elif t.kind == open_delim.kind and t.value == open_delim.value:
            # Opening a nested delim: indent one more level.
            delim_count += 1
            lws = lws + ' ' * 4
            self.code_list.append(t)
        else:
            self.code_list.append(t)
    # Unbalanced delims: should never happen.
    g.trace('BAD DELIMS', delim_count)  # pragma: no cover

# @+node:ekr.20200107165250.36: *7* orange.find_prev_line
def find_prev_line(self) -> list[OutputToken]:
    """Return the tokens of the previous line, in their original order."""
    result = []
    # Scan backward, skipping the trailing line-end token.
    for token in reversed(self.code_list[:-1]):
        if token.kind in ('hard-newline', 'line-end'):
            break
        result.append(token)
    result.reverse()
    return result

# @+node:ekr.20200107165250.37: *7* orange.find_line_prefix
def find_line_prefix(self, token_list: list[OutputToken]) -> list[OutputToken]:
    """
    Return a new list of all tokens up to and including the first 'lt' token.
    If there is no 'lt' token, return a copy of the entire list.
    """
    for i, token in enumerate(token_list):
        if token.kind == 'lt':
            return token_list[: i + 1]
    return token_list[:]

# @+node:ekr.20200107165250.39: *6* orange.join_lines
def join_lines(self, node: Node, token: Token) -> None:
    """
    Join preceding lines, if possible and enabled.
    token is a line_end token. node is the corresponding ast node.
    """
    if self.max_join_line_length <= 0:  # pragma: no cover (user option)
        return
    assert token.kind in ('newline', 'nl'), repr(token)
    if token.kind == 'nl':
        return
    # Scan backward in the *code* list,
    # looking for 'line-end' tokens with tok.newline_kind == 'nl'
    nls = 0
    i = len(self.code_list) - 1
    t = self.code_list[i]
    assert t.kind == 'line-end', repr(t)
    # Not all tokens have a newline_kind ivar.
    assert t.newline_kind == 'newline'
    i -= 1
    while i >= 0:
        t = self.code_list[i]
        if t.kind == 'comment':
            # Can't join.
            return
        if t.kind == 'string' and not self.allow_joined_strings:
            # An EKR preference: don't join strings, no matter what black does.
            # This allows "short" f-strings to be aligned.
            return
        if t.kind == 'line-end':
            if getattr(t, 'newline_kind', None) == 'nl':
                nls += 1
            else:
                break  # pragma: no cover
        i -= 1
    # Retain the file-start token.
    if i <= 0:
        i = 1
    if nls <= 0:  # pragma: no cover (rare)
        return
    # Retain the line-end and any following line-indent tokens.
    # Required, so that the regex below won't eat too much.
    while True:
        t = self.code_list[i]
        if t.kind == 'line-end':
            if getattr(t, 'newline_kind', None) == 'nl':  # pragma: no cover (rare)
                nls -= 1
            i += 1
        elif self.code_list[i].kind == 'line-indent':
            i += 1
        else:
            break  # pragma: no cover (defensive)
    if nls <= 0:  # pragma: no cover (defensive)
        return
    # Calculate the joined line.
    tail = self.code_list[i:]
    tail_s = output_tokens_to_string(tail)
    # Replace each newline and its leading whitespace with a single space.
    tail_s = re.sub(r'\n\s*', ' ', tail_s)
    tail_s = tail_s.replace('( ', '(').replace(' )', ')')
    tail_s = tail_s.rstrip()
    # Don't join the lines if they would be too long.
    if len(tail_s) > self.max_join_line_length:  # pragma: no cover (defensive)
        return
    # Cut back the code list.
    self.code_list = self.code_list[:i]
    # Add the new output tokens.
    self.add_token('string', tail_s)
    self.add_token('line-end', '\n')

# @+node:ekr.20200107170126.1: *4* class ParseState
class ParseState:
    """
    One item in the parse state stack.

    The present states:

    'file-start': Ensures the state stack is never empty.

    'decorator': The last '@' was a decorator.

        do_op():    push_state('decorator')
        do_name():  pops the stack if state.kind == 'decorator'.

    'indent': The indentation level for 'class' and 'def' names.

        do_name():      push_state('indent', self.level)
        do_dedent():    pops the stack once or twice if state.value == self.level.
    """

    def __init__(self, kind: str, value: Union[int, str]) -> None:
        # The state's name and its associated datum (e.g. an indentation level).
        self.kind = kind
        self.value = value

    def __repr__(self) -> str:  # pragma: no cover
        return f"State: {self.kind} {self.value!r}"

    __str__ = __repr__


# @+node:ekr.20200107174645.1: *4* class TestOrange (BaseTest)
class TestOrange(BaseTest):
    """
    Tests for the Orange class.

    **Important**: All unit tests assume that black_mode is False.
                   That is, unit tests assume that no blank lines
                   are ever inserted or deleted.
    """
    # Leo directive: the test methods below are inserted here by @others.
    @others


# @+node:ekr.20200115201823.1: *5* TestOrange.blacken
def blacken(self, contents, line_length=None):
    """
    Return the results of running black on contents.

    Skip the test if black is missing, or too old to accept these options.
    """
    if not black:
        self.skipTest('Requires Black')  # pragma: no cover
    # Suppress string normalization!
    try:
        mode = black.FileMode()
        mode.string_normalization = False
        if line_length is not None:
            mode.line_length = line_length
    except TypeError:  # pragma: no cover
        self.skipTest('Requires later version of Black')
    return black.format_str(contents, mode=mode)

# @+node:ekr.20230115150916.1: *5* TestOrange.test_annotations
def test_annotations(self):
    """Orange must agree with black for annotated function definitions."""
    table = (
        # Case 0.
        '''
def annotated_f(s: str = None, x=None) -> None:
    pass
''',
    )
    for i, contents in enumerate(table):
        contents, tokens, tree = self.make_data(contents)
        expected = self.blacken(contents).strip() + '\n'
        results = self.beautify(contents, tokens, tree)
        self.assertEqual(results, expected)

# @+node:ekr.20200219114415.1: *5* TestOrange.test_at_doc_part
def test_at_doc_part(self):
    """Leo @doc parts must pass through the beautifier unchanged."""
    line_length = 40  # For testing.
    contents = """
@ Line 1
Line 2
@c

print('hi')
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(
        contents,
        tokens,
        tree,
        max_join_line_length=line_length,
        max_split_line_length=line_length,
    )
    self.assertEqual(results, expected)

# @+node:ekr.20200116102345.1: *5* TestOrange.test_backslash_newline
def test_backslash_newline(self):
    """
    Backslash-newline continuations must pass through unchanged.

    This test is necessarily different from black, because orange doesn't
    delete semicolon tokens.
    """
    contents = r"""
print(a);\
print(b)
print(c); \
print(d)
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    # expected = self.blacken(contents).rstrip() + '\n'
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200219145639.1: *5* TestOrange.test_blank_lines_after_function
def test_blank_lines_after_function(self):
    """Blank lines and comments around a def must be preserved exactly."""
    contents = """
# Comment line 1.
# Comment line 2.

def spam():
    pass
    # Properly indented comment.

# Comment line3.
# Comment line4.
a = 2
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200220050758.1: *5* TestOrange.test_blank_lines_after_function_2
def test_blank_lines_after_function_2(self):
    """A trailing comment after a def's body must be preserved exactly."""
    contents = """
# Leading comment line 1.
# Leading comment lines 2.

def spam():
    pass

# Trailing comment line.
a = 2
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200220053212.1: *5* TestOrange.test_blank_lines_after_function_3
def test_blank_lines_after_function_3(self):
    """Nested defs with trailing comments must be preserved exactly."""
    # From leoAtFile.py.
    contents = """
def writeAsisNode(self, p):
    print('1')

    def put(s):
        print('2')

    # Trailing comment 1.
    # Trailing comment 2.
    print('3')
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200228074455.1: *5* TestOrange.test_bug_1429
def test_bug_1429(self):
    """With split/join disabled, this try/except block must pass through unchanged (bug #1429)."""
    contents = r'''
def get_semver(tag):
    """bug 1429 docstring"""
    try:
        import semantic_version
        version = str(semantic_version.Version.coerce(tag, partial=True))
            # tuple of major, minor, build, pre-release, patch
            # 5.6b2 --> 5.6-b2
    except(ImportError, ValueError) as err:
        print('\n', err)
        print("""*** Failed to parse Semantic Version from git tag '{0}'.
        Expecting tag name like '5.7b2', 'leo-4.9.12', 'v4.3' for releases.
        This version can't be uploaded to PyPi.org.""".format(tag))
        version = tag
    return version
'''
    contents, tokens, tree = self.make_data(contents)
    expected = contents.strip() + '\n'
    results = self.beautify(contents, tokens, tree, max_join_line_length=0, max_split_line_length=0)
    self.assertEqual(results, expected)

# @+node:ekr.20210318055702.1: *5* TestOrange.test_bug_1851
def test_bug_1851(self):
    """A minimal def must pass through unchanged with split/join disabled (bug #1851)."""
    contents = r'''
def foo(a1):
    pass
'''
    contents, tokens, tree = self.make_data(contents)
    expected = contents.strip() + '\n'
    results = self.beautify(contents, tokens, tree, max_join_line_length=0, max_split_line_length=0)
    self.assertEqual(results, expected)

# @+node:ekr.20200209152745.1: *5* TestOrange.test_comment_indented
def test_comment_indented(self):
    """Unusually indented comments must be preserved exactly."""
    line_length = 40  # For testing.
    table = (
        """
if 1:
    pass
        # An indented comment.
""",
        """
table = (
    # Indented comment.
)
""",
    )

    fails = 0
    for contents in table:
        contents, tokens, tree = self.make_data(contents)
        expected = contents
        if 0:
            dump_contents(contents)
            dump_tokens(tokens)
            # dump_tree(tokens, tree)
        results = self.beautify(
            contents,
            tokens,
            tree,
            max_join_line_length=line_length,
            max_split_line_length=line_length,
        )
        message = f"\n  contents: {contents!r}\n  expected: {expected!r}\n       got: {results!r}"
        if results != expected:  # pragma: no cover
            fails += 1
            print(f"Fail: {fails}\n{message}")
    assert not fails, fails

# @+node:ekr.20230117043931.1: *5* TestOrange.test_comment_space_after_delim
def test_comment_space_after_delim(self):
    """A space is inserted after '#', except for bang lines and '###' comments."""
    line_length = 40  # For testing.
    table = (
        # Test 1.
        (
            """#No space after delim.\n""",
            """# No space after delim.\n""",
        ),
        # Test 2.  Don't change bang lines.
        (
            """#! /usr/bin/env python\n""",
            """#! /usr/bin/env python\n""",
        ),
        # Test 3.  Don't change ### comments.
        (
            """### To do.\n""",
            """### To do.\n""",
        ),
    )
    fails = 0
    for contents, expected in table:
        contents, tokens, tree = self.make_data(contents)
        if 0:
            dump_contents(contents)
            dump_tokens(tokens)
            # dump_tree(tokens, tree)
        results = self.beautify(
            contents,
            tokens,
            tree,
            max_join_line_length=line_length,
            max_split_line_length=line_length,
        )
        message = f"\n  contents: {contents!r}\n  expected: {expected!r}\n       got: {results!r}"
        if results != expected:  # pragma: no cover
            fails += 1
            print(f"Fail: {fails}\n{message}")
    assert not fails, fails

# @+node:ekr.20200210120455.1: *5* TestOrange.test_decorator
def test_decorator(self):
    """Decorated defs, at top level and indented, must pass through unchanged."""
    table = (
        # Case 0.
        """
@my_decorator(1)
def func():
    pass
""",
        # Case 1.
        """
if 1:
    @my_decorator
    def func():
        pass
""",
        # Case 2.
        '''
@g.commander_command('promote')
def promote(self, event=None, undoFlag=True):
    """Make all children of the selected nodes siblings of the selected node."""
''',
    )
    for i, contents in enumerate(table):
        contents, tokens, tree = self.make_data(contents)
        expected = self.prep(contents)
        results = self.beautify(contents, tokens, tree)
        if results != expected:
            g.trace('Fail:', i)  # pragma: no cover
        self.assertEqual(results, expected)

# @+node:ekr.20200211094614.1: *5* TestOrange.test_dont_delete_blank_lines
def test_dont_delete_blank_lines(self):
    """Blank lines inside a class body must never be deleted."""
    line_length = 40  # For testing.
    contents = """
class Test:

    def test_func():

        pass

    a = 2
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(
        contents,
        tokens,
        tree,
        max_join_line_length=line_length,
        max_split_line_length=line_length,
    )
    self.assertEqual(results, expected)

# @+node:ekr.20200116110652.1: *5* TestOrange.test_function_defs
def test_function_defs(self):
    """Orange must agree with black for several function definitions."""
    table = (
        # Case 0.
        """
def f1(a=2 + 5):
    pass
""",
        # Case 2
        """
def f1():
    pass
""",
        # Case 3.
        """
def f1():
    pass
""",
        # Case 4.
        '''
def should_kill_beautify(p):
    """Return True if p.b contains @killbeautify"""
    return 'killbeautify' in g.get_directives_dict(p)
''',
    )
    for i, contents in enumerate(table):
        contents, tokens, tree = self.make_data(contents)
        expected = self.blacken(self.prep(contents))
        results = self.beautify(contents, tokens, tree)
        self.assertEqual(results, expected)

# @+node:ekr.20200116104031.1: *5* TestOrange.test_join_and_strip_condition
def test_join_and_strip_condition(self):
    """A multi-line parenthesized condition must be joined onto one line."""
    contents = self.prep(
        """
        if (
            a == b or
            c == d
        ):
            pass
    """
    )
    expected = self.prep(
        """
        if (a == b or c == d):
            pass
    """
    )
    contents, tokens, tree = self.make_data(contents)
    expected = self.prep(expected)
    # Black also removes parens, which is beyond our scope at present.
    # expected = self.blacken(contents, line_length=40)
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200208041446.1: *5* TestOrange.test_join_leading_whitespace
def test_join_leading_whitespace(self):
    """Joining must respect the leading whitespace of indented lines."""
    line_length = 40  # For testing.
    table = (
        # 1234567890x1234567890x1234567890x1234567890x
        """
if 1:
    print('4444',
        '5555')
""",
        """
if 1:
    print('4444', '5555')\n""",
    )
    fails = 0
    for contents in table:
        contents, tokens, tree = self.make_data(contents)
        if 0:
            dump_contents(contents)
            dump_tokens(tokens)
            # dump_tree(tokens, tree)
        expected = contents
        # expected = self.blacken(contents, line_length=line_length)
        results = self.beautify(
            contents,
            tokens,
            tree,
            max_join_line_length=line_length,
            max_split_line_length=line_length,
        )
        message = f"\n  contents: {contents!r}\n  expected: {expected!r}\n       got: {results!r}"
        if results != expected:  # pragma: no cover
            fails += 1
            print(f"Fail: {fails}\n{message}")
    assert not fails, fails

# @+node:ekr.20200121093134.1: *5* TestOrange.test_join_lines
def test_join_lines(self):
    """Short continuation lines must be joined; already-joined lines unchanged."""
    # Except where noted, all entries are expected values....
    line_length = 40  # For testing.
    table = (
        # 1234567890x1234567890x1234567890x1234567890x
        """print('4444',\n    '5555')""",
        """print('4444', '5555')\n""",
    )
    fails = 0
    for contents in table:
        contents, tokens, tree = self.make_data(contents)
        if 0:
            dump_contents(contents)
            dump_tokens(tokens)
            # dump_tree(tokens, tree)
        expected = contents
        results = self.beautify(
            contents,
            tokens,
            tree,
            max_join_line_length=line_length,
            max_split_line_length=line_length,
        )
        message = f"\n  contents: {contents!r}\n  expected: {expected!r}\n    orange: {results!r}"
        if results != expected:  # pragma: no cover
            fails += 1
            print(f"Fail: {fails}\n{message}")
    self.assertEqual(fails, 0)

# @+node:ekr.20200210051900.1: *5* TestOrange.test_join_suppression
def test_join_suppression(self):
    """A short multi-line call inside a class body must be joined."""
    contents = self.prep("""
        class T:
            a = 1
            print(
               a
            )
    """)
    expected = self.prep("""
        class T:
            a = 1
            print(a)
    """)
    contents, tokens, tree = self.make_data(contents)
    expected = self.prep(expected)
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200207093606.1: *5* TestOrange.test_join_too_long_lines
def test_join_too_long_lines(self):
    """Lines must NOT be joined when the result would exceed the limit."""
    # Except where noted, all entries are expected values....
    line_length = 40  # For testing.
    table = (
        # 1234567890x1234567890x1234567890x1234567890x
        (
            """print('aaaaaaaaaaaa',\n    'bbbbbbbbbbbb', 'cccccccccccccccc')""",
            """print('aaaaaaaaaaaa',\n    'bbbbbbbbbbbb', 'cccccccccccccccc')\n""",
        ),
    )
    fails = 0
    for contents, expected in table:
        contents, tokens, tree = self.make_data(contents)
        if 0:
            dump_contents(contents)
            dump_tokens(tokens)
            # dump_tree(tokens, tree)
        results = self.beautify(
            contents,
            tokens,
            tree,
            max_join_line_length=line_length,
            max_split_line_length=line_length,
        )
        message = f"\n  contents: {contents!r}\n  expected: {expected!r}\n       got: {results!r}"
        if results != expected:  # pragma: no cover
            fails += 1
            print(f"Fail: {fails}\n{message}")
    assert not fails, fails

# @+node:ekr.20220327131225.1: *5* TestOrange.test_leading_stars
def test_leading_stars(self):
    """'*args' and '**kwargs' in a def must keep no blank after the stars (#2533)."""
    contents = (
        """
        def f(
            arg1,
            *args,
            **kwargs
        ):
            pass
    """.strip()
        + '\n'
    )

    expected = (
        """
        def f(arg1, *args, **kwargs):
            pass
    """.strip()
        + '\n'
    )
    contents, tokens, tree = self.make_data(contents)
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(expected, results)

# @+node:ekr.20200108075541.1: *5* TestOrange.test_leo_sentinels
def test_leo_sentinels_1(self):
    """A Leo sentinel comment before a def must pass through unchanged."""
    # Careful: don't put a sentinel into the file directly.
    # That would corrupt leoAst.py.
    sentinel = '#@+node:ekr.20200105143308.54: ** test'
    contents = f"""
{sentinel}
def spam():
    pass
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200209155457.1: *5* TestOrange.test_leo_sentinels_2
def test_leo_sentinels_2(self):
    """A Leo sentinel comment before a class must pass through unchanged."""
    # Careful: don't put a sentinel into the file directly.
    # That would corrupt leoAst.py.
    sentinel = '#@+node:ekr.20200105143308.54: ** test'
    contents = f"""
{sentinel}
class TestClass:
    pass
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200108082833.1: *5* TestOrange.test_lines_before_class
def test_lines_before_class(self):
    """Code immediately before a class statement must pass through unchanged."""
    contents = """
a = 2
class aClass:
    pass
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200110014220.86: *5* TestOrange.test_multi_line_pet_peeves
def test_multi_line_pet_peeves(self):
    """PEP 8 pet peeves: spacing around ':', ';' and parenthesized conditions."""
    contents = """
if x == 4: pass
if x == 4 : pass
print (x, y); x, y = y, x
print (x , y) ; x , y = y , x
if(1):
    pass
elif(2):
    pass
while(3):
    pass
"""
    # At present Orange doesn't split lines...
    expected = """
if x == 4: pass
if x == 4: pass
print(x, y); x, y = y, x
print(x, y); x, y = y, x
if (1):
    pass
elif (2):
    pass
while (3):
    pass
"""
    contents, tokens, tree = self.make_data(contents)
    expected = self.adjust_expected(expected)
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200110014220.95: *5* TestOrange.test_one_line_pet_peeves
def test_one_line_pet_peeves(self):
    """Orange must agree with black on a large table of one-line PEP 8 pet peeves."""
    # See https://peps.python.org/pep-0008/#pet-peeves
    # See https://peps.python.org/pep-0008/#other-recommendations

    tag = 'test_one_line_pet_peeves'
    # Except where noted, all entries are expected values....
    if 0:
        # Test fails or recents...
        table = (
            # """a[: 1 if True else 2 :]""",
            """a[:-1]""",
        )
    else:
        table = (
            # Assignments...
            # Slices (colons)...
            """a[:-1]""",
            """a[: 1 if True else 2 :]""",
            """a[1 : 1 + 2]""",
            """a[lower:]""",
            """a[lower::]""",
            """a[:upper]""",
            """a[:upper:]""",
            """a[::step]""",
            """a[lower:upper:]""",
            """a[lower:upper:step]""",
            """a[lower + offset : upper + offset]""",
            """a[: upper_fn(x) :]""",
            """a[: upper_fn(x) : step_fn(x)]""",
            """a[:: step_fn(x)]""",
            """a[: upper_fn(x) :]""",
            """a[: upper_fn(x) : 2 + 1]""",
            """a[:]""",
            """a[::]""",
            """a[1:]""",
            """a[1::]""",
            """a[:2]""",
            """a[:2:]""",
            """a[::3]""",
            """a[1:2]""",
            """a[1:2:]""",
            """a[:2:3]""",
            """a[1:2:3]""",
            # * and **, inside and outside function calls.
            """a = b * c""",
            # Now done in test_star_star_operator
            # """a = b ** c""",  # Black has changed recently.
            """f(*args)""",
            """f(**kwargs)""",
            """f(*args, **kwargs)""",
            """f(a, *args)""",
            """f(a=2, *args)""",
            # Calls...
            """f(-1)""",
            """f(-1 < 2)""",
            """f(1)""",
            """f(2 * 3)""",
            """f(2 + name)""",
            """f(a)""",
            """f(a.b)""",
            """f(a=2 + 3, b=4 - 5, c= 6 * 7, d=8 / 9, e=10 // 11)""",
            """f(a[1 + 2])""",
            """f({key: 1})""",
            """t = (0,)""",
            """x, y = y, x""",
            # Dicts...
            """d = {key: 1}""",
            """d['key'] = a[i]""",
            # Trailing comments: expect two spaces.
            """whatever # comment""",
            """whatever  # comment""",
            """whatever   # comment""",
            # Word ops...
            """v1 = v2 and v3 if v3 not in v4 or v5 in v6 else v7""",
            """print(v7 for v8 in v9)""",
            # Unary ops...
            """v = -1 if a < b else -2""",
            # Returns...
            """return -1""",
        )
    fails = 0
    for i, contents in enumerate(table):
        description = f"{tag} part {i}"
        contents, tokens, tree = self.make_data(contents, description=description)
        expected = self.blacken(contents)
        results = self.beautify(contents, tokens, tree, filename=description)
        if results != expected:  # pragma: no cover
            fails += 1
            print('')
            print(
                f"TestOrange.test_one_line_pet_peeves: FAIL {fails}\n"
                f"  contents: {contents.rstrip()}\n"
                f"     black: {expected.rstrip()}\n"
                f"    orange: {results.rstrip() if results else 'None'}"
            )
    self.assertEqual(fails, 0)

# @+node:ekr.20220327135448.1: *5* TestOrange.test_relative_imports
def test_relative_imports(self):
    """Blanks after relative-import dots must be removed (#2533)."""
    contents = self.prep(
        """
        from .module1 import w
        from . module2 import x
        from ..module1 import y
        from .. module2 import z
        from . import a
        from.import b
        from leo.core import leoExternalFiles
        import leo.core.leoGlobals as g
    """
    )

    expected = self.prep(
        """
        from .module1 import w
        from .module2 import x
        from ..module1 import y
        from ..module2 import z
        from . import a
        from . import b
        from leo.core import leoExternalFiles
        import leo.core.leoGlobals as g
    """
    )

    contents, tokens, tree = self.make_data(contents)
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(expected, results)

# @+node:ekr.20200210050646.1: *5* TestOrange.test_return
def test_return(self):
    """Orange must agree with black for a bare 'return []' statement."""
    contents = """return []"""
    expected = self.blacken(contents)
    contents, tokens, tree = self.make_data(contents)
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200107174742.1: *5* TestOrange.test_single_quoted_string
def test_single_quoted_string(self):
    """Single-quoted strings must not be normalized to double quotes."""
    contents = """print('hi')"""
    # blacken suppresses string normalization.
    expected = self.blacken(contents)
    contents, tokens, tree = self.make_data(contents)
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected)

# @+node:ekr.20200117180956.1: *5* TestOrange.test_split_lines
def test_split_lines(self):
    """Orange must split long lines the same way black does at width 40."""
    line_length = 40  # For testing.
    table = (
        # 1234567890x1234567890x1234567890x1234567890x
        """
if 1:
    print('1111111111', '2222222222', '3333333333')
""",
        """print('aaaaaaaaaaaaa', 'bbbbbbbbbbbbbb', 'cccccc')""",
        """print('aaaaaaaaaaaaa', 'bbbbbbbbbbbbbb', 'cccccc', 'ddddddddddddddddd')""",
    )
    fails = 0
    for contents in table:
        contents, tokens, tree = self.make_data(contents)
        if 0:  # Debug scaffolding: dump the token list when investigating a failure.
            dump_tokens(tokens)
            # dump_tree(tokens, tree)
        expected = self.blacken(contents, line_length=line_length)
        results = self.beautify(
            contents,
            tokens,
            tree,
            max_join_line_length=line_length,
            max_split_line_length=line_length,
        )
        message = f"\n  contents: {contents!s}\n     black: {expected!s}\n    orange: {results!s}"
        if results != expected:  # pragma: no cover
            fails += 1
            print(f"Fail: {fails}\n{message}")
    self.assertEqual(fails, 0)

# @+node:ekr.20200210073227.1: *5* TestOrange.test_split_lines_2
def test_split_lines_2(self):
    """Split a long 'if not any(...)' line at width 40 (intentionally unlike black)."""
    line_length = 40  # For testing.

    # Different from how black handles things.
    contents = """
if not any([z.kind == 'lt' for z in line_tokens]):
    return False
"""

    expected = self.prep(
        """
        if not any(
            [z.kind == 'lt' for z in line_tokens]):
            return False
    """
    )

    fails = 0
    contents, tokens, tree = self.make_data(contents)
    results = self.beautify(
        contents,
        tokens,
        tree,
        max_join_line_length=line_length,
        max_split_line_length=line_length,
    )
    message = f"\n  contents: {contents!r}\n  expected: {expected!r}\n       got: {results!r}"
    if results != expected:  # pragma: no cover
        fails += 1
        print(f"Fail: {fails}\n{message}")
    self.assertEqual(fails, 0)

# @+node:ekr.20200219144837.1: *5* TestOrange.test_split_lines_3
def test_split_lines_3(self):
    """Split a long call with a nested tuple argument, one argument per line."""
    line_length = 40  # For testing.
    # Different from how black handles things.
    contents = """print('eee', ('fffffff, ggggggg', 'hhhhhhhh', 'iiiiiii'), 'jjjjjjj', 'kkkkkk')"""

    # This is a bit different from black, but it's good enough for now.
    expected = self.prep(
        """
        print(
            'eee',
            ('fffffff, ggggggg', 'hhhhhhhh', 'iiiiiii'),
            'jjjjjjj',
            'kkkkkk',
        )
    """
    )
    fails = 0
    contents, tokens, tree = self.make_data(contents)
    results = self.beautify(
        contents,
        tokens,
        tree,
        max_join_line_length=line_length,
        max_split_line_length=line_length,
    )
    message = f"\n  contents: {contents!r}\n  expected: {expected!r}\n       got: {results!r}"
    if results != expected:  # pragma: no cover
        fails += 1
        print(f"Fail: {fails}\n{message}")
    self.assertEqual(fails, 0)

# @+node:ekr.20220401191253.1: *5* TestOrange.test_star_star_operator
def test_star_star_operator(self):
    """The ** operator keeps its surrounding spaces (more permissive than pet peeves)."""
    source = """a = b ** c"""
    source, tokens, tree = self.make_data(source)
    # Don't rely on black for this test: expect the input unchanged.
    expected = source
    got = self.beautify(source, tokens, tree)
    self.assertEqual(got, expected)

# @+node:ekr.20200119155207.1: *5* TestOrange.test_sync_tokens
def test_sync_tokens(self):
    """A one-line 'if' statement is synchronized token-for-token, not split."""
    source = """if x == 4: pass"""
    # At present Orange doesn't split lines...
    raw_expected = """if x == 4: pass"""
    source, tokens, tree = self.make_data(source)
    expected = self.adjust_expected(raw_expected)
    got = self.beautify(source, tokens, tree)
    self.assertEqual(got, expected)

# @+node:ekr.20200209161226.1: *5* TestOrange.test_ternary
def test_ternary(self):
    """Ternary (conditional) expressions pass through unchanged."""
    source = """print(2 if name == 'class' else 1)"""
    source, tokens, tree = self.make_data(source)
    expected = source
    got = self.beautify(source, tokens, tree)
    self.assertEqual(got, expected)

# @+node:ekr.20200211093359.1: *5* TestOrange.test_verbatim
def test_verbatim(self):
    """Code between @nobeautify and @beautify must pass through untouched."""
    line_length = 40  # For testing.

    # The hand-aligned columns below must survive beautification verbatim.
    contents = self.prep("""
@nobeautify

def addOptionsToParser(self, parser, trace_m):

    add = parser.add_option

    def add_bool(option, help, dest=None):
        add(option, action='store_true', dest=dest, help=help)

    add_bool('--diff',          'use Leo as an external git diff')
    # add_bool('--dock',          'use a Qt dock')
    add_bool('--fullscreen',    'start fullscreen')
    add_bool('--init-docks',    'put docks in default positions')
    # Multiple bool values.
    add('-v', '--version', action='store_true',
        help='print version number and exit')

# From leoAtFile.py
noDirective     =  1 # not an at-directive.
allDirective    =  2 # at-all (4.2)
docDirective    =  3 # @doc.

@beautify
""")
    contents, tokens, tree = self.make_data(contents)
    expected = contents
    results = self.beautify(
        contents,
        tokens,
        tree,
        max_join_line_length=line_length,
        max_split_line_length=line_length,
    )
    self.assertEqual(results, expected, msg=contents)

# @+node:ekr.20200211094209.1: *5* TestOrange.test_verbatim_with_pragma
def test_verbatim_with_pragma(self):
    """Code between 'pragma: no beautify' and 'pragma: beautify' passes through untouched."""
    line_length = 40  # For testing.
    contents = """
# pragma: no beautify

def addOptionsToParser(self, parser, trace_m):

    add = parser.add_option

    def add_bool(option, help, dest=None):
        add(option, action='store_true', dest=dest, help=help)

    add_bool('--diff',          'use Leo as an external git diff')
    # add_bool('--dock',          'use a Qt dock')
    add_bool('--fullscreen',    'start fullscreen')
    add_other('--window-size',  'initial window size (height x width)', m='SIZE')
    add_other('--window-spot',  'initial window position (top x left)', m='SPOT')
    # Multiple bool values.
    add('-v', '--version', action='store_true',
        help='print version number and exit')

# pragma: beautify
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents
    results = self.beautify(
        contents,
        tokens,
        tree,
        max_join_line_length=line_length,
        max_split_line_length=line_length,
    )
    self.assertEqual(results, expected, msg=contents)

# @+node:ekr.20200729083027.1: *5* TestOrange.verbatim2
def test_verbatim2(self):
    """An @doc part following @beautify passes through untouched."""
    contents = """
@beautify
@ Starts doc part
More doc part.
The @c ends the doc part.
@c
"""
    contents, tokens, tree = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens, tree)
    self.assertEqual(results, expected, msg=contents)

# @+node:ekr.20240105153425.42: *4* class TestTokenBasedOrange (BaseTest)
# NOTE: '@others' below is a Leo directive, not Python; Leo expands it to
# the TestTBO.* methods that follow in this file.
class TestTokenBasedOrange(BaseTest):
    """
    Tests for the TokenBasedOrange class.

    Note: TokenBasedOrange never inserts or deletes lines.
    """

    @others


# @+node:ekr.20240105153425.43: *5* TestTBO.blacken
def blacken(self, contents):
    """Return contents as reformatted by black, with string normalization off."""
    if not black:
        self.skipTest('Requires Black')  # pragma: no cover
    try:
        black_mode = black.FileMode()
        # Keep quotes as-is: suppress black's string normalization!
        black_mode.string_normalization = False
    except TypeError:  # pragma: no cover
        self.skipTest('Requires newer version of Black')
    return black.format_str(contents, mode=black_mode)

# @+node:ekr.20240116104552.1: *5* TestTBO.slow_test_leoColorizer
def slow_test_leoApp(self) -> None:  # pragma: no cover
    """
    Beautify core/leoColorizer.py and require a byte-for-byte round trip.

    This test is no longer needed: the tbo command beautifies all files.
    NOTE(review): the method name says leoApp but the target file is
    leoColorizer.py (matching the headline) — confirm which was intended.
    """
    filename = 'leoColorizer.py'
    test_dir = os.path.dirname(__file__)
    path = g.os_path_finalize_join(test_dir, '..', '..', 'core', filename)
    assert os.path.exists(path), repr(path)

    tbo = TokenBasedOrange()
    tbo.filename = path

    if 0:  # Diff only.
        tbo.beautify_file(path)
    else:
        contents, tokens = self.make_file_data(path)
        expected = contents
        results = self.beautify(contents, tokens)

        # Compare with uniform line endings so platform newlines don't matter.
        regularized_expected = tbo.regularize_newlines(expected)
        regularized_results = tbo.regularize_newlines(results)

        if regularized_expected != regularized_results:
            tbo.show_diffs(regularized_expected, regularized_results)

        assert regularized_expected == regularized_results

# @+node:ekr.20240105153425.45: *5* TestTBO.test_annotations
def test_annotations(self):  # Required for full coverage.
    """Variable and parameter annotations must match black's spacing."""
    table = (
        # Case 0.
        """s: str = None\n""",
        # Case 1.
        (
            """
            def annotated_f(s: str = None, x=None) -> None:
                pass
        """
        ),
        # Case 2.
        (
            """
            def f1():
                self.rulesetName : str = ''
        """
        ),
    )
    for i, contents in enumerate(table):
        contents, tokens = self.make_data(contents)
        expected = self.blacken(contents).rstrip() + '\n'
        results = self.beautify(contents, tokens)
        if results != expected:  # pragma: no cover
            g.printObj(contents, tag='Contents')
            g.printObj(expected, tag='Expected (blackened)')
            g.printObj(results, tag='Results')
        self.assertEqual(results, expected)

# @+node:ekr.20240105153425.46: *5* TestTBO.test_at_doc_part
def test_at_doc_part(self):
    """An @doc part (@ ... @c) passes through the beautifier verbatim."""
    source = """
@ Line 1
Line 2
@c

print('hi')
"""
    source, tokens = self.make_data(source)
    expected = source.rstrip() + '\n'
    got = self.beautify(source, tokens)
    self.assertEqual(got, expected)

# @+node:ekr.20240105153425.47: *5* TestTBO.test_backslash_newline
def test_backslash_newline(self):
    """
    Backslash-continued lines must round-trip unchanged.

    This test is necessarily different from black, because orange doesn't
    delete semicolon tokens.
    """
    contents = r"""
print(a);\
print(b)
print(c); \
print(d)
"""
    contents, tokens = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    # expected = self.blacken(contents).rstrip() + '\n'
    results = self.beautify(contents, tokens)
    # g.printObj(tokens, tag='Tokens')
    # g.printObj(results, tag='Results')
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.48: *5* TestTBO.test_blank_lines_after_function
def test_blank_lines_after_function(self):
    """Blank lines and comments around a function must be preserved."""
    contents = """
# Comment line 1.
# Comment line 2.

def spam():
    pass
    # Properly indented comment.

# Comment line3.
# Comment line4.
a = 2
"""
    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.49: *5* TestTBO.test_blank_lines_after_function_2
def test_blank_lines_after_function_2(self):
    """Leading and trailing comments around a function must be preserved."""
    source = """
# Leading comment line 1.
# Leading comment lines 2.

def spam():
    pass

# Trailing comment line.
a = 2
"""
    source, tokens = self.make_data(source)
    expected = source
    got = self.beautify(source, tokens)
    self.assertEqual(got, expected)

# @+node:ekr.20240105153425.50: *5* TestTBO.test_blank_lines_after_function_3
def test_blank_lines_after_function_3(self):
    """Blank lines after a nested def must be preserved."""
    # From leoAtFile.py.
    contents = """
def writeAsisNode(self, p):
    print('1')

    def put(s):
        print('2')

    # Trailing comment 1.
    # Trailing comment 2.
    print('3')
"""

    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.53: *5* TestTBO.test_comment_indented
def test_comment_indented(self):
    """Indented (even over-indented) comments keep their indentation."""
    table = (
        """
        if 1:
            pass
                # An indented comment.
        """,
        """
        table = (
            # Regular comment.
        )
        """,
    )
    fails = 0
    for contents in table:
        contents, tokens = self.make_data(contents)
        expected = contents
        results = self.beautify(contents, tokens)
        if results != expected:  # pragma: no cover
            fails += 1
            print(f"Fail: {fails}")
            g.printObj(results, tag='Results')
            g.printObj(expected, tag='Expected')
    # assertEqual (not a bare assert): consistent with sibling tests
    # and not stripped under python -O.
    self.assertEqual(fails, 0)

# @+node:ekr.20240105153425.54: *5* TestTBO.test_comment_space_after_delim
def test_comment_space_after_delim(self):
    """A space is inserted after '#', except for bang lines and '###' comments."""
    table = (
        # Test 1.
        (
            """#No space after delim.\n""",
            """# No space after delim.\n""",
        ),
        # Test 2.  Don't change bang lines.
        (
            """#! /usr/bin/env python\n""",
            """#! /usr/bin/env python\n""",
        ),
        # Test 3.  Don't change ### comments.
        (
            """### To do.\n""",
            """### To do.\n""",
        ),
    )
    fails = 0
    for contents, expected in table:
        contents, tokens = self.make_data(contents)
        results = self.beautify(contents, tokens)
        message = f"\n  contents: {contents!r}\n  expected: {expected!r}\n       got: {results!r}"
        if results != expected:  # pragma: no cover
            fails += 1
            print(f"Fail: {fails}\n{message}")
    # assertEqual (not a bare assert): consistent with sibling tests
    # and not stripped under python -O.
    self.assertEqual(fails, 0)

# @+node:ekr.20240105153425.55: *5* TestTBO.test_decorators
def test_decorators(self):
    """Decorated defs (top level and nested) must round-trip unchanged."""
    table = (
        # Case 0.
        """
@my_decorator(1)
def func():
    pass
""",
        # Case 1.
        """
if 1:
    @my_decorator
    def func():
        pass
""",
        # Case 2.
        '''
@g.commander_command('promote')
def promote(self, event=None, undoFlag=True):
    """Make all children of the selected nodes siblings of the selected node."""
''',
    )
    for i, contents in enumerate(table):
        contents, tokens = self.make_data(contents)
        expected = contents
        results = self.beautify(contents, tokens)
        if results != expected:  # pragma: no cover
            g.trace('Fail:', i)
            g.printObj(contents, tag='Contents')
            g.printObj(results, tag='Results')
        self.assertEqual(results, expected)

# @+node:ekr.20240105153425.56: *5* TestTBO.test_dont_delete_blank_lines
def test_dont_delete_blank_lines(self):
    """Blank lines inside a class body must survive beautification."""
    source = """
class Test:

    def test_func():

        pass

    a = 2
"""
    source, tokens = self.make_data(source)
    expected = source.rstrip() + '\n'
    got = self.beautify(source, tokens)
    self.assertEqual(got, expected)

# @+node:ekr.20250506092303.1: *5* TestTBO.test_flake8_errors
def test_flake8_errors(self):
    """Regression tests for constructs that once drew flake8 complaints."""
    tag = 'test_flake8_errors'

    # Part 1: tests w/o black.  Each entry is its own expected value.
    non_black_table = (
        # leoAbbrevCommands.py: line 160.
        """c.abbrev_subst_env = {'c': c, 'g': g, '_values': {}, }\n""",
    )
    for i, contents in enumerate(non_black_table):
        description = f"{tag} (w/o black) part {i}"
        contents, tokens = self.make_data(contents, description=description)
        expected = contents
        results = self.beautify(contents, tokens, filename=description)
        if 0:  # pragma: no cover
            g.printObj(tokens, tag=description)
            g.printObj(expected, tag='Expected')
            g.printObj(results, tag='Results')
        if results != expected:  # pragma: no cover
            print('')
            print(
                f"TestTokenBasedOrange.{tag}: FAIL\n"
                f"  contents: {contents.rstrip()}\n"
                f"     black: {expected.rstrip()}\n"
                f"    orange: {results.rstrip() if results else 'None'}"
            )
        self.assertEqual(results, expected, msg=description)

    # Part 2: compare against black's output directly.
    # All entries are expected values...
    black_table = (
        # leoserver.py: line 1575.
        """s = s[len('@nocolor') :]\n""",
        # leoGlobals.py.
        """g.pr(f"{self.calledDict.get(key,0):d}", key)""",  # line 1805
        """tracing_tags [id(self)] = tag""",  # line 1913.
        # make_stub_files.py.
        """def match(self, s, trace=False):\n    pass""",
    )
    for i, contents in enumerate(black_table):
        description = f"{tag} (black) part {i}"
        contents, tokens = self.make_data(contents, description=description)
        expected = self.blacken(contents)
        results = self.beautify(contents, tokens, filename=description)
        if results != expected:  # pragma: no cover
            print('')
            # g.printObj(tokens, tag='Tokens')
            g.printObj(expected, tag='Expected')
            g.printObj(results, tag='Results')
        self.assertEqual(results, expected, msg=description)

# @+node:ekr.20240105153425.65: *5* TestTBO.test_leo_sentinels
def test_leo_sentinels_1(self):
    """A Leo sentinel comment before a def must pass through unchanged."""
    # Careful: don't put a sentinel into the file directly.
    # That would corrupt leoAst.py.
    sentinel = '#@+node:ekr.20200105143308.54: ** test'
    contents = f"""
{sentinel}
def spam():
    pass
"""
    contents, tokens = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.66: *5* TestTBO.test_leo_sentinels_2
def test_leo_sentinels_2(self):
    """A Leo sentinel comment before a class must pass through unchanged."""
    # Careful: don't put a sentinel into the file directly.
    # That would corrupt leoAst.py.
    sentinel = '#@+node:ekr.20200105143308.54: ** test'
    contents = f"""
{sentinel}
class TestClass:
    pass
"""
    contents, tokens = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.67: *5* TestTBO.test_lines_before_class
def test_lines_before_class(self):
    """Code immediately preceding a class statement must round-trip unchanged."""
    source = """
a = 2
class aClass:
    pass
"""
    source, tokens = self.make_data(source)
    expected = source
    got = self.beautify(source, tokens)
    self.assertEqual(got, expected)

# @+node:ekr.20240128181802.1: *5* TestTBO.test_multi_line_imports
def test_multi_line_imports(self):
    """Parenthesized and backslash-continued imports must round-trip unchanged."""
    # The space between 'import' and '(' is correct.
    contents = """
        from .module1 import (
            w1,
            w2,
        )
        from .module1 import \\
            w
        from .module1 import (
            w1,
            w2,
        )
        import leo.core.leoGlobals \\
            as g
    """
    contents, tokens = self.make_data(contents)
    expected = contents.strip() + '\n'
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.68: *5* TestTBO.test_multi_line_pet_peeves
def test_multi_line_pet_peeves(self):
    """Spacing pet peeves across several statements and compound headers."""
    contents = """
        if x == 4: pass
        if x == 4 : pass
        print (x, y); x, y = y, x
        print (x , y) ; x , y = y , x
        if(1):
            pass
        elif(2):
            pass
        while(3):
            pass
    """
    # At present Orange doesn't split lines...
    expected = self.prep(
        """
            if x == 4: pass
            if x == 4: pass
            print(x, y); x, y = y, x
            print(x, y); x, y = y, x
            if (1):
                pass
            elif (2):
                pass
            while (3):
                pass
        """
    )
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected)

# @+node:ekr.20240420050005.1: *5* TestTBO.test_multi_line_statement
def test_multi_line_statements(self):
    """Multi-line defs and returns (including trailing '])') must round-trip."""
    contents = """
        if 1:  # Simulate indent.
            def orange_command(
                s: str,
            ) -> None:
                pass
        if 1:  # The trailing ]) causes the problem.
            return ''.join(
                ['%s%s' % (sep, self.dump_ast(z, level + 1)) for z in node])
    """

    expected = self.prep(
        """
        if 1:  # Simulate indent.
            def orange_command(
                s: str,
            ) -> None:
                pass
        if 1:  # The trailing ]) causes the problem.
            return ''.join(
                ['%s%s' % (sep, self.dump_ast(z, level + 1)) for z in node])
        """
    )
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    self.assertEqual(results, expected)

# @+node:ekr.20250202043822.1: *5* TestTBO.test_nested_fstrings
def test_nested_fstrings(self):
    """An f-string nested inside another f-string must round-trip unchanged."""
    # https://github.com/leo-editor/leo-editor/issues/4289
    contents = """
        diff_str = f"{CMPS[cmp(old, new)]}{(diff and f'{diff:.2f}') or ''}"
    """

    contents, tokens = self.make_data(contents)
    expected = contents.rstrip() + '\n'
    results = self.beautify(contents, tokens)
    # g.printObj(tokens, tag='Tokens')
    # g.printObj(results, tag='Results')
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.69: *5* TestTBO.test_one_line_pet_peeves
def test_one_line_pet_peeves(self):
    """Each table entry must beautify to exactly black's output."""
    # One-line pet peeves, except those involving slices and unary ops.

    # See https://peps.python.org/pep-0008/#pet-peeves
    # See https://peps.python.org/pep-0008/#other-recommendations

    tag = 'test_one_line_pet_peeves'

    # Except where noted, all entries are expected values...
    table = (
        # #4420
        """default, *_ = node.infer()""",  # From pylint.
        # Assignments...
        """a = b * c""",
        """a = b + c""",
        """a = b - c""",
        # * and **, inside and outside function calls.
        """f(*args)""",
        """f(**kwargs)""",
        """f(*args, **kwargs)""",
        """f(a, *args)""",
        """f(a=2, *args)""",
        # Calls...
        """f(-1)""",
        """f(-1 < 2)""",
        """f(1)""",
        """f(2 * 3)""",
        """f(2 + name)""",
        """f(a)""",
        """f(a.b)""",
        """f(a=2 + 3, b=4 - 5, c= 6 * 7, d=8 / 9, e=10 // 11)""",
        """f(a[1 + 2])""",
        """f({key: 1})""",
        """t = (0,)""",
        """x, y = y, x""",
        # Dicts...
        """d = {key: 1}""",
        """d['key'] = a[i]""",
        # Trailing comments: expect two spaces.
        """whatever # comment""",
        """whatever  # comment""",
        """whatever   # comment""",
        # Word ops...
        """v1 = v2 and v3 if v3 not in v4 or v5 in v6 else v7""",
        """print(v7 for v8 in v9)""",
        # Returns...
        """return -1""",
    )
    for i, contents in enumerate(table):
        description = f"{tag} part {i}"
        contents, tokens = self.make_data(contents, description=description)
        expected = self.blacken(contents)
        results = self.beautify(contents, tokens, filename=description)
        if results != expected:  # pragma: no cover
            print('')
            print(
                f"TestTokenBasedOrange.{tag}: FAIL\n"
                f"  contents: {contents.rstrip()}\n"
                f"     black: {expected.rstrip()}\n"
                f"    orange: {results.rstrip() if results else 'None'}"
            )
        self.assertEqual(results, expected, msg=description)

# @+node:ekr.20240128002403.1: *5* TestTBO.test_percent_op
def test_percent_op(self):
    """The % (format) operator must get black-compatible spacing."""
    # leo/plugins/writers/basewriter.py, line 38
    source = """at.os('%s@+node:%s%s' % (delim, s, delim2))"""
    source, tokens = self.make_data(source)
    expected = self.blacken(source).rstrip() + '\n'
    got = self.beautify(source, tokens)
    self.assertEqual(got, expected)

# @+node:ekr.20240105153425.70: *5* TestTBO.test_relative_imports
def test_relative_imports(self):
    """TBO must normalize spacing in relative imports, including 'from ..'."""
    # #2533.
    contents = """
        from .module1 import w
        from . module2 import x
        from ..module1 import y
        from .. module2 import z
        from . import a
        from.import b
        from .. import c
        from..import d
        from leo.core import leoExternalFiles
        import leo.core.leoGlobals as g
    """
    # Expected: dots attach to the module name; bare imports get
    # single spaces around the dots.
    expected = self.prep(
        """
        from .module1 import w
        from .module2 import x
        from ..module1 import y
        from ..module2 import z
        from . import a
        from . import b
        from .. import c
        from .. import d
        from leo.core import leoExternalFiles
        import leo.core.leoGlobals as g
    """
    )
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        # g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    self.assertEqual(expected, results)

# @+node:ekr.20240109090653.1: *5* TestTBO.test_slice
def test_slice(self):
    """Slice spacing must match black for every table entry."""
    # Test one-line pet peeves involving slices.

    # See https://peps.python.org/pep-0008/#pet-peeves
    # See https://peps.python.org/pep-0008/#other-recommendations

    tag = 'test_slice'

    # Except where noted, all entries are expected values....
    table = (
        # Differences between leoAst.py and leoTokens.py.
        # tbo.cmd changes several files, including these.
        # In all cases, the differences make leoTokens.py *more*
        # compatible with Black than leoAst.py!
        # #4420: From pylint.
        """base_name = ".".join(package.split(".", self.depth)[: self.depth])""",
        """ws = contents[self.prev_offset : s_offset]""",  # Follow-on test.
        # From leoAst.py.
        """val = val[:i] + '# ' + val[i + 1 :]\n""",
        # From leoApp.py.
        """
                for name in rf.getRecentFiles()[:n]:
                    pass
            """,
        # From leoUndo.py.
        """s.extend(body_lines[-trailing:])\n""",
        # From leoTokens.py.
        # The expected value of the two last lines is the last line.
        """
                if line1.startswith(tag) and line1.endswith(tag2):
                    e = line1[n1 : -n2].strip()
                    e = line1[n1:-n2].strip()
            """,
        # Legacy tests...
        """a[:-1]""",
        """a[: 1 if True else 2 :]""",
        """a[1 : 1 + 2]""",
        """a[lower:]""",
        """a[lower::]""",
        """a[:upper]""",
        """a[:upper:]""",
        """a[::step]""",
        """a[lower:upper:]""",
        """a[lower:upper:step]""",
        """a[lower + offset : upper + offset]""",
        """a[: upper_fn(x) :]""",
        """a[: upper_fn(x) : step_fn(x)]""",
        """a[:: step_fn(x)]""",
        """a[: upper_fn(x) :]""",
        """a[: upper_fn(x) : 2 + 1]""",
        """a[:]""",
        """a[::]""",
        """a[1:]""",
        """a[1::]""",
        """a[:2]""",
        """a[:2:]""",
        """a[::3]""",
        """a[1:2]""",
        """a[1:2:]""",
        """a[:2:3]""",
        """a[1:2:3]""",
    )

    for i, contents in enumerate(table):
        description = f"{tag} part {i}"
        contents, tokens = self.make_data(contents, description=description)
        expected = self.blacken(contents)
        results = self.beautify(contents, tokens, filename=description)
        if results != expected:  # pragma: no cover
            print('')
            print('TestTokenBasedOrange')
            # g.printObj(contents, tag='Contents')
            g.printObj(expected, tag='Expected (Black)')
            g.printObj(results, tag='Results')
        self.assertEqual(expected, results)

# @+node:ekr.20250505153051.1: *5* TestTBO.test_spaces_after_keyword
def test_spaces_after_keyword(self):
    """Multiple spaces after a keyword ('return  ') must collapse to one."""
    contents = """return  ''\n"""
    expected = """return ''\n"""
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    # assertEqual (not a bare assert): consistent with sibling tests
    # and not stripped under python -O.
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.76: *5* TestTBO.test_star_star_operator
def test_star_star_operator(self):
    """The ** operator keeps its spaces; black is deliberately not consulted."""
    source = """a = b ** c"""
    source, tokens = self.make_data(source)
    expected = source
    got = self.beautify(source, tokens)
    self.assertEqual(got, expected)

# @+node:ekr.20250505152406.1: *5* TestTBO.test_trailing_ws_in_docstring
def test_trailing_ws_in_docstring(self):
    """Trailing whitespace inside a docstring must be stripped."""
    expected = textwrap.dedent('''\
"""
This line contains trailing ws
"""
''')
    # Create the trailing ws by hand so flake8 doesn't complain!
    contents = expected.replace('ws', 'ws  ')
    # Sanity check: the replacement actually introduced trailing ws.
    self.assertNotEqual(contents, expected)
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    # assertEqual (not a bare assert): consistent with sibling tests
    # and not stripped under python -O.
    self.assertEqual(results, expected)

# @+node:ekr.20240109070553.1: *5* TestTBO.test_unary_ops
def test_unary_ops(self):
    """Unary-operator spacing must match black for every table entry."""
    # One-line pet peeves involving unary ops but *not* slices.

    # See https://peps.python.org/pep-0008/#pet-peeves
    # See https://peps.python.org/pep-0008/#other-recommendations

    tag = 'test_unary_op'

    # All entries are expected values....
    table = (
        # Calls...
        """f(-1)""",
        """f(-1 < 2)""",
        # Dicts...
        """d = {key: -3}""",
        """d['key'] = a[-i]""",
        # Unary minus...
        """v = -1 if a < b else -2""",
        # ~
        """a = ~b""",
        """c = a[:~e:]""",
        # """d = a[f() - 1 :]""",
        # """e = a[2 - 1 :]""",
        # """e = a[b[2] - 1 :]""",
    )
    for i, contents in enumerate(table):
        description = f"{tag} part {i}"
        contents, tokens = self.make_data(contents, description=description)
        expected = self.blacken(contents)
        results = self.beautify(contents, tokens, filename=description)
        if results != expected:  # pragma: no cover
            print('')
            print(
                f"TestTokenBasedOrange.{tag}:\n"
                f"  contents: {contents.rstrip()}\n"
                f"     black: {expected.rstrip()}\n"
                f"    orange: {results.rstrip() if results else 'None'}"
            )
        self.assertEqual(results, expected, msg=description)

# @+node:ekr.20240105153425.79: *5* TestTBO.test_verbatim
def test_verbatim(self):
    """Code after @nobeautify must pass through TBO untouched."""
    # The hand-aligned columns below must survive beautification verbatim.
    contents = """
@nobeautify

def addOptionsToParser(self, parser, trace_m):

    add = parser.add_option

    def add_bool(option, help, dest=None):
        add(option, action='store_true', dest=dest, help=help)

    add_bool('--diff',          'use Leo as an external git diff')
    # add_bool('--dock',          'use a Qt dock')
    add_bool('--fullscreen',    'start fullscreen')
    add_bool('--init-docks',    'put docks in default positions')
    # Multiple bool values.
    add('-v', '--version', action='store_true',
        help='print version number and exit')

# From leoAtFile.py
noDirective     =  1 # not an at-directive.
allDirective    =  2 # at-all (4.2)
docDirective    =  3 # @doc.


"""
    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected, msg=contents)

# @+node:ekr.20240105153425.80: *5* TestTBO.test_verbatim_with_pragma
def test_verbatim_with_pragma(self):
    """Code between 'pragma: no beautify' and 'pragma: beautify' passes through untouched."""
    contents = """
# pragma: no beautify

def addOptionsToParser(self, parser, trace_m):

    add = parser.add_option

    def add_bool(option, help, dest=None):
        add(option, action='store_true', dest=dest, help=help)

    add_bool('--diff',          'use Leo as an external git diff')
    # add_bool('--dock',          'use a Qt dock')
    add_bool('--fullscreen',    'start fullscreen')
    add_other('--window-size',  'initial window size (height x width)', m='SIZE')
    add_other('--window-spot',  'initial window position (top x left)', m='SPOT')
    # Multiple bool values.
    add('-v', '--version', action='store_true',
        help='print version number and exit')

# pragma: beautify
"""
    contents, tokens = self.make_data(contents)
    expected = contents
    results = self.beautify(contents, tokens)
    self.assertEqual(results, expected, msg=contents)

# @+node:ekr.20250505231955.1: *5* TestTBO.test_ws_in_blank_line
def test_ws_in_blank_line(self):
    """Whitespace-only lines must be reduced to bare newlines."""
    contents = 'line 1\n   \nline 2\n'
    expected = 'line 1\n\nline 2\n'
    contents, tokens = self.make_data(contents)
    results = self.beautify(contents, tokens)
    if results != expected:  # pragma: no cover
        g.printObj(tokens, tag='Tokens')
        g.printObj(results, tag='Results')
        g.printObj(expected, tag='Expected')
    # Fix: use assertEqual, not a bare assert, for consistency with the
    # sibling tests and so the check survives `python -O`.
    self.assertEqual(results, expected)

# @+node:ekr.20240105153425.81: *5* TestTBO.verbatim2
def test_verbatim2(self):
    """Verbatim sentinels: an @doc part must round-trip unchanged."""
    # We *do* want this test to contain verbatim sentinels.
    raw = """
@beautify
###@nobeautify
@ Starts doc part
More doc part.
The @c ends the doc part.
@c
    """
    contents, token_list = self.make_data(raw)
    results = self.beautify(contents, token_list)
    # The expected result is the (post-make_data) contents itself.
    self.assertEqual(results, contents, msg=contents)

# @+node:ekr.20240105145241.1: *4* class TokenBasedOrange
class TokenBasedOrange:  # Orange is the new Black.
    # The << ... >> lines below are Leo section references; Leo expands
    # them from the like-named nodes. @others pulls in all method nodes.
    << TokenBasedOrange: docstring >>
    << TokenBasedOrange: __slots__ >>
    << TokenBasedOrange: python-related constants >>

    @others


# @+node:ekr.20240119062227.1: *5* << TokenBasedOrange: docstring >>
@language rest
@wrap

"""
Leo's token-based beautifier, three times faster than the beautifier in leoAst.py.

**Design**

The *pre_scan* method is the heart of the algorithm. It sets context
for the `:`, `=`, `**` and `.` tokens *without* using the parse tree.
*pre_scan* calls three *finishers*.

Each finisher uses a list of *relevant earlier tokens* to set the
context for one kind of (input) token. Finishers look behind (in the
stream of input tokens) with essentially no cost.

After the pre-scan, *tbo.beautify* (the main loop) calls *visitors*
for each separate type of *input* token.

Visitors call *code generators* to generate strings in the output
list, using *lazy evaluation* to generate whitespace.
"""

# @+node:ekr.20240111035404.1: *5* << TokenBasedOrange: __slots__ >>
# All instance attributes; keeps instances small and catches typos.
__slots__ = [
    # Command-line arguments.
    'all',
    'beautified',
    'diff',
    'report',
    'write',
    # Global data.
    'contents',
    'filename',
    'input_tokens',
    'output_list',
    'tab_width',
    'insignificant_tokens',  # Kinds skipped by tbo._next/_prev; set in the ctor.
    # Token-related data for visitors.
    'index',
    'input_token',
    'line_number',
    'pending_lws',
    'pending_ws',
    'prev_output_kind',
    'prev_output_value',
    # Parsing state for visitors.
    'decorator_seen',
    'in_arg_list',
    'in_doc_part',
    'state_stack',
    'verbatim',
    # Whitespace state. Don't even *think* about changing these!
    'curly_brackets_level',
    'indent_level',
    'lws',
    'paren_level',
    'square_brackets_stack',
    # Regular expressions.
    'at_others_pat',
    'beautify_pat',
    'comment_pat',
    'end_doc_pat',
    'nobeautify_pat',
    'nobeautify_sentinel_pat',
    'node_pat',
    'start_doc_pat',
]
# @+node:ekr.20240116040458.1: *5* << TokenBasedOrange: python-related constants >>
# Token kinds that carry no syntactic weight for the beautifier.
# NOTE(review): the ctor builds a similar tuple, self.insignificant_tokens,
# that omits 'encoding' and 'endmarker' — confirm the difference is intended.
insignificant_kinds = (
    'comment',
    'dedent',
    'encoding',
    'endmarker',
    'indent',
    'newline',
    'nl',
    'ws',
)

# 'name' tokens that may appear in expressions.
operator_keywords = (
    'await',  # Debatable.
    'and',
    'in',
    'not',
    'not in',
    'or',  # Operators.
    'True',
    'False',
    'None',  # Values.
)
# @+node:ekr.20240105145241.2: *5* tbo.ctor
def __init__(self, settings: Optional[SettingsDict] = None):
    """Ctor for Orange class."""

    # Set default settings.
    if settings is None:
        settings = {}

    # Hard-code 4-space tabs.
    self.tab_width: int = 4

    # Define tokens even for empty files.
    # Annotation corrected: input_token starts as None.
    self.input_token: Optional[InputToken] = None
    self.input_tokens: list[InputToken] = []
    self.lws: str = ""  # Set only by Indent/Dedent tokens.
    self.pending_lws: str = ""
    self.pending_ws: str = ""

    # Set by gen_token and all do_* methods that bypass gen_token.
    # Annotations corrected: both start as None.
    self.prev_output_kind: Optional[str] = None
    self.prev_output_value: Optional[str] = None

    # Set ivars from the settings dict *without* using setattr.
    self.all = settings.get('all', False)
    self.beautified = settings.get('beautified', False)
    self.diff = settings.get('diff', False)
    self.report = settings.get('report', False)
    self.write = settings.get('write', False)

    # The list of tokens that tbo._next/_prev skip.
    # NOTE(review): unlike the class-level insignificant_kinds, this tuple
    # omits 'encoding' and 'endmarker' — confirm the difference is intended.
    self.insignificant_tokens = (
        'comment',
        'dedent',
        'indent',
        'newline',
        'nl',
        'ws',
    )

    # fmt: off

    # General patterns.
    self.beautify_pat = re.compile(
        r'#\s*pragma:\s*beautify\b|#\s*@@beautify|#\s*@\+node|#\s*@[+-]others|#\s*@[+-]<<')
    self.comment_pat = re.compile(r'^(\s*)#[^@!# \n]')
    self.nobeautify_pat = re.compile(r'\s*#\s*pragma:\s*no\s*beautify\b|#\s*@@nobeautify')
    self.nobeautify_sentinel_pat = re.compile(r'^#\s*@@nobeautify\s*$', re.MULTILINE)

    # Patterns from FastAtRead class, specialized for python delims.
    self.node_pat = re.compile(r'^(\s*)#@\+node:([^:]+): \*(\d+)?(\*?) (.*)$')  # @node
    self.start_doc_pat = re.compile(r'^\s*#@\+(at|doc)?(\s.*?)?$')  # @doc or @
    self.at_others_pat = re.compile(r'^(\s*)#@(\+|-)others\b(.*)$')  # @others

    # Doc parts end with @c or a node sentinel. Specialized for python.
    self.end_doc_pat = re.compile(r"^\s*#@(@(c(ode)?)|([+]node\b.*))$")

    # fmt: on

# @+node:ekr.20240126012433.1: *5* tbo: Checking & dumping
# @+node:ekr.20240106220724.1: *6* tbo.dump_token_range
def dump_token_range(self, i1: int, i2: int, tag: Optional[str] = None) -> None:  # pragma: no cover
    """Print input tokens i1 through i2 inclusive, after an optional tag line."""
    if tag:
        print(tag)
    for z in self.input_tokens[i1 : i2 + 1]:
        print(z.dump())

# @+node:ekr.20240112082350.1: *6* tbo.internal_error_message
def internal_error_message(self) -> None:  # pragma: no cover
    """Print the details of an internal error, with five lines of context."""
    # Gather up to five lines on each side of the offending line.
    bad_line = self.input_token.line_number
    all_lines = g.splitLines(self.contents)
    first = max(0, bad_line - 5)
    last = min(bad_line + 5, len(all_lines))
    prev_lines = ['\n'] + [
        f"Line {i + 1:5}:{'***' if i + 1 == bad_line else '   '}{all_lines[i]!r}\n"
        for i in range(first, last)
    ]

    # Print the messages.
    print('')
    g.es_print('Error in token-based beautifier!')
    g.es_print(f"At token {self.index}, line: {bad_line} file: {self.filename}")
    g.es_print("Please report this message to Leo's developers")
    print('')
    g.es_print(g.objToString(prev_lines))

# @+node:ekr.20240226131015.1: *6* tbo.user_error_message
def user_error_message(self, message: str) -> str:  # pragma: no cover
    """Return the full text of a user-error message, with five lines of context."""
    # Gather up to five lines on each side of the offending line.
    bad_line = self.input_token.line_number
    all_lines = g.splitLines(self.contents)
    first = max(0, bad_line - 5)
    last = min(bad_line + 5, len(all_lines))
    prev_lines = ['\n'] + [
        f"Line {i + 1:5}:{'***' if i + 1 == bad_line else '   '}{all_lines[i]!r}\n"
        for i in range(first, last)
    ]
    context_s = ''.join(prev_lines) + '\n'

    # Assemble the full error message.
    return f"{message.strip()}\n\nAt token {self.index}, line: {bad_line} file: {self.filename}\n{context_s}"

# @+node:ekr.20240117053310.1: *6* tbo.oops
def oops(self, message: str) -> None:  # pragma: no cover
    """Print the internal-error context, then raise InternalBeautifierError."""
    self.internal_error_message()
    raise InternalBeautifierError(message)

# @+node:ekr.20240105145241.4: *5* tbo: Entries & helpers
# @+node:ekr.20240105145241.5: *6* tbo.beautify (main token loop)
def no_visitor(self) -> None:  # pragma: no cover
    # Fallback visitor: an unknown token kind is an internal error.
    self.oops(f"Unknown kind: {self.input_token.kind!r}")

def beautify(
    self,
    contents: str,
    filename: str,
    input_tokens: list[InputToken],
) -> str:
    """
    The main line. Create output tokens and return the result as a string.

    beautify_file and beautify_file_def call this method.

    On any error the original contents are returned unchanged.
    """
    << tbo.beautify: init ivars >>

    try:
        # Pre-scan the token list, setting context.
        self.pre_scan()

        # Init ivars first.
        self.input_token = None
        self.pending_lws = ''
        self.pending_ws = ''
        self.prev_output_kind = None
        self.prev_output_value = None

        # Init state.
        self.gen_token('file-start', '')
        self.push_state('file-start')

        # The main loop:
        prev_line_number: int = 0
        # NOTE(review): the if below only updates the local prev_line_number;
        # self.line_number is never assigned in this loop — confirm intended.
        for self.index, self.input_token in enumerate(input_tokens):
            if prev_line_number != self.input_token.line_number:
                prev_line_number = self.input_token.line_number
            # Call the proper visitor.
            if self.verbatim:
                self.do_verbatim()
            else:
                func = getattr(self, f"do_{self.input_token.kind}", self.no_visitor)
                func()

        # Return the result.
        result = ''.join(self.output_list)
        return result

    # Make no change if there is any error.
    except InternalBeautifierError as e:  # pragma: no cover
        # oops calls self.internal_error_message to create e.
        print(repr(e))
    except AssertionError as e:  # pragma: no cover
        print(f"AssertionError: {e!r}")
        self.internal_error_message()
    return contents

# @+node:ekr.20240112023403.1: *7* << tbo.beautify: init ivars >>
# Initialize all per-run ivars for tbo.beautify.

# Debugging vars...
self.contents = contents
self.filename = filename
self.line_number: Optional[int] = None

# The input and output lists...
self.output_list: list[str] = []
self.input_tokens = input_tokens  # The list of input tokens.

# State vars for whitespace.
self.curly_brackets_level = 0  # Number of unmatched '{' tokens.
self.paren_level = 0  # Number of unmatched '(' tokens.
self.square_brackets_stack: list[bool] = []  # A stack of bools, for self.gen_word().
self.indent_level = 0  # Set only by do_indent and do_dedent.

# Parse state.
self.decorator_seen = False  # Set by do_name for do_op.
self.in_arg_list = 0  # > 0 if in an arg list of a def.
self.in_doc_part = False
self.state_stack: list[ParseState] = []  # Stack of ParseState objects.

# Leo-related state.
self.verbatim = False  # True: don't beautify.

# Ivars describing the present input token...
self.index = 0  # The index within the tokens array of the token being scanned.
self.lws = ''  # Leading whitespace. Required!
# @+node:ekr.20240105145241.6: *6* tbo.beautify_file (entry) (stats & diffs)
def beautify_file(self, filename: str) -> bool:  # pragma: no cover
    """
    TokenBasedOrange: Beautify the given external file.

    Return True if the file was beautified.
    """
    if 0:
        print(
            f"all: {int(self.all)} "
            f"beautified: {int(self.beautified)} "
            f"diff: {int(self.diff)} "
            f"report: {int(self.report)} "
            f"write: {int(self.write)} "
            f"{g.shortFileName(filename)}"
        )
    self.filename = filename
    contents, tokens = self.init_tokens_from_file(filename)
    if not (contents and tokens):
        return False  # Not an error.
    if self.nobeautify_sentinel_pat.search(contents):
        return False  # Honor @nobeautify sentinel within the file.
    if not isinstance(tokens[0], InputToken):
        self.oops(f"Not an InputToken: {tokens[0]!r}")

    # Beautify the contents, returning the original contents on any error.
    results = self.beautify(contents, filename, tokens)

    # Ignore changes only to newlines.
    if self.regularize_newlines(contents) == self.regularize_newlines(results):
        return False

    # Print reports.
    if self.beautified:  # --beautified.
        print(f"tbo: beautified: {g.shortFileName(filename)}")
    if self.diff:  # --diff.
        # NOTE(review): "(unknown)" below looks like a redacted filename,
        # and the f-prefix is unnecessary (no placeholders) — confirm.
        print(f"Diffs: (unknown)")
        self.show_diffs(contents, results)

    # Write the (changed) file.
    if self.write:  # --write.
        self.write_file(filename, results)
    return True

# @+node:ekr.20250508041634.1: *6* tbo.beautify_script_tree (entry) and helper
def beautify_script_tree(self, root: Position) -> None:
    """
    Undoably beautify root's entire tree.

    Each changed node gets its own undo bead; the whole operation
    is wrapped in a single undo group (started lazily on first change).
    """
    c = root.v.context
    u, undoType = c.undoer, 'beautify-script'
    first_p = c.p
    n_changed = 0
    for p in root.self_and_subtree():
        # The bead must be captured *before* the node is changed.
        bunch = u.beforeChangeNodeContents(p)
        changed = self.beautify_script_node(p)
        if changed:
            if n_changed == 0:  # #4443: start the group only when needed.
                u.beforeChangeGroup(first_p, undoType)
            n_changed += 1
            u.afterChangeNodeContents(p, undoType, bunch)
    if n_changed:
        u.afterChangeGroup(root, undoType)
        c.redraw(root)
        if not g.unitTesting:
            g.es_print(f"Beautified {n_changed} node{g.plural(n_changed)}")

# @+node:ekr.20250508030747.1: *7* tbo.beautify_script_node
def beautify_script_node(self, p: Position) -> bool:
    """
    Beautify a single node, preserving @others and section-reference lines.

    Return True if p.b was changed. Nodes containing @nobeautify are skipped.
    """
    # Patterns for lines that must be replaced.
    section_ref_pat = re.compile(r'(\s*)\<\<(.+)\>\>(.*)')
    nobeautify_pat = re.compile(r'(\s*)\@nobeautify(.*)')
    trailing_ws_pat = re.compile(r'(.*)#(.*)')

    # Part 1: Replace @others and section references with 'pass'
    #         This hack is valid!
    indices: list[int] = []  # Indices of replaced lines.
    contents: list[str] = []  # Contents after replacements.
    for i, s in enumerate(g.splitLines(p.b)):
        if m := section_ref_pat.match(s):
            contents.append(f"{m.group(1)}pass\n")
            indices.append(i)
        elif nobeautify_pat.match(s):
            # Fix: the walrus binding here was unused; a plain match suffices.
            return False
        else:
            contents.append(s)

    # Part 2: Beautify.
    self.indent_level = 0
    self.filename = 'beautify-script'
    contents_s = ''.join(contents)
    tokens = Tokenizer().make_input_tokens(contents_s)
    if not tokens:
        return False
    results_s: str = self.beautify(contents_s, self.filename, tokens)

    # Part 3: Undo replacements, regularize comments and clean trailing ws.
    # NOTE(review): this assumes beautification preserves line numbering so
    # that `indices` still aligns with the results — confirm.
    body_lines: list[str] = g.splitLines(p.b)
    results: list[str] = g.splitLines(results_s)
    for i in indices:
        old_line = body_lines[i]
        if m := trailing_ws_pat.match(old_line):
            old_line = f"{m.group(1).rstrip()}  #{m.group(2)}"
        results[i] = old_line.rstrip() + '\n'

    # Part 4: Update the body if necessary.
    new_body = ''.join(results)
    changed = p.b.rstrip() != new_body.rstrip()
    if changed:
        p.b = new_body
    return changed

# @+node:ekr.20240105145241.8: *6* tbo.init_tokens_from_file
def init_tokens_from_file(self, filename: str) -> tuple[str, list[InputToken]]:  # pragma: no cover
    """
    Create the list of tokens for the given file.

    Return (contents, input_tokens); ('', []) if the file is empty or unreadable.
    """
    self.indent_level = 0
    self.filename = filename
    t1 = time.perf_counter_ns()
    contents = g.readFile(filename)
    t2 = time.perf_counter_ns()
    if not contents:
        self.input_tokens = []
        return '', []
    t3 = time.perf_counter_ns()
    self.input_tokens = input_tokens = Tokenizer().make_input_tokens(contents)
    t4 = time.perf_counter_ns()
    if 0:  # Timing report, for debugging only.
        print(f"       read: {(t2 - t1) / 1000000:6.2f} ms")
        print(f"make_tokens: {(t4 - t3) / 1000000:6.2f} ms")
        print(f"      total: {(t4 - t1) / 1000000:6.2f} ms")
    return contents, input_tokens

# @+node:ekr.20240105140814.12: *6* tbo.regularize_newlines
def regularize_newlines(self, s: str) -> str:
    """Return s with all CRLF and CR line endings converted to bare LF."""
    without_crlf = s.replace('\r\n', '\n')
    return without_crlf.replace('\r', '\n')

# @+node:ekr.20240105140814.17: *6* tbo.write_file
def write_file(self, filename: str, s: str) -> None:  # pragma: no cover
    """
    Write the string s to the file whose name is given.

    Handle all exceptions.

    Before calling this function, the caller should ensure
    that the file actually has been changed.
    """
    try:
        # Encode first so an encoding failure never truncates the file.
        s2 = g.toEncodedString(s)  # May raise exception.
        with open(filename, 'wb') as f:
            f.write(s2)
    except Exception as e:  # pragma: no cover
        print(f"Error {e!r}: {filename!r}")

# @+node:ekr.20200107040729.1: *6* tbo.show_diffs
def show_diffs(self, s1: str, s2: str) -> None:  # pragma: no cover
    """Print a unified diff between strings s1 (old) and s2 (new)."""
    # NOTE(review): `filename` is assigned but unused, and the f-strings
    # below have no placeholders — "(unknown)" looks like a redacted
    # reference to the filename. Confirm the intended labels.
    filename = self.filename
    lines = list(
        difflib.unified_diff(
            g.splitLines(s1),
            g.splitLines(s2),
            fromfile=f"Old (unknown)",
            tofile=f"New (unknown)",
        )
    )
    print('')
    print(f"Diffs for (unknown)")
    for line in lines:
        print(line)

# @+node:ekr.20240105145241.9: *5* tbo: Visitors & generators
# Visitors (tbo.do_* methods) handle input tokens.
# Generators (tbo.gen_* methods) create zero or more output tokens.
# @+node:ekr.20240105145241.10: *6* tbo.do_comment
def do_comment(self) -> None:
    """Handle a comment token, regularizing spacing around '#'."""
    val = self.input_token.value
    << do_comment: update comment-related state >>

    # Generate the comment.
    self.pending_lws = ''
    self.pending_ws = ''
    # A comment is "entire-line" when nothing but whitespace precedes it.
    entire_line = self.input_token.line.lstrip().startswith('#')
    if entire_line:
        # The comment includes all ws.
        # #1496: No further munging needed.
        val = self.input_token.line.rstrip()
        # #3056: Insure one space after '#' in non-sentinel comments.
        #        Do not change bang lines or '##' comments.
        if m := self.comment_pat.match(val):
            i = len(m.group(1))
            val = val[:i] + '# ' + val[i + 1 :]
    else:
        # Exactly two spaces before trailing comments.
        i = val.find('#')
        if i == -1:
            g.trace('OOPS', repr(val), g.callers())
        # Special case for ###.
        elif val[i:].startswith('###'):
            val = val[:i].rstrip() + '  ### ' + val[i + 3 :].strip()
        else:
            val = val[:i].rstrip() + '  # ' + val[i + 1 :].strip()
    self.gen_token('comment', val.rstrip())

# @+node:ekr.20240420034216.1: *7* << do_comment: update comment-related state >>
# Leo-specific code: update verbatim/doc-part state from the comment text.
if self.node_pat.match(val):
    # Clear per-node state.
    self.in_doc_part = False
    self.verbatim = False
    self.decorator_seen = False
    # Do *not* clear other state, which may persist across @others.
    # self.curly_brackets_level = 0
    # self.in_arg_list = 0
    # self.indent_level = 0
    # self.lws = ''
    # self.paren_level = 0
    # self.square_brackets_stack = []
    # self.state_stack = []
else:
    # Keep track of verbatim mode.
    if self.beautify_pat.match(val):
        self.verbatim = False
    elif self.nobeautify_pat.match(val):
        self.verbatim = True
    # Keep track of @doc parts, to honor the convention for splitting lines.
    if self.start_doc_pat.match(val):
        self.in_doc_part = True
    if self.end_doc_pat.match(val):
        self.in_doc_part = False
# @+node:ekr.20240111051726.1: *6* tbo.do_dedent
def do_dedent(self) -> None:
    """Handle a dedent token: pop one indentation level."""
    # Note: other methods read self.indent_level.
    self.indent_level -= 1
    self.lws = ' ' * (self.indent_level * self.tab_width)
    self.pending_lws = self.lws
    self.pending_ws = ''
    self.prev_output_kind = 'dedent'

# @+node:ekr.20240105145241.11: *6* tbo.do_encoding
def do_encoding(self) -> None:
    """Handle the encoding token. A no-op: encoding carries no output."""

# @+node:ekr.20240105145241.12: *6* tbo.do_endmarker
def do_endmarker(self) -> None:
    """Handle an endmarker token: guarantee exactly one newline at end of file."""
    line_enders = ('indent', 'dedent', 'line-indent', 'newline')
    if self.prev_output_kind not in line_enders:
        self.output_list.append('\n')
    # Defensive: nothing may remain pending at end of file.
    self.pending_lws = ''
    self.pending_ws = ''

# @+node:ekr.20240105145241.14: *6* tbo.do_indent
consider_message = 'consider using python/Tools/scripts/reindent.py'

def do_indent(self) -> None:
    """Handle an indent token, warning about tabs and irregular indentation."""
    new_lws = self.input_token.value

    # Warn (only) about indentation problems.
    if '\t' in new_lws:  # pragma: no cover
        print(f"Found tab character in {self.filename}")
        print(self.consider_message)
    elif len(new_lws) % self.tab_width:  # pragma: no cover
        print(f"Indentation error in {self.filename}")
        print(self.consider_message)

    # Adjust the indent level. Note: this compares runs of blanks as strings.
    old_lws = self.indent_level * self.tab_width * ' '
    if new_lws > old_lws:
        self.indent_level += 1
    elif new_lws < old_lws:  # pragma: no cover (defensive)
        print(f"\n===== do_indent: can not happen {new_lws!r}, {old_lws!r}")

    self.lws = new_lws
    self.pending_lws = self.lws
    self.pending_ws = ''
    self.prev_output_kind = 'indent'

# @+node:ekr.20240105145241.16: *6* tbo.do_name & generators
# @+node:ekr.20240418050017.1: *7* tbo.do_name
def do_name(self) -> None:
    """Handle a 'name' token, with a special case for a bare '_' after '*'."""
    name = self.input_token.value
    if name == '_':
        # #4420: '_' directly after '*' gets no preceding blank.
        prev_token = self.input_tokens[self.index - 1]
        if prev_token.kind == 'op' and prev_token.value == '*':
            self.pending_ws = ''
            self.gen_token('word', name)
            self.gen_blank()
            return
        self.gen_word(name)
        return
    if name in self.operator_keywords:
        self.gen_word_op(name)
        return
    self.gen_word(name)

# @+node:ekr.20240105145241.40: *7* tbo.gen_word
def gen_word(self, s: str) -> None:
    """Add a word request to the code list, surrounded by blanks."""
    assert s == self.input_token.value
    assert s and isinstance(s, str), repr(s)
    self.gen_blank()
    self.gen_token('word', s)
    self.gen_blank()

# @+node:ekr.20240107141830.1: *7* tbo.gen_word_op
def gen_word_op(self, s: str) -> None:
    """Add a word-op request (a keyword used as an operator) to the code list."""
    assert s == self.input_token.value
    assert s and isinstance(s, str), repr(s)
    self.gen_blank()
    self.gen_token('word-op', s)
    self.gen_blank()

# @+node:ekr.20240105145241.17: *6* tbo.do_newline & do_nl
# @+node:ekr.20240418043826.1: *7* tbo.do_newline
def do_newline(self) -> None:
    """
    do_newline: Handle a regular newline.

    From https://docs.python.org/3/library/token.html

    NEWLINE tokens end *logical* lines of Python code.
    """

    # #4349: Remove trailing ws.
    # NOTE(review): this pops trailing 'ws' tokens from the *end* of
    # self.input_tokens — the same list tbo.beautify is enumerating —
    # rather than from the output list. Confirm this is intended.
    while self.input_tokens:
        last_token = self.input_tokens[-1]
        if last_token.kind == 'ws':
            self.input_tokens.pop()
        else:
            break

    self.output_list.append('\n')
    self.pending_lws = ''  # Set only by 'dedent', 'indent' or 'ws' tokens.
    self.pending_ws = ''
    self.prev_output_kind = 'newline'
    self.prev_output_value = '\n'

# @+node:ekr.20240418043827.1: *7* tbo.do_nl
def do_nl(self) -> None:
    """
    do_nl: Handle a continuation line.

    From https://docs.python.org/3/library/token.html

    NL tokens end *physical* lines. They appear when a logical line of
    code spans multiple physical lines.
    """
    # Treated exactly like a logical newline.
    self.do_newline()

# @+node:ekr.20240105145241.18: *6* tbo.do_number
def do_number(self) -> None:
    """Handle a number token: a blank before, none forced after."""
    self.gen_blank()
    self.gen_token('number', self.input_token.value)

# @+node:ekr.20240105145241.19: *6* tbo.do_op & generators
# @+node:ekr.20240418045924.1: *7* tbo.do_op
def do_op(self) -> None:
    """Handle an op token, dispatching on the operator's value."""
    val = self.input_token.value

    if val == '.':
        self.gen_dot_op()
    elif val == '@':
        # Start of a decorator.
        self.gen_token('op-no-blanks', val)
        self.push_state('decorator')
    elif val == ':':
        # Treat slices differently.
        self.gen_colon()
    elif val in ',;':
        # Pep 8: Avoid extraneous whitespace immediately before
        # comma, semicolon, or colon.
        self.pending_ws = ''
        self.gen_token('op', val)
        self.gen_blank()
    elif val in '([{':
        # Pep 8: Avoid extraneous whitespace immediately inside
        # parentheses, brackets or braces.
        self.gen_lt()
    elif val in ')]}':
        # Ditto.
        self.gen_rt()
    elif val == '=':
        self.gen_equal_op()
    elif val in '~+-':
        # May be unary or binary; gen_possible_unary_op decides.
        self.gen_possible_unary_op()
    elif val == '*':
        self.gen_star_op()
    elif val == '**':
        self.gen_star_star_op()
    else:
        # Pep 8: always surround binary operators with a single space.
        # '==','+=','-=','*=','**=','/=','//=','%=','!=','<=','>=','<','>',
        # '^','~','*','**','&','|','/','//',
        # Pep 8: If operators with different priorities are used, consider
        # adding whitespace around the operators with the lowest priorities.
        self.gen_blank()
        self.gen_token('op', val)
        self.gen_blank()

# @+node:ekr.20240105145241.31: *7* tbo.gen_colon & helper
def gen_colon(self) -> None:
    """
    Handle a ':' token.

    The pre-scan sets self.input_token.context for slices and dicts.
    Pep 8: never a space before the colon.
    """
    val = self.input_token.value
    context = self.input_token.context

    self.pending_ws = ''
    if context == 'complex-slice':
        # Blanks around the colon, except directly after '[' or ':'.
        if self.prev_output_value not in '[:':
            self.gen_blank()
        self.gen_token('op', val)
        self.gen_blank()
    elif context == 'simple-slice':
        self.gen_token('op-no-blanks', val)
    else:
        # 'dict' and all other contexts: a blank after the colon only.
        # (The former separate 'dict' branch was byte-identical to this one.)
        self.gen_token('op', val)
        self.gen_blank()

# @+node:ekr.20240109035004.1: *7* tbo.gen_dot_op & _next
def gen_dot_op(self) -> None:
    """
    Handle the '.' input token.

    In an 'import' context the dot may need surrounding blanks;
    everywhere else it binds tightly with no whitespace.
    """
    context = self.input_token.context

    # Get the *next* significant **input** token.
    # This is the only call to _next(i) anywhere!
    next_i = self._next(self.index)
    # Fix: the sentinel was the *string* 'None', which made the attribute
    # access below raise AttributeError whenever next_i was None.
    # Also renamed to avoid shadowing the builtin `next`.
    next_token = None if next_i is None else self.input_tokens[next_i]
    import_is_next = (
        next_token is not None
        and next_token.kind == 'name'
        and next_token.value == 'import'
    )

    if context == 'import':
        if self.prev_output_kind == 'word' and self.prev_output_value in (
            'from',
            'import',
        ):
            self.gen_blank()
            op = 'op' if import_is_next else 'op-no-blanks'
            self.gen_token(op, '.')
        elif import_is_next:
            self.gen_token('op', '.')
            self.gen_blank()
        else:
            self.pending_ws = ''
            self.gen_token('op-no-blanks', '.')
    else:
        self.pending_ws = ''
        self.gen_token('op-no-blanks', '.')

# @+node:ekr.20240105145241.43: *8* tbo._next
def _next(self, i: int) -> Optional[int]:
    """
    Return the next *significant* input token.

    Ignore insignificant tokens: whitespace, indentation, comments, etc.

    The **Global Token Ratio** is tbo.n_scanned_tokens / len(tbo.tokens),
    where tbo.n_scanned_tokens is the total number of calls calls to
    tbo.next or tbo.prev.

    For Leo's sources, this ratio ranges between 0.48 and 1.51!

    The orange_command function warns if this ratio is greater than 2.5.
    Previous versions of this code suffered much higher ratios.
    """
    i += 1
    while i < len(self.input_tokens):
        token = self.input_tokens[i]
        if token.kind not in self.insignificant_tokens:
            # g.trace(f"token: {token!r}")
            return i
        i += 1
    return None  # pragma: no cover

# @+node:ekr.20240105145241.20: *7* tbo.gen_equal_op
def gen_equal_op(self) -> None:
    """Generate an '=' op; a keyword/default '=' gets no surrounding blanks."""
    val = self.input_token.value
    if self.input_token.context == 'initializer':
        # Pep 8: Don't use spaces around the = sign when used to indicate
        #        a keyword argument or a default parameter value.
        #        However, when combining an argument annotation with a default
        #        value, *do* use spaces around the = sign.
        self.pending_ws = ''
        self.gen_token('op-no-blanks', val)
    else:
        self.gen_blank()
        self.gen_token('op', val)
        self.gen_blank()

# @+node:ekr.20240105145241.35: *7* tbo.gen_lt
def gen_lt(self) -> None:
    """
    Generate code for a left paren or curly/square bracket.

    Updates bracket-nesting state and decides whether a blank precedes
    the bracket, per Pep 8.
    """
    val = self.input_token.value
    assert val in '([{', repr(val)

    # Update state vars.
    if val == '(':
        self.paren_level += 1
    elif val == '[':
        self.square_brackets_stack.append(False)
    else:
        self.curly_brackets_level += 1

    # Generate or suppress the leading blank.
    # Update self.in_arg_list if necessary.
    if self.input_token.context == 'import':
        self.gen_blank()
    elif self.prev_output_kind in ('op', 'word-op'):
        self.gen_blank()
    elif self.prev_output_kind == 'word':
        # Only suppress blanks before '(' or '[' for non-keywords.
        if val == '{' or self.prev_output_value in (
            'if',
            'else',
            'elif',
            'return',
            'for',
            'while',
        ):
            self.gen_blank()
        elif val == '(':
            # A call or def: entering an argument list.
            self.in_arg_list += 1
            self.pending_ws = ''
        else:
            self.pending_ws = ''
    elif self.prev_output_kind != 'line-indent':
        self.pending_ws = ''

    # Output the token!
    self.gen_token('op-no-blanks', val)

# @+node:ekr.20240105145241.37: *7* tbo.gen_possible_unary_op & helper
def gen_possible_unary_op(self) -> None:
    """Add a unary or binary op ('~', '+' or '-') to the token list."""
    val = self.input_token.value
    if self.is_unary_op(self.index, val):
        # NOTE(review): 'prev' below is actually the *current* input token,
        # and input tokens never have kind 'lt' (an output kind generated
        # elsewhere) — this branch looks dead. Confirm the intent.
        prev = self.input_token
        if prev.kind == 'lt':
            self.gen_token('op-no-blanks', val)
        else:
            self.gen_blank()
            self.gen_token('op-no-blanks', val)
    else:
        self.gen_blank()
        self.gen_token('op', val)
        self.gen_blank()

# @+node:ekr.20240109082712.1: *8* tbo.is_unary_op & _prev
def is_unary_op(self, i: int, val: str) -> bool:
    """Return True if the '~', '+' or '-' at index i is a unary operator."""
    if val == '~':
        return True
    if val not in '+-':  # pragma: no cover
        return False

    # Examine the previous significant **input** token.
    # This is the only call to _prev(i) anywhere!
    prev_i = self._prev(i)
    prev_token = None if prev_i is None else self.input_tokens[prev_i]
    kind = prev_token.kind if prev_token else ''
    value = prev_token.value if prev_token else ''

    # A sign after a literal or a closing bracket is binary.
    if kind in ('number', 'string'):
        return False
    if kind == 'op' and value in ')]':
        return False
    # A sign after an opening bracket or ':' is unary.
    if kind == 'op' and value in '{([:':
        return True
    if kind != 'name':
        return True
    # The hard case: prev_token is a 'name' token.
    # Any Python keyword indicates a unary operator.
    return keyword.iskeyword(value) or keyword.issoftkeyword(value)

# @+node:ekr.20240115233050.1: *9* tbo._prev
def _prev(self, i: int) -> Optional[int]:
    """
    Return the previous *significant* input token.

    Ignore insignificant tokens: whitespace, indentation, comments, etc.
    """
    i -= 1
    while i >= 0:
        token = self.input_tokens[i]
        if token.kind not in self.insignificant_tokens:
            return i
        i -= 1
    return None  # pragma: no cover

# @+node:ekr.20240105145241.36: *7* tbo.gen_rt
def gen_rt(self) -> None:
    """Generate code for a closing ')', ']' or '}'."""
    val = self.input_token.value
    assert val in ')]}', repr(val)

    # Update the bracket-tracking state vars.
    if val == ')':
        self.paren_level -= 1
        self.in_arg_list = max(0, self.in_arg_list - 1)
        suppress_ws = True
    elif val == ']':
        self.square_brackets_stack.pop()
        suppress_ws = True
    else:  # '}'
        self.curly_brackets_level -= 1
        # A hack: put a space before a comma.
        if self.output_list[-1] == ',':
            self.pending_ws = ' '
            suppress_ws = False
        else:
            suppress_ws = True
    if suppress_ws and self.prev_output_kind != 'line-indent':
        self.pending_ws = ''
    self.gen_token('rt', val)

# @+node:ekr.20240105145241.38: *7* tbo.gen_star_op
def gen_star_op(self) -> None:
    """Generate a '*' op, with a special case for *args."""
    val = self.input_token.value
    self.gen_blank()
    if self.input_token.context == 'arg':
        # *args: no blank between the '*' and the name.
        self.gen_token('op-no-blanks', val)
    else:
        self.gen_token('op', val)
        self.gen_blank()

# @+node:ekr.20240105145241.39: *7* tbo.gen_star_star_op
def gen_star_star_op(self) -> None:
    """Generate a '**' op, with a special case for **kwargs."""
    val = self.input_token.value
    self.gen_blank()
    if self.input_token.context == 'arg':
        # **kwargs: no blank between the '**' and the name.
        self.gen_token('op-no-blanks', val)
    else:
        self.gen_token('op', val)
        self.gen_blank()

# @+node:ekr.20240105145241.3: *7* tbo.push_state
def push_state(self, kind: str, value: Union[int, str, None] = None) -> None:
    """Push a new ParseState onto the state stack."""
    self.state_stack.append(ParseState(kind, value))

# @+node:ekr.20240105145241.21: *6* tbo.do_string
def do_string(self) -> None:
    """
    Handle a 'string' token.

    The Tokenizer converts all f-string tokens to a single 'string' token.
    """
    # Careful: continued strings may contain '\r'
    val = self.regularize_newlines(self.input_token.value)
    is_docstring = val.startswith(('"""', "'''"))
    if is_docstring:
        # #4346: Strip trailing ws in docstrings.
        # Loop until fixpoint: runs of spaces before '\n' collapse fully.
        while ' \n' in val:
            val = val.replace(' \n', '\n')
    self.gen_token('string', val)
    self.gen_blank()

# @+node:ekr.20250731204857.1: *6* tbo.do_tstring_start
def do_tstring_start(self) -> None:
    """
    Handle a tstring_start token.

    do_verbatim handles tstring_middle and tstring_end tokens.
    """
    # Enter verbatim mode so the template-string body passes through untouched.
    self.verbatim = True
    self.gen_token('verbatim', self.input_token.value)

# @+node:ekr.20240105145241.22: *6* tbo.do_verbatim
def do_verbatim(self) -> None:
    """
    Handle one token in verbatim mode.
    End verbatim mode when the appropriate comment is seen.
    """
    kind = self.input_token.kind

    # Careful: tokens may contain '\r'
    val = self.regularize_newlines(self.input_token.value)
    if kind == 'comment':
        # A comment matching beautify_pat turns beautification back on.
        if self.beautify_pat.match(val):
            self.verbatim = False
        val = val.rstrip()
        self.gen_token('comment', val)
        return
    if kind == 'tstring_middle':
        self.gen_token('verbatim', val)
        return
    if kind == 'tstring_end':
        # NOTE(review): this emits a 'tstring_middle' output kind for a
        # tstring_end input token — confirm this is intentional.
        self.gen_token('tstring_middle', val)
        self.verbatim = False
        return
    # Keep indent_level and self.lws in sync even while in verbatim mode.
    if kind == 'indent':
        self.indent_level += 1
        self.lws = self.indent_level * self.tab_width * ' '
    if kind == 'dedent':
        self.indent_level -= 1
        self.lws = self.indent_level * self.tab_width * ' '
    self.gen_token('verbatim', val)

# @+node:ekr.20240105145241.23: *6* tbo.do_ws
def do_ws(self) -> None:
    """
    Handle the "ws" pseudo-token.  See Tokenizer.itok.do_token (the gem).

    Emit the whitespace only if it ends with backslash-newline.
    """
    val = self.input_token.value
    prev = self.input_tokens[self.index - 1]
    if prev.kind in ('nl', 'newline'):
        # Leading whitespace of a fresh line.
        self.pending_lws = val
        self.pending_ws = ''
    elif '\\\n' in val:
        # A continued line: keep the whitespace verbatim.
        self.pending_lws = ''
        self.pending_ws = val
    else:
        self.pending_ws = ' ' if val else ''  # #4346.

# @+node:ekr.20240105145241.27: *6* tbo.gen_blank
def gen_blank(self) -> None:
    """
    Queue a *request* for a blank.
    Change *neither* prev_output_kind *nor* pending_lws.
    """
    prev_kind = self.prev_output_kind
    if prev_kind in ('op-no-blanks', 'hard-blank'):
        # 'op-no-blanks' demands no following blank;
        # 'hard-blank' eats any further blanks.
        self.pending_ws = ''
        return
    if prev_kind in ('dedent', 'file-start', 'indent', 'line-indent', 'newline'):
        # Suppress the blank, but do *not* change the pending ws.
        return
    # Keep any existing pending ws; otherwise request a single blank.
    if not self.pending_ws:
        self.pending_ws = ' '

# @+node:ekr.20240105145241.26: *6* tbo.gen_token
def gen_token(self, kind: str, value: str) -> None:
    """Append an output token to the code list."""
    # Flush pending leading whitespace; failing that, any pending blank.
    ws = self.pending_lws or self.pending_ws
    if ws:
        self.output_list.append(ws)
    self.output_list.append(value)
    # Both kinds of pending whitespace are now consumed.
    self.pending_lws = ''
    self.pending_ws = ''
    self.prev_output_value = value
    self.prev_output_kind = kind

# @+node:ekr.20240110205127.1: *5* tbo: Scanning
# The parser calls scanner methods to move through the list of input tokens.
# @+node:ekr.20240128114622.1: *6* tbo.pre_scan & helpers
def pre_scan(self) -> None:
    """
    Scan the entire file in one iterative pass, adding context to a few
    kinds of tokens as follows:

    Token   Possible Contexts (or None)
    =====   ===========================
    ':'     'annotation', 'dict', 'complex-slice', 'simple-slice'
    '='     'annotation', 'initializer'
    '*'     'arg'
    '**'    'arg'
    '.'     'import'
    """

    # The main loop.
    in_import = False
    scan_stack: list[ScanState] = []
    prev_token: Optional[InputToken] = None
    for i, token in enumerate(self.input_tokens):
        kind, value = token.kind, token.value
        # NOTE(review): 'in' here is a substring test, so it matches both
        # 'nl' and 'newline' kinds (and any other substring of 'newline')
        # — confirm this is the intent rather than `kind in ('nl', 'newline')`.
        if kind in 'newline':
            << pre-scan 'newline' tokens >>
        elif kind == 'op':
            << pre-scan 'op' tokens >>
        elif kind == 'name':
            << pre-scan 'name' tokens >>
        # Remember the previous significant token.
        if kind not in self.insignificant_kinds:
            prev_token = token
    # Sanity check: every bracket state should have been popped.
    if scan_stack:  # pragma: no cover
        print('pre_scan: non-empty scan_stack')
        print(scan_stack)

# @+node:ekr.20240128230812.1: *7* << pre-scan 'newline' tokens >>
# 'import' and 'from x import' statements may span lines.
# 'ws' tokens represent continued lines like this:   ws: ' \\\n    '
# A newline at the outer level (no open brackets) ends any pending import.
if in_import and not scan_stack:
    in_import = False
# @+node:ekr.20240128123117.1: *7* << pre-scan 'op' tokens >>
# Peek at (do not pop) the innermost open-bracket state, if any.
top_state: Optional[ScanState] = scan_stack[-1] if scan_stack else None

# Handle '[' and ']'.
if value == '[':
    scan_stack.append(ScanState('slice', token))
elif value == ']':
    assert top_state and top_state.kind == 'slice'
    self.finish_slice(i, top_state)
    scan_stack.pop()

# Handle '{' and '}'.
# NOTE(review): this 'if' starts a *new* chain; the '(' and ')' clauses
# below are elifs of this chain, not of the '['/']' chain above — confirm.
if value == '{':
    scan_stack.append(ScanState('dict', token))
elif value == '}':
    assert top_state and top_state.kind == 'dict'
    self.finish_dict(i, top_state)
    scan_stack.pop()

# Handle '(' and ')'
elif value == '(':
    # A '(' after a keyword or after a non-name token is plain grouping;
    # otherwise it opens an argument list.
    if self.is_python_keyword(prev_token) or prev_token and prev_token.kind != 'name':
        state_kind = '('
    else:
        state_kind = 'arg'
    scan_stack.append(ScanState(state_kind, token))
elif value == ')':
    assert top_state and top_state.kind in ('(', 'arg'), repr(top_state)
    if top_state.kind == 'arg':
        self.finish_arg(i, top_state)
    else:
        self.finish_paren(i, top_state)
    scan_stack.pop()

# Handle interior tokens in 'arg' and 'slice' states.
if top_state:
    if top_state.kind in ('dict', 'slice') and value == ':':
        top_state.value.append(i)
    # NOTE(review): substring test — compound ops such as '*=' are also
    # substrings of '**=:,'; confirm they cannot reach this point.
    if top_state.kind == 'arg' and value in '**=:,':
        top_state.value.append(i)

# Handle '.' and '(' tokens inside 'import' and 'from' statements.
if in_import and value in '(.':
    self.set_context(i, 'import')
# @+node:ekr.20240128231119.1: *7* << pre-scan 'name' tokens >>
# 'yield from x' must not be mistaken for the start of a 'from' import.
prev_is_yield = prev_token and prev_token.kind == 'name' and prev_token.value == 'yield'
if value in ('from', 'import') and not prev_is_yield:
    # 'import' and 'from x import' statements should be at the outer level.
    assert not scan_stack, scan_stack
    in_import = True
# @+node:ekr.20240129041304.1: *7* tbo.finish_arg
def finish_arg(self, end: int, state: Optional[ScanState]) -> None:
    """
    Set context for '=', ':', '*', and '**' tokens when scanning from '(' to ')'.

    end:   index of the closing ')' token.
    state: the 'arg' ScanState pushed for the matching '('; state.value
           holds the indices of interior '*', '**', '=', ':' and ',' tokens.
    """

    # Sanity checks.
    if not state:
        return
    assert state.kind == 'arg', repr(state)
    token = state.token
    assert token.value == '(', repr(token)
    values = state.value
    assert isinstance(values, list), repr(values)
    i1 = token.index
    assert i1 < end, (i1, end)
    if not values:
        return

    # Compute the context for each *separate* '=' token.
    # A preceding ':' (annotation) changes the meaning of the next '=';
    # each ',' resets the state for the next argument.
    equal_context = 'initializer'
    for i in values:
        token = self.input_tokens[i]
        assert token.kind == 'op', repr(token)
        if token.value == ',':
            equal_context = 'initializer'
        elif token.value == ':':
            equal_context = 'annotation'
        elif token.value == '=':
            self.set_context(i, equal_context)
            equal_context = 'initializer'

    # Set the context of all outer-level ':', '*', and '**' tokens.
    prev: Optional[InputToken] = None
    for i in range(i1, end):
        token = self.input_tokens[i]
        if token.kind not in self.insignificant_kinds:
            if token.kind == 'op':
                if token.value in ('*', '**'):
                    # Only *unary* '*'/'**' mark *args / **kwargs.
                    if self.is_unary_op_with_prev(prev, token):
                        self.set_context(i, 'arg')
                elif token.value == '=':
                    # The code above has set the context.
                    assert token.context in ('initializer', 'annotation'), (
                        i,
                        repr(token.context),
                    )
                elif token.value == ':':
                    self.set_context(i, 'annotation')
            prev = token

# @+node:ekr.20250507041900.1: *7* tbo.finish_paren
def finish_paren(self, end: int, state: Optional[ScanState]) -> None:
    """Set context for '=' tokens when scanning from '(' to ')'."""
    if not state:
        return
    # Sanity checks.
    assert state.kind == '(', repr(state)
    token = state.token
    assert token.value == '(', repr(token)
    i1 = token.index
    assert i1 < end, (i1, end)

    # Every '=' inside plain parentheses is an initializer.
    for tok in self.input_tokens[i1:end]:
        if tok.kind == 'op' and tok.value == '=':
            self.set_context(tok.index, 'initializer')

# @+node:ekr.20240128233406.1: *7* tbo.finish_slice
def finish_slice(self, end: int, state: ScanState) -> None:
    """
    Set context for all ':' when scanning from '[' to ']'.

    end:   index of the closing ']' token.
    state: the 'slice' ScanState pushed for the matching '['; state.value
           holds the indices of the outer-level ':' tokens.
    """

    # Sanity checks.
    assert state.kind == 'slice', repr(state)
    token = state.token
    assert token.value == '[', repr(token)
    colons = state.value
    assert isinstance(colons, list), repr(colons)
    i1 = token.index
    assert i1 < end, (i1, end)

    # Do nothing if there are no ':' tokens in the slice.
    if not colons:
        return

    # Compute final context by scanning the tokens.
    # The slice is 'complex' as soon as more than one significant token
    # appears between a pair of colons; otherwise it stays 'simple'.
    final_context = 'simple-slice'
    inter_colon_tokens = 0
    prev = token
    # NOTE(review): range(i1 + 1, end - 1) excludes the token at end - 1 —
    # confirm the last interior token should be skipped.
    for i in range(i1 + 1, end - 1):
        token = self.input_tokens[i]
        kind, value = token.kind, token.value
        if kind not in self.insignificant_kinds:
            if kind == 'op':
                if value == ':':
                    inter_colon_tokens = 0
                elif value in '-+':
                    # Ignore unary '-' or '+' tokens.
                    if not self.is_unary_op_with_prev(prev, token):
                        inter_colon_tokens += 1
                        if inter_colon_tokens > 1:
                            final_context = 'complex-slice'
                            break
                elif value == '~':
                    # '~' is always a unary op.
                    pass
                else:
                    # All other ops contribute.
                    inter_colon_tokens += 1
                    if inter_colon_tokens > 1:
                        final_context = 'complex-slice'
                        break
            else:
                inter_colon_tokens += 1
                if inter_colon_tokens > 1:
                    final_context = 'complex-slice'
                    break
            prev = token

    # Set the context of all outer-level ':' tokens.
    for i in colons:
        self.set_context(i, final_context)

# @+node:ekr.20240129040347.1: *7* tbo.finish_dict
def finish_dict(self, end: int, state: Optional[ScanState]) -> None:
    """
    Set context for all ':' when scanning from '{' to '}'

    Strictly speaking, setting this context is unnecessary because
    tbo.gen_colon generates the same code regardless of this context.

    In other words, this method can be a do-nothing!
    """
    if not state:
        return
    # Sanity checks.
    assert state.kind == 'dict', repr(state)
    token = state.token
    assert token.value == '{', repr(token)
    colons = state.value
    assert isinstance(colons, list), repr(colons)
    assert token.index < end, (token.index, end)

    # Mark every recorded ':' as a dict key/value separator.
    for colon_i in colons:
        self.set_context(colon_i, 'dict')

# @+node:ekr.20240129034209.1: *6* tbo.is_unary_op_with_prev
def is_unary_op_with_prev(self, prev: Optional[InputToken], token: InputToken) -> bool:
    """
    Return True if token is a unary op in the context of prev, the previous
    significant token.

    prev:  the previous significant input token, or None.
    token: an op token whose value is '~', '*', '**', '-' or '+'.
    """
    if token.value == '~':  # pragma: no cover
        return True
    if prev is None:
        return True  # pragma: no cover
    assert token.value in '**-+', repr(token.value)
    if prev.kind in ('number', 'string'):
        return_val = False
    elif prev.kind == 'op' and prev.value in ')]':
        # An unnecessary test?
        return_val = False  # pragma: no cover
    elif prev.kind == 'op' and prev.value in '{([:,':
        return_val = True
    elif prev.kind != 'name':
        # An unnecessary test?
        return_val = True  # pragma: no cover
    else:
        # prev is a 'name' token: a keyword (e.g. 'return', 'if') cannot end
        # an expression, so the op must be unary.
        # Fix: test *prev*, not the op token itself.  An op token can never
        # be a keyword, so the old `is_python_keyword(token)` was always
        # False here (compare is_unary_op, which tests the previous token).
        return self.is_python_keyword(prev)
    return return_val

# @+node:ekr.20240129035336.1: *6* tbo.is_python_keyword
def is_python_keyword(self, token: Optional[InputToken]) -> bool:
    """Return True if token is a 'name' token referring to a Python keyword."""
    if token and token.kind == 'name':
        value = token.value
        return keyword.iskeyword(value) or keyword.issoftkeyword(value)
    return False

# @+node:ekr.20240106170746.1: *6* tbo.set_context
def set_context(self, i: int, context: str) -> None:
    """
    Set self.input_tokens[i].context, but only if it does not already exist!

    See the docstring for pre_scan for details.
    """

    trace = False  # Do not delete the trace below.

    valid_contexts = frozenset({
        'annotation', 'arg', 'complex-slice', 'simple-slice',
        'dict', 'import', 'initializer',
    })
    if context not in valid_contexts:
        self.oops(f"Unexpected context! {context!r}")  # pragma: no cover

    token = self.input_tokens[i]

    if trace:  # pragma: no cover
        token_s = f"<{token.kind}: {token.show_val(12)}>"
        ignore_s = 'Ignore' if token.context else ' ' * 6
        print(f"{i:3} {ignore_s} token: {token_s} context: {context}")

    # First context wins: never overwrite an existing context.
    if not token.context:
        token.context = context

# @+node:ekr.20200702115002.1: *4* command: orange_command (leoAst.py)
def orange_command(
    arg_files: list[str],
    files: list[str],
    settings: Settings = None,
) -> None:
    """Beautify each file in `files`, reporting timing and the change count."""
    if not check_g():
        return
    t1 = time.process_time()
    any_changed = 0
    for filename in files:
        if os.path.exists(filename):
            # print(f"orange {filename}")
            changed = Orange(settings).beautify_file(filename)
            if changed:
                any_changed += 1
        else:
            # Fix: the f-string had lost its placeholder; report *which* file.
            print(f"file not found: {filename}")
    t2 = time.process_time()
    if any_changed or Orange(settings).verbose:
        n, files_s = any_changed, ','.join(arg_files)
        print(f"orange: {t2 - t1:3.1f} sec. changed {n} file{g.plural(n)} in {files_s}")

# @+node:ekr.20200702121315.1: *4* command: orange_diff_command
def orange_diff_command(files: list[str], settings: Settings = None) -> None:
    """Show the diff that beautifying would produce for each file in `files`."""
    if not check_g():
        return
    for filename in files:
        if os.path.exists(filename):
            # Fix: both f-strings had lost their {filename} placeholders.
            print(f"orange-diff {filename}")
            Orange(settings).beautify_file_diff(filename)
        else:
            print(f"file not found: {filename}")

# @+node:ekr.20200108045048.1: *4* orange_settings
def orange_settings(c: Cmdr) -> dict[str, object]:
    """Return a dictionary of settings for the leo.core.leoAst.Orange class."""
    allow_joined_strings = c.config.getBool('beautify-allow-joined-strings', default=False)
    n_max_join = c.config.getInt('beautify-max-join-line-length')
    n_max_split = c.config.getInt('beautify-max-split-line-length')
    # Default both limits to 88 columns.
    max_join_line_length = 88 if n_max_join is None else n_max_join
    max_split_line_length = 88 if n_max_split is None else n_max_split
    # Join <= Split.
    # pylint: disable=consider-using-min-builtin
    if max_join_line_length > max_split_line_length:
        max_join_line_length = max_split_line_length
    return {
        'allow_joined_strings': allow_joined_strings,
        'max_join_line_length': max_join_line_length,
        'max_split_line_length': max_split_line_length,
        'tab_width': abs(c.tab_width),
    }


# @-all
# @@nosearch
# @-leo
