
temp fix ydb oss sync config to unlock sync on /vendor dependency

alexv-smirnov committed 2 years ago
commit c140abc954

+ 7 - 0
CMakeLists.darwin.txt

@@ -172,6 +172,7 @@ add_subdirectory(contrib/restricted/abseil-cpp-tstring/y_absl/meta)
 add_subdirectory(contrib/restricted/abseil-cpp-tstring/y_absl/status)
 add_subdirectory(contrib/libs/grpc/third_party/address_sorting)
 add_subdirectory(contrib/libs/re2)
+add_subdirectory(contrib/restricted/abseil-cpp-tstring/y_absl/algorithm)
 add_subdirectory(contrib/restricted/abseil-cpp-tstring/y_absl/functional)
 add_subdirectory(contrib/restricted/abseil-cpp-tstring/y_absl/utility)
 add_subdirectory(contrib/libs/grpc/grpc++)
@@ -998,6 +999,12 @@ add_subdirectory(ydb/library/yql/udfs/common/yson2)
 add_subdirectory(ydb/library/yql/udfs/logs/dsv)
 add_subdirectory(ydb/apps/ydb)
 add_subdirectory(ydb/apps/ydb/commands)
+add_subdirectory(ydb/public/sdk/cpp/client/iam)
+add_subdirectory(ydb/public/sdk/cpp/client/iam/proto/v1)
+add_subdirectory(contrib/libs/googleapis-common-protos)
+add_subdirectory(ydb/public/sdk/cpp/client/iam/impl)
+add_subdirectory(library/cpp/http/simple)
+add_subdirectory(ydb/public/sdk/cpp/client/iam/common)
 add_subdirectory(ydb/public/lib/ydb_cli/commands)
 add_subdirectory(library/cpp/threading/local_executor)
 add_subdirectory(contrib/libs/tbb)

+ 7 - 0
CMakeLists.linux.txt

@@ -175,6 +175,7 @@ add_subdirectory(contrib/restricted/abseil-cpp-tstring/y_absl/meta)
 add_subdirectory(contrib/restricted/abseil-cpp-tstring/y_absl/status)
 add_subdirectory(contrib/libs/grpc/third_party/address_sorting)
 add_subdirectory(contrib/libs/re2)
+add_subdirectory(contrib/restricted/abseil-cpp-tstring/y_absl/algorithm)
 add_subdirectory(contrib/restricted/abseil-cpp-tstring/y_absl/functional)
 add_subdirectory(contrib/restricted/abseil-cpp-tstring/y_absl/utility)
 add_subdirectory(contrib/libs/grpc/grpc++)
@@ -1002,6 +1003,12 @@ add_subdirectory(ydb/library/yql/udfs/common/yson2)
 add_subdirectory(ydb/library/yql/udfs/logs/dsv)
 add_subdirectory(ydb/apps/ydb)
 add_subdirectory(ydb/apps/ydb/commands)
+add_subdirectory(ydb/public/sdk/cpp/client/iam)
+add_subdirectory(ydb/public/sdk/cpp/client/iam/proto/v1)
+add_subdirectory(contrib/libs/googleapis-common-protos)
+add_subdirectory(ydb/public/sdk/cpp/client/iam/impl)
+add_subdirectory(library/cpp/http/simple)
+add_subdirectory(ydb/public/sdk/cpp/client/iam/common)
 add_subdirectory(ydb/public/lib/ydb_cli/commands)
 add_subdirectory(library/cpp/threading/local_executor)
 add_subdirectory(contrib/libs/tbb)

+ 45 - 0
build/scripts/cgo1_wrapper.py

@@ -0,0 +1,45 @@
+import argparse
+import shutil
+import subprocess
+import sys
+
+
+CGO1_SUFFIX = '.cgo1.go'
+
+
+def call(cmd, cwd, env=None):
+    # sys.stderr.write('{}\n'.format(' '.join(cmd)))
+    return subprocess.call(cmd, stdin=None, stderr=sys.stderr, stdout=sys.stdout, cwd=cwd, env=env)
+
+
+def process_file(source_root, source_prefix, build_root, build_prefix, src_path, comment_prefix):
+    dst_path = '{}.tmp'.format(src_path)
+    with open(src_path, 'r') as src_file, open(dst_path, 'w') as dst_file:
+        for line in src_file:
+            if line.startswith(comment_prefix):
+                dst_file.write(line.replace(source_root, source_prefix).replace(build_root, build_prefix))
+            else:
+                dst_file.write(line)
+    shutil.move(dst_path, src_path)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--build-prefix', default='__ARCADIA_BUILD_ROOT_PREFIX__')
+    parser.add_argument('--build-root', required=True)
+    parser.add_argument('--cgo1-files', nargs='+', required=True)
+    parser.add_argument('--cgo2-files', nargs='+', required=True)
+    parser.add_argument('--source-prefix', default='__ARCADIA_SOURCE_ROOT_PREFIX__')
+    parser.add_argument('--source-root', required=True)
+    parser.add_argument('cgo1_cmd', nargs='*')
+    args = parser.parse_args()
+
+    exit_code = call(args.cgo1_cmd, args.source_root)
+    if exit_code != 0:
+        sys.exit(exit_code)
+
+    for src_path in args.cgo1_files:
+        process_file(args.source_root, args.source_prefix, args.build_root, args.build_prefix, src_path, '//')
+
+    for src_path in args.cgo2_files:
+        process_file(args.source_root, args.source_prefix, args.build_root, args.build_prefix, src_path, '#line')
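
For illustration, a minimal self-contained sketch of the rewrite process_file performs; the checkout path below is hypothetical. cgo emits comment and #line directives that embed absolute source and build paths, and the wrapper swaps them for stable placeholders so the generated files do not depend on the build machine:

    import os
    import tempfile

    source_root = '/home/user/arcadia'  # hypothetical checkout path
    src = tempfile.NamedTemporaryFile('w', suffix='.cgo1.go', delete=False)
    src.write('//line {}/mypkg/foo.go:10\n'.format(source_root))
    src.write('func foo() {}\n')
    src.close()

    # Same logic as process_file above: rewrite only directive lines.
    dst_path = src.name + '.tmp'
    with open(src.name) as src_file, open(dst_path, 'w') as dst_file:
        for line in src_file:
            if line.startswith('//'):
                line = line.replace(source_root, '__ARCADIA_SOURCE_ROOT_PREFIX__')
            dst_file.write(line)
    os.replace(dst_path, src.name)
    # The file now starts with:
    # //line __ARCADIA_SOURCE_ROOT_PREFIX__/mypkg/foo.go:10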

+ 0 - 0
build/scripts/go_fake_include/go_asm.h


+ 867 - 0
build/scripts/go_tool.py

@@ -0,0 +1,867 @@
+from __future__ import absolute_import, unicode_literals
+import argparse
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tarfile
+import tempfile
+import threading
+import six
+from functools import reduce
+
+import process_command_files as pcf
+import process_whole_archive_option as pwa
+
+arc_project_prefix = 'a.yandex-team.ru/'
+std_lib_prefix = 'contrib/go/_std_1.18/src/'
+vendor_prefix = 'vendor/'
+vet_info_ext = '.vet.out'
+vet_report_ext = '.vet.txt'
+
+FIXED_CGO1_SUFFIX = '.fixed.cgo1.go'
+
+COMPILE_OPTIMIZATION_FLAGS = ('-N',)
+
+
+def get_trimpath_args(args):
+    return ['-trimpath', args.trimpath] if args.trimpath else []
+
+
+def preprocess_cgo1(src_path, dst_path, source_root):
+    with open(src_path, 'r') as f:
+        content = f.read()
+        content = content.replace('__ARCADIA_SOURCE_ROOT_PREFIX__', source_root)
+    with open(dst_path, 'w') as f:
+        f.write(content)
+
+
+def preprocess_args(args):
+    # Temporary workaround for noauto
+    if args.cgo_srcs and len(args.cgo_srcs) > 0:
+        cgo_srcs_set = set(args.cgo_srcs)
+        args.srcs = [x for x in args.srcs if x not in cgo_srcs_set]
+
+    args.pkg_root = os.path.join(args.toolchain_root, 'pkg')
+    toolchain_tool_root = os.path.join(args.pkg_root, 'tool', '{}_{}'.format(args.host_os, args.host_arch))
+    args.go_compile = os.path.join(toolchain_tool_root, 'compile')
+    args.go_cgo = os.path.join(toolchain_tool_root, 'cgo')
+    args.go_link = os.path.join(toolchain_tool_root, 'link')
+    args.go_asm = os.path.join(toolchain_tool_root, 'asm')
+    args.go_pack = os.path.join(toolchain_tool_root, 'pack')
+    args.go_vet = os.path.join(toolchain_tool_root, 'vet') if args.vet is True else args.vet
+    args.output = os.path.normpath(args.output)
+    args.vet_report_output = vet_report_output_name(args.output, args.vet_report_ext)
+    args.trimpath = None
+    if args.debug_root_map:
+        roots = {'build': args.build_root, 'source': args.source_root, 'tools': args.tools_root}
+        replaces = []
+        for root in args.debug_root_map.split(';'):
+            src, dst = root.split('=', 1)
+            assert src in roots
+            replaces.append('{}=>{}'.format(roots[src], dst))
+            del roots[src]
+        assert len(replaces) > 0
+        args.trimpath = ';'.join(replaces)
+    args.build_root = os.path.normpath(args.build_root)
+    args.build_root_dir = args.build_root + os.path.sep
+    args.source_root = os.path.normpath(args.source_root)
+    args.source_root_dir = args.source_root + os.path.sep
+    args.output_root = os.path.normpath(args.output_root)
+    args.import_map = {}
+    args.module_map = {}
+    if args.cgo_peers:
+        args.cgo_peers = [x for x in args.cgo_peers if not x.endswith('.fake.pkg')]
+
+    srcs = []
+    for f in args.srcs:
+        if f.endswith('.gosrc'):
+            with tarfile.open(f, 'r') as tar:
+                srcs.extend(os.path.join(args.output_root, src) for src in tar.getnames())
+                tar.extractall(path=args.output_root)
+        else:
+            srcs.append(f)
+    args.srcs = srcs
+
+    assert args.mode == 'test' or args.test_srcs is None and args.xtest_srcs is None
+    # add lexical order by basename for go sources
+    args.srcs.sort(key=lambda x: os.path.basename(x))
+    if args.test_srcs:
+        args.srcs += sorted(args.test_srcs, key=lambda x: os.path.basename(x))
+        del args.test_srcs
+    if args.xtest_srcs:
+        args.xtest_srcs.sort(key=lambda x: os.path.basename(x))
+
+    # compute root relative module dir path
+    assert args.output is None or args.output_root == os.path.dirname(args.output)
+    assert args.output_root.startswith(args.build_root_dir)
+    args.module_path = args.output_root[len(args.build_root_dir):]
+    args.source_module_dir = os.path.join(args.source_root, args.test_import_path or args.module_path) + os.path.sep
+    assert len(args.module_path) > 0
+    args.import_path, args.is_std = get_import_path(args.module_path)
+
+    assert args.asmhdr is None or args.word == 'go'
+
+    srcs = []
+    for f in args.srcs:
+        if f.endswith(FIXED_CGO1_SUFFIX) and f.startswith(args.build_root_dir):
+            path = os.path.join(args.output_root, '{}.cgo1.go'.format(os.path.basename(f[:-len(FIXED_CGO1_SUFFIX)])))
+            srcs.append(path)
+            preprocess_cgo1(f, path, args.source_root)
+        else:
+            srcs.append(f)
+    args.srcs = srcs
+
+    if args.extldflags:
+        args.extldflags = pwa.ProcessWholeArchiveOption(args.targ_os).construct_cmd(args.extldflags)
+
+    classify_srcs(args.srcs, args)
+
+
+def compare_versions(version1, version2):
+    def last_index(version):
+        index = version.find('beta')
+        return len(version) if index < 0 else index
+
+    v1 = tuple(x.zfill(8) for x in version1[:last_index(version1)].split('.'))
+    v2 = tuple(x.zfill(8) for x in version2[:last_index(version2)].split('.'))
+    if v1 == v2:
+        return 0
+    return 1 if v1 < v2 else -1
+
+
+def get_symlink_or_copyfile():
+    os_symlink = getattr(os, 'symlink', None)
+    if os_symlink is None or os.name == 'nt':
+        os_symlink = shutil.copyfile
+    return os_symlink
+
+
+def copy_args(args):
+    return copy.copy(args)
+
+
+def get_vendor_index(import_path):
+    index = import_path.rfind('/' + vendor_prefix)
+    if index < 0:
+        index = 0 if import_path.startswith(vendor_prefix) else index
+    else:
+        index = index + 1
+    return index
+
+
+def get_import_path(module_path):
+    assert len(module_path) > 0
+    import_path = module_path.replace('\\', '/')
+    is_std_module = import_path.startswith(std_lib_prefix)
+    if is_std_module:
+        import_path = import_path[len(std_lib_prefix):]
+    elif import_path.startswith(vendor_prefix):
+        import_path = import_path[len(vendor_prefix):]
+    else:
+        import_path = arc_project_prefix + import_path
+    assert len(import_path) > 0
+    return import_path, is_std_module
+
+
+def call(cmd, cwd, env=None):
+    # sys.stderr.write('{}\n'.format(' '.join(cmd)))
+    return subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, cwd=cwd, env=env, text=True)
+
+
+def classify_srcs(srcs, args):
+    args.go_srcs = [x for x in srcs if x.endswith('.go')]
+    args.asm_srcs = [x for x in srcs if x.endswith('.s')]
+    args.objects = [x for x in srcs if x.endswith('.o') or x.endswith('.obj')]
+    args.symabis = [x for x in srcs if x.endswith('.symabis')]
+    args.sysos = [x for x in srcs if x.endswith('.syso')]
+
+
+def get_import_config_info(peers, gen_importmap, import_map={}, module_map={}):
+    info = {'importmap': [], 'packagefile': [], 'standard': {}}
+    if gen_importmap:
+        for key, value in six.iteritems(import_map):
+            info['importmap'].append((key, value))
+    for peer in peers:
+        peer_import_path, is_std = get_import_path(os.path.dirname(peer))
+        if gen_importmap:
+            index = get_vendor_index(peer_import_path)
+            if index >= 0:
+                index += len(vendor_prefix)
+                info['importmap'].append((peer_import_path[index:], peer_import_path))
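+        # NB: 'args' here is the module-level namespace parsed in __main__;
+        # it is not a parameter of this function.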
+        info['packagefile'].append((peer_import_path, os.path.join(args.build_root, peer)))
+        if is_std:
+            info['standard'][peer_import_path] = True
+    for key, value in six.iteritems(module_map):
+        info['packagefile'].append((key, value))
+    return info
+
+
+def create_import_config(peers, gen_importmap, import_map={}, module_map={}):
+    lines = []
+    info = get_import_config_info(peers, gen_importmap, import_map, module_map)
+    for key in ('importmap', 'packagefile'):
+        for item in info[key]:
+            lines.append('{} {}={}'.format(key, *item))
+    if len(lines) > 0:
+        lines.append('')
+        content = '\n'.join(lines)
+        # sys.stderr.writelines('{}\n'.format(l) for l in lines)
+        with tempfile.NamedTemporaryFile(delete=False) as f:
+            f.write(content.encode('UTF-8'))
+            return f.name
+    return None
+
+
+def create_embed_config(args):
+    data = {
+        'Patterns': {},
+        'Files': {},
+    }
+    for info in args.embed:
+        pattern = info[0]
+        if pattern.endswith('/**/*'):
+            pattern = pattern[:-3]
+        files = {os.path.relpath(f, args.source_module_dir).replace('\\', '/'): f for f in info[1:]}
+        data['Patterns'][pattern] = list(files.keys())
+        data['Files'].update(files)
+    # sys.stderr.write('{}\n'.format(json.dumps(data, indent=4)))
+    with tempfile.NamedTemporaryFile(delete=False, suffix='.embedcfg') as f:
+        f.write(json.dumps(data).encode('UTF-8'))
+        return f.name
+
+
+def vet_info_output_name(path, ext=None):
+    return '{}{}'.format(path, ext or vet_info_ext)
+
+
+def vet_report_output_name(path, ext=None):
+    return '{}{}'.format(path, ext or vet_report_ext)
+
+
+def get_source_path(args):
+    return args.test_import_path or args.module_path
+
+
+def gen_vet_info(args):
+    import_path = args.real_import_path if hasattr(args, 'real_import_path') else args.import_path
+    info = get_import_config_info(args.peers, True, args.import_map, args.module_map)
+
+    import_map = dict(info['importmap'])
+    # FIXME(snermolaev): it seems that adding import map for 'fake' package
+    #                    doesn't do any harm (it needs to be revised later)
+    import_map['unsafe'] = 'unsafe'
+
+    for (key, _) in info['packagefile']:
+        if key not in import_map:
+            import_map[key] = key
+
+    data = {
+        'ID': import_path,
+        'Compiler': 'gc',
+        'Dir': os.path.join(args.source_root, get_source_path(args)),
+        'ImportPath': import_path,
+        'GoFiles': [x for x in args.go_srcs if x.endswith('.go')],
+        'NonGoFiles': [x for x in args.go_srcs if not x.endswith('.go')],
+        'ImportMap': import_map,
+        'PackageFile': dict(info['packagefile']),
+        'Standard': dict(info['standard']),
+        'PackageVetx': dict((key, vet_info_output_name(value)) for key, value in info['packagefile']),
+        'VetxOnly': False,
+        'VetxOutput': vet_info_output_name(args.output),
+        'SucceedOnTypecheckFailure': False
+    }
+    # sys.stderr.write('{}\n'.format(json.dumps(data, indent=4)))
+    return data
+
+
+def create_vet_config(args, info):
+    with tempfile.NamedTemporaryFile(delete=False, suffix='.cfg') as f:
+        f.write(json.dumps(info).encode('UTF-8'))
+        return f.name
+
+
+def decode_vet_report(json_report):
+    report = ''
+    if json_report:
+        try:
+            full_diags = json.JSONDecoder().decode(json_report.decode('UTF-8'))
+        except ValueError:
+            report = json_report
+        else:
+            messages = []
+            for _, module_diags in six.iteritems(full_diags):
+                for _, type_diags in six.iteritems(module_diags):
+                    for diag in type_diags:
+                        messages.append('{}: {}'.format(diag['posn'], json.dumps(diag['message'])))
+            report = '\n'.join(messages)
+
+    return report
+
+
+def dump_vet_report(args, report):
+    if report:
+        report = report.replace(args.build_root, '$B')
+        report = report.replace(args.source_root, '$S')
+    with open(args.vet_report_output, 'w') as f:
+        f.write(report)
+
+
+def read_vet_report(args):
+    assert args
+    report = ''
+    if os.path.exists(args.vet_report_output):
+        with open(args.vet_report_output, 'r') as f:
+            report += f.read()
+    return report
+
+
+def dump_vet_report_for_tests(args, *test_args_list):
+    dump_vet_report(args, reduce(lambda x, y: x + read_vet_report(y), [_f for _f in test_args_list if _f], ''))
+
+
+def do_vet(args):
+    assert args.vet
+    info = gen_vet_info(args)
+    vet_config = create_vet_config(args, info)
+    cmd = [args.go_vet, '-json']
+    if args.vet_flags:
+        cmd.extend(args.vet_flags)
+    cmd.append(vet_config)
+    # sys.stderr.write('>>>> [{}]\n'.format(' '.join(cmd)))
+    p_vet = subprocess.Popen(cmd, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=args.source_root)
+    vet_out, vet_err = p_vet.communicate()
+    report = decode_vet_report(vet_out) if vet_out else ''
+    dump_vet_report(args, report)
+    if p_vet.returncode:
+        raise subprocess.CalledProcessError(returncode=p_vet.returncode, cmd=cmd, output=vet_err)
+
+
+def _do_compile_go(args):
+    import_path, is_std_module = args.import_path, args.is_std
+    cmd = [
+        args.go_compile,
+        '-o',
+        args.output,
+        '-p',
+        import_path,
+        '-D',
+        '""',
+        '-goversion',
+        'go{}'.format(args.goversion)
+    ]
+    if args.lang:
+        cmd.append('-lang=go{}'.format(args.lang))
+    cmd.extend(get_trimpath_args(args))
+    compiling_runtime = False
+    if is_std_module:
+        cmd.append('-std')
+        if import_path in ('runtime', 'internal/abi', 'internal/bytealg', 'internal/cpu') or import_path.startswith('runtime/internal/'):
+            cmd.append('-+')
+            compiling_runtime = True
+    import_config_name = create_import_config(args.peers, True, args.import_map, args.module_map)
+    if import_config_name:
+        cmd += ['-importcfg', import_config_name]
+    else:
+        if import_path == 'unsafe' or len(args.objects) > 0 or args.asmhdr:
+            pass
+        else:
+            cmd.append('-complete')
+    # if compare_versions('1.16', args.goversion) >= 0:
+    if args.embed:
+        embed_config_name = create_embed_config(args)
+        cmd.extend(['-embedcfg', embed_config_name])
+    if args.asmhdr:
+        cmd += ['-asmhdr', args.asmhdr]
+    # Use .symabis (supported since Go 1.12)
+    if args.symabis:
+        cmd += ['-symabis'] + args.symabis
+    # If 1.12 <= version < 1.13 we have to pass -allabis for 'runtime' and 'runtime/internal/atomic'
+    # if compare_versions('1.13', args.goversion) >= 0:
+    #     pass
+    # elif import_path in ('runtime', 'runtime/internal/atomic'):
+    #     cmd.append('-allabis')
+    compile_workers = '4'
+    if args.compile_flags:
+        if compiling_runtime:
+            cmd.extend(x for x in args.compile_flags if x not in COMPILE_OPTIMIZATION_FLAGS)
+        else:
+            cmd.extend(args.compile_flags)
+        if any([x in ('-race', '-shared') for x in args.compile_flags]):
+            compile_workers = '1'
+    cmd += ['-pack', '-c={}'.format(compile_workers)]
+    cmd += args.go_srcs
+    call(cmd, args.build_root)
+
+
+class VetThread(threading.Thread):
+
+    def __init__(self, target, args):
+        super(VetThread, self).__init__(target=target, args=args)
+        self.exc_info = None
+
+    def run(self):
+        try:
+            super(VetThread, self).run()
+        except:
+            self.exc_info = sys.exc_info()
+
+    def join_with_exception(self, reraise_exception):
+        self.join()
+        if reraise_exception and self.exc_info:
+            six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
+
+
+def do_compile_go(args):
+    raise_exception_from_vet = False
+    if args.vet:
+        run_vet = VetThread(target=do_vet, args=(args,))
+        run_vet.start()
+    try:
+        _do_compile_go(args)
+        raise_exception_from_vet = True
+    finally:
+        if args.vet:
+            run_vet.join_with_exception(raise_exception_from_vet)
+
+
+def do_compile_asm(args):
+    def need_compiling_runtime(import_path):
+        return import_path in ('runtime', 'reflect', 'syscall') or \
+            import_path.startswith('runtime/internal/') or \
+            compare_versions('1.17', args.goversion) >= 0 and import_path == 'internal/bytealg'
+
+    assert(len(args.srcs) == 1 and len(args.asm_srcs) == 1)
+    cmd = [args.go_asm]
+    cmd += get_trimpath_args(args)
+    cmd += ['-I', args.output_root, '-I', os.path.join(args.pkg_root, 'include')]
+    cmd += ['-D', 'GOOS_' + args.targ_os, '-D', 'GOARCH_' + args.targ_arch, '-o', args.output]
+
+    # if compare_versions('1.16', args.goversion) >= 0:
+    cmd += ['-p', args.import_path]
+    if need_compiling_runtime(args.import_path):
+        cmd += ['-compiling-runtime']
+
+    if args.asm_flags:
+        cmd += args.asm_flags
+    cmd += args.asm_srcs
+    call(cmd, args.build_root)
+
+
+def do_link_lib(args):
+    if len(args.asm_srcs) > 0:
+        asmargs = copy_args(args)
+        asmargs.asmhdr = os.path.join(asmargs.output_root, 'go_asm.h')
+        do_compile_go(asmargs)
+        for src in asmargs.asm_srcs:
+            asmargs.srcs = [src]
+            asmargs.asm_srcs = [src]
+            asmargs.output = os.path.join(asmargs.output_root, os.path.basename(src) + '.o')
+            do_compile_asm(asmargs)
+            args.objects.append(asmargs.output)
+    else:
+        do_compile_go(args)
+    if args.objects or args.sysos:
+        cmd = [args.go_pack, 'r', args.output] + args.objects + args.sysos
+        call(cmd, args.build_root)
+
+
+def do_link_exe(args):
+    assert args.extld is not None
+    assert args.non_local_peers is not None
+    compile_args = copy_args(args)
+    compile_args.output = os.path.join(args.output_root, 'main.a')
+    compile_args.real_import_path = compile_args.import_path
+    compile_args.import_path = 'main'
+
+    if args.vcs and os.path.isfile(compile_args.vcs):
+        build_info = os.path.join('library', 'go', 'core', 'buildinfo')
+        if any([x.startswith(build_info) for x in compile_args.peers]):
+            compile_args.go_srcs.append(compile_args.vcs)
+
+    do_link_lib(compile_args)
+    cmd = [args.go_link, '-o', args.output]
+    import_config_name = create_import_config(args.peers + args.non_local_peers, False, args.import_map, args.module_map)
+    if import_config_name:
+        cmd += ['-importcfg', import_config_name]
+    if args.link_flags:
+        cmd += args.link_flags
+
+    if args.mode in ('exe', 'test'):
+        cmd.append('-buildmode=exe')
+    elif args.mode == 'dll':
+        cmd.append('-buildmode=c-shared')
+    else:
+        assert False, 'Unexpected mode: {}'.format(args.mode)
+    cmd.append('-extld={}'.format(args.extld))
+
+    extldflags = []
+    if args.extldflags is not None:
+        filter_musl = bool
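+        # bool() keeps every non-empty flag, i.e. no filtering by default;
+        # it is narrowed below only for static musl linking.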
+        if args.musl:
+            cmd.append('-linkmode=external')
+            extldflags.append('-static')
+            filter_musl = lambda x: x not in ('-lc', '-ldl', '-lm', '-lpthread', '-lrt')
+        extldflags += [x for x in args.extldflags if filter_musl(x)]
+    cgo_peers = []
+    if args.cgo_peers is not None and len(args.cgo_peers) > 0:
+        is_group = args.targ_os == 'linux'
+        if is_group:
+            cgo_peers.append('-Wl,--start-group')
+        cgo_peers.extend(args.cgo_peers)
+        if is_group:
+            cgo_peers.append('-Wl,--end-group')
+    try:
+        index = extldflags.index('--cgo-peers')
+        extldflags = extldflags[:index] + cgo_peers + extldflags[index+1:]
+    except ValueError:
+        extldflags.extend(cgo_peers)
+    if len(extldflags) > 0:
+        cmd.append('-extldflags={}'.format(' '.join(extldflags)))
+    cmd.append(compile_args.output)
+    call(cmd, args.build_root)
+
+
+def gen_cover_info(args):
+    lines = []
+    lines.extend([
+        """
+var (
+    coverCounters = make(map[string][]uint32)
+    coverBlocks = make(map[string][]testing.CoverBlock)
+)
+        """,
+        'func init() {',
+    ])
+    for var, file in (x.split(':') for x in args.cover_info):
+        lines.append('    coverRegisterFile("{file}", _cover0.{var}.Count[:], _cover0.{var}.Pos[:], _cover0.{var}.NumStmt[:])'.format(file=file, var=var))
+    lines.extend([
+        '}',
+        """
+func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
+    if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
+        panic("coverage: mismatched sizes")
+    }
+    if coverCounters[fileName] != nil {
+        // Already registered.
+        return
+    }
+    coverCounters[fileName] = counter
+    block := make([]testing.CoverBlock, len(counter))
+    for i := range counter {
+        block[i] = testing.CoverBlock{
+            Line0: pos[3*i+0],
+            Col0: uint16(pos[3*i+2]),
+            Line1: pos[3*i+1],
+            Col1: uint16(pos[3*i+2]>>16),
+            Stmts: numStmts[i],
+        }
+    }
+    coverBlocks[fileName] = block
+}
+        """,
+    ])
+    return lines
+
+
+def filter_out_skip_tests(tests, skip_tests):
+    skip_set = set()
+    star_skip_set = set()
+    for t in skip_tests:
+        work_set = star_skip_set if '*' in t else skip_set
+        work_set.add(t)
+
+    re_star_tests = None
+    if len(star_skip_set) > 0:
+        re_star_tests = re.compile(re.sub(r'(\*)+', r'.\1', '^({})$'.format('|'.join(star_skip_set))))
+
+    return [x for x in tests if not (x in skip_tests or re_star_tests and re_star_tests.match(x))]
+
+
+def gen_test_main(args, test_lib_args, xtest_lib_args):
+    assert args and (test_lib_args or xtest_lib_args)
+    test_miner = args.test_miner
+    test_module_path = test_lib_args.import_path if test_lib_args else xtest_lib_args.import_path
+    is_cover = args.cover_info and len(args.cover_info) > 0
+
+    # Prepare GOPATH
+    # $BINDIR
+    #    |- __go__
+    #        |- src
+    #        |- pkg
+    #            |- ${TARGET_OS}_${TARGET_ARCH}
+    go_path_root = os.path.join(args.output_root, '__go__')
+    test_src_dir = os.path.join(go_path_root, 'src')
+    target_os_arch = '_'.join([args.targ_os, args.targ_arch])
+    test_pkg_dir = os.path.join(go_path_root, 'pkg', target_os_arch, os.path.dirname(test_module_path))
+    os.makedirs(test_pkg_dir)
+
+    my_env = os.environ.copy()
+    my_env['GOROOT'] = ''
+    my_env['GOPATH'] = go_path_root
+    my_env['GOARCH'] = args.targ_arch
+    my_env['GOOS'] = args.targ_os
+
+    tests = []
+    xtests = []
+    os_symlink = get_symlink_or_copyfile()
+
+    # Get the list of "internal" tests
+    if test_lib_args:
+        os.makedirs(os.path.join(test_src_dir, test_module_path))
+        os_symlink(test_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(test_module_path) + '.a'))
+        cmd = [test_miner, '-benchmarks', '-tests', test_module_path]
+        tests = [x for x in (call(cmd, test_lib_args.output_root, my_env) or '').strip().split('\n') if len(x) > 0]
+        if args.skip_tests:
+            tests = filter_out_skip_tests(tests, args.skip_tests)
+    test_main_found = '#TestMain' in tests
+
+    # Get the list of "external" tests
+    if xtest_lib_args:
+        xtest_module_path = xtest_lib_args.import_path
+        os.makedirs(os.path.join(test_src_dir, xtest_module_path))
+        os_symlink(xtest_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(xtest_module_path) + '.a'))
+        cmd = [test_miner, '-benchmarks', '-tests', xtest_module_path]
+        xtests = [x for x in (call(cmd, xtest_lib_args.output_root, my_env) or '').strip().split('\n') if len(x) > 0]
+        if args.skip_tests:
+            xtests = filter_out_skip_tests(xtests, args.skip_tests)
+    xtest_main_found = '#TestMain' in xtests
+
+    test_main_package = None
+    if test_main_found and xtest_main_found:
+        assert False, 'multiple definition of TestMain'
+    elif test_main_found:
+        test_main_package = '_test'
+    elif xtest_main_found:
+        test_main_package = '_xtest'
+
+    shutil.rmtree(go_path_root)
+
+    lines = ['package main', '', 'import (']
+    if test_main_package is None:
+        lines.append('    "os"')
+    lines.extend(['    "testing"', '    "testing/internal/testdeps"'])
+
+    if len(tests) > 0:
+        lines.append('    _test "{}"'.format(test_module_path))
+    elif test_lib_args:
+        lines.append('    _ "{}"'.format(test_module_path))
+
+    if len(xtests) > 0:
+        lines.append('    _xtest "{}"'.format(xtest_module_path))
+    elif xtest_lib_args:
+        lines.append('    _ "{}"'.format(xtest_module_path))
+
+    if is_cover:
+        lines.append('    _cover0 "{}"'.format(test_module_path))
+    lines.extend([')', ''])
+
+    if compare_versions('1.18', args.goversion) < 0:
+        kinds = ['Test', 'Benchmark', 'Example']
+    else:
+        kinds = ['Test', 'Benchmark', 'FuzzTarget', 'Example']
+
+    var_names = []
+    for kind in kinds:
+        var_name = '{}s'.format(kind.lower())
+        var_names.append(var_name)
+        lines.append('var {} = []testing.Internal{}{{'.format(var_name, kind))
+        for test in [x for x in tests if x.startswith(kind)]:
+            lines.append('    {{"{test}", _test.{test}}},'.format(test=test))
+        for test in [x for x in xtests if x.startswith(kind)]:
+            lines.append('    {{"{test}", _xtest.{test}}},'.format(test=test))
+        lines.extend(['}', ''])
+
+    if is_cover:
+        lines.extend(gen_cover_info(args))
+
+    lines.append('func main() {')
+    if is_cover:
+        lines.extend([
+            '    testing.RegisterCover(testing.Cover{',
+            '        Mode: "set",',
+            '        Counters: coverCounters,',
+            '        Blocks: coverBlocks,',
+            '        CoveredPackages: "",',
+            '    })',
+        ])
+    lines.extend([
+        '    m := testing.MainStart(testdeps.TestDeps{{}}, {})'.format(', '.join(var_names)),
+        '',
+    ])
+
+    if test_main_package:
+        lines.append('    {}.TestMain(m)'.format(test_main_package))
+    else:
+        lines.append('    os.Exit(m.Run())')
+    lines.extend(['}', ''])
+
+    content = '\n'.join(lines)
+    # sys.stderr.write('{}\n'.format(content))
+    return content
+
+
+def do_link_test(args):
+    assert args.srcs or args.xtest_srcs
+    assert args.test_miner is not None
+
+    test_module_path = get_source_path(args)
+    test_import_path, _ = get_import_path(test_module_path)
+
+    test_lib_args = copy_args(args) if args.srcs else None
+    xtest_lib_args = copy_args(args) if args.xtest_srcs else None
+    if xtest_lib_args is not None:
+        xtest_lib_args.embed = args.embed_xtest if args.embed_xtest else None
+
+    ydx_file_name = None
+    xtest_ydx_file_name = None
+    need_append_ydx = test_lib_args and xtest_lib_args and args.ydx_file and args.vet_flags
+    if need_append_ydx:
+        def find_ydx_file_name(name, flags):
+            for i, elem in enumerate(flags):
+                if elem.endswith(name):
+                    return (i, elem)
+            assert False, 'Unreachable code'
+
+        idx, ydx_file_name = find_ydx_file_name(xtest_lib_args.ydx_file, xtest_lib_args.vet_flags)
+        xtest_ydx_file_name = '{}_xtest'.format(ydx_file_name)
+        xtest_lib_args.vet_flags = copy.copy(xtest_lib_args.vet_flags)
+        xtest_lib_args.vet_flags[idx] = xtest_ydx_file_name
+
+    if test_lib_args:
+        test_lib_args.output = os.path.join(args.output_root, 'test.a')
+        test_lib_args.vet_report_output = vet_report_output_name(test_lib_args.output)
+        test_lib_args.module_path = test_module_path
+        test_lib_args.import_path = test_import_path
+        do_link_lib(test_lib_args)
+
+    if xtest_lib_args:
+        xtest_lib_args.srcs = xtest_lib_args.xtest_srcs
+        classify_srcs(xtest_lib_args.srcs, xtest_lib_args)
+        xtest_lib_args.output = os.path.join(args.output_root, 'xtest.a')
+        xtest_lib_args.vet_report_output = vet_report_output_name(xtest_lib_args.output)
+        xtest_lib_args.module_path = test_module_path + '_test'
+        xtest_lib_args.import_path = test_import_path + '_test'
+        if test_lib_args:
+            xtest_lib_args.module_map[test_import_path] = test_lib_args.output
+        need_append_ydx = args.ydx_file and args.srcs and args.vet_flags
+        do_link_lib(xtest_lib_args)
+
+    if need_append_ydx:
+        with open(os.path.join(args.build_root, ydx_file_name), 'ab') as dst_file:
+            with open(os.path.join(args.build_root, xtest_ydx_file_name), 'rb') as src_file:
+                dst_file.write(src_file.read())
+
+    test_main_content = gen_test_main(args, test_lib_args, xtest_lib_args)
+    test_main_name = os.path.join(args.output_root, '_test_main.go')
+    with open(test_main_name, "w") as f:
+        f.write(test_main_content)
+    test_args = copy_args(args)
+    test_args.embed = None
+    test_args.srcs = [test_main_name]
+    if test_args.test_import_path is None:
+        # it seems that we could do this unconditionally, but this kind
+        # of mangling doesn't really look good, so we leave it
+        # for the pure GO_TEST module
+        test_args.module_path = test_args.module_path + '___test_main__'
+        test_args.import_path = test_args.import_path + '___test_main__'
+    classify_srcs(test_args.srcs, test_args)
+    if test_lib_args:
+        test_args.module_map[test_lib_args.import_path] = test_lib_args.output
+    if xtest_lib_args:
+        test_args.module_map[xtest_lib_args.import_path] = xtest_lib_args.output
+
+    if args.vet:
+        dump_vet_report_for_tests(test_args, test_lib_args, xtest_lib_args)
+    test_args.vet = False
+
+    do_link_exe(test_args)
+
+
+if __name__ == '__main__':
+    args = pcf.get_args(sys.argv[1:])
+
+    parser = argparse.ArgumentParser(prefix_chars='+')
+    parser.add_argument('++mode', choices=['dll', 'exe', 'lib', 'test'], required=True)
+    parser.add_argument('++srcs', nargs='*', required=True)
+    parser.add_argument('++cgo-srcs', nargs='*')
+    parser.add_argument('++test_srcs', nargs='*')
+    parser.add_argument('++xtest_srcs', nargs='*')
+    parser.add_argument('++cover_info', nargs='*')
+    parser.add_argument('++output', nargs='?', default=None)
+    parser.add_argument('++source-root', default=None)
+    parser.add_argument('++build-root', required=True)
+    parser.add_argument('++tools-root', default=None)
+    parser.add_argument('++output-root', required=True)
+    parser.add_argument('++toolchain-root', required=True)
+    parser.add_argument('++host-os', choices=['linux', 'darwin', 'windows'], required=True)
+    parser.add_argument('++host-arch', choices=['amd64', 'arm64'], required=True)
+    parser.add_argument('++targ-os', choices=['linux', 'darwin', 'windows'], required=True)
+    parser.add_argument('++targ-arch', choices=['amd64', 'x86', 'arm64'], required=True)
+    parser.add_argument('++peers', nargs='*')
+    parser.add_argument('++non-local-peers', nargs='*')
+    parser.add_argument('++cgo-peers', nargs='*')
+    parser.add_argument('++asmhdr', nargs='?', default=None)
+    parser.add_argument('++test-import-path', nargs='?')
+    parser.add_argument('++test-miner', nargs='?')
+    parser.add_argument('++arc-project-prefix', nargs='?', default=arc_project_prefix)
+    parser.add_argument('++std-lib-prefix', nargs='?', default=std_lib_prefix)
+    parser.add_argument('++vendor-prefix', nargs='?', default=vendor_prefix)
+    parser.add_argument('++extld', nargs='?', default=None)
+    parser.add_argument('++extldflags', nargs='+', default=None)
+    parser.add_argument('++goversion', required=True)
+    parser.add_argument('++lang', nargs='?', default=None)
+    parser.add_argument('++asm-flags', nargs='*')
+    parser.add_argument('++compile-flags', nargs='*')
+    parser.add_argument('++link-flags', nargs='*')
+    parser.add_argument('++vcs', nargs='?', default=None)
+    parser.add_argument('++vet', nargs='?', const=True, default=False)
+    parser.add_argument('++vet-flags', nargs='*', default=None)
+    parser.add_argument('++vet-info-ext', default=vet_info_ext)
+    parser.add_argument('++vet-report-ext', default=vet_report_ext)
+    parser.add_argument('++musl', action='store_true')
+    parser.add_argument('++skip-tests', nargs='*', default=None)
+    parser.add_argument('++ydx-file', default='')
+    parser.add_argument('++debug-root-map', default=None)
+    parser.add_argument('++embed', action='append', nargs='*')
+    parser.add_argument('++embed_xtest', action='append', nargs='*')
+    args = parser.parse_args(args)
+
+    arc_project_prefix = args.arc_project_prefix
+    std_lib_prefix = args.std_lib_prefix
+    vendor_prefix = args.vendor_prefix
+    vet_info_ext = args.vet_info_ext
+    vet_report_ext = args.vet_report_ext
+
+    preprocess_args(args)
+
+    try:
+        os.unlink(args.output)
+    except OSError:
+        pass
+
+    # Currently we support only the 'lib', 'exe' and 'cgo' build modes, so we
+    # generate only one build node per module (or program)
+    dispatch = {
+        'exe': do_link_exe,
+        'dll': do_link_exe,
+        'lib': do_link_lib,
+        'test': do_link_test
+    }
+
+    exit_code = 1
+    try:
+        dispatch[args.mode](args)
+        exit_code = 0
+    except KeyError:
+        sys.stderr.write('Unknown build mode [{}]...\n'.format(args.mode))
+    except subprocess.CalledProcessError as e:
+        sys.stderr.write('{} returned non-zero exit code {}.\n{}\n'.format(' '.join(e.cmd), e.returncode, e.output))
+        exit_code = e.returncode
+    except Exception as e:
+        sys.stderr.write('Unhandled exception [{}]...\n'.format(str(e)))
+    sys.exit(exit_code)
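
Two quick notes on the script above, each with a schematic example. First, the parser uses '+' as its prefix character so that '-' flags can be forwarded untouched to the Go tools; the invocation below is hypothetical (paths and values are made up, and only a subset of the options is shown). Second, compare_versions orders version strings with 1 meaning the first argument is older, -1 newer, and 0 equal, ignoring a 'beta' suffix:

    # Schematic invocation (hypothetical paths; '++' options are consumed by
    # the wrapper, anything starting with '-' passes through):
    #   python3 go_tool.py ++mode lib ++srcs a.go b.go ++peers \
    #       ++source-root /src ++build-root /bld ++output-root /bld/mypkg \
    #       ++output /bld/mypkg/pkg.a ++toolchain-root /gotc \
    #       ++host-os linux ++host-arch amd64 ++targ-os linux \
    #       ++targ-arch amd64 ++goversion 1.18

    # compare_versions semantics (uses the function defined above):
    assert compare_versions('1.17', '1.18') == 1       # first argument is older
    assert compare_versions('1.18', '1.17') == -1      # first argument is newer
    assert compare_versions('1.18', '1.18beta1') == 0  # 'beta' suffix ignored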

+ 813 - 0
contrib/go/_std_1.18/src/bufio/bufio.go

@@ -0,0 +1,813 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer
+// object, creating another object (Reader or Writer) that also implements
+// the interface but provides buffering and some help for textual I/O.
+package bufio
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"strings"
+	"unicode/utf8"
+)
+
+const (
+	defaultBufSize = 4096
+)
+
+var (
+	ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
+	ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
+	ErrBufferFull        = errors.New("bufio: buffer full")
+	ErrNegativeCount     = errors.New("bufio: negative count")
+)
+
+// Buffered input.
+
+// Reader implements buffering for an io.Reader object.
+type Reader struct {
+	buf          []byte
+	rd           io.Reader // reader provided by the client
+	r, w         int       // buf read and write positions
+	err          error
+	lastByte     int // last byte read for UnreadByte; -1 means invalid
+	lastRuneSize int // size of last rune read for UnreadRune; -1 means invalid
+}
+
+const minReadBufferSize = 16
+const maxConsecutiveEmptyReads = 100
+
+// NewReaderSize returns a new Reader whose buffer has at least the specified
+// size. If the argument io.Reader is already a Reader with large enough
+// size, it returns the underlying Reader.
+func NewReaderSize(rd io.Reader, size int) *Reader {
+	// Is it already a Reader?
+	b, ok := rd.(*Reader)
+	if ok && len(b.buf) >= size {
+		return b
+	}
+	if size < minReadBufferSize {
+		size = minReadBufferSize
+	}
+	r := new(Reader)
+	r.reset(make([]byte, size), rd)
+	return r
+}
+
+// NewReader returns a new Reader whose buffer has the default size.
+func NewReader(rd io.Reader) *Reader {
+	return NewReaderSize(rd, defaultBufSize)
+}
+
+// Size returns the size of the underlying buffer in bytes.
+func (b *Reader) Size() int { return len(b.buf) }
+
+// Reset discards any buffered data, resets all state, and switches
+// the buffered reader to read from r.
+// Calling Reset on the zero value of Reader initializes the internal buffer
+// to the default size.
+func (b *Reader) Reset(r io.Reader) {
+	if b.buf == nil {
+		b.buf = make([]byte, defaultBufSize)
+	}
+	b.reset(b.buf, r)
+}
+
+func (b *Reader) reset(buf []byte, r io.Reader) {
+	*b = Reader{
+		buf:          buf,
+		rd:           r,
+		lastByte:     -1,
+		lastRuneSize: -1,
+	}
+}
+
+var errNegativeRead = errors.New("bufio: reader returned negative count from Read")
+
+// fill reads a new chunk into the buffer.
+func (b *Reader) fill() {
+	// Slide existing data to beginning.
+	if b.r > 0 {
+		copy(b.buf, b.buf[b.r:b.w])
+		b.w -= b.r
+		b.r = 0
+	}
+
+	if b.w >= len(b.buf) {
+		panic("bufio: tried to fill full buffer")
+	}
+
+	// Read new data: try a limited number of times.
+	for i := maxConsecutiveEmptyReads; i > 0; i-- {
+		n, err := b.rd.Read(b.buf[b.w:])
+		if n < 0 {
+			panic(errNegativeRead)
+		}
+		b.w += n
+		if err != nil {
+			b.err = err
+			return
+		}
+		if n > 0 {
+			return
+		}
+	}
+	b.err = io.ErrNoProgress
+}
+
+func (b *Reader) readErr() error {
+	err := b.err
+	b.err = nil
+	return err
+}
+
+// Peek returns the next n bytes without advancing the reader. The bytes stop
+// being valid at the next read call. If Peek returns fewer than n bytes, it
+// also returns an error explaining why the read is short. The error is
+// ErrBufferFull if n is larger than b's buffer size.
+//
+// Calling Peek prevents a UnreadByte or UnreadRune call from succeeding
+// until the next read operation.
+func (b *Reader) Peek(n int) ([]byte, error) {
+	if n < 0 {
+		return nil, ErrNegativeCount
+	}
+
+	b.lastByte = -1
+	b.lastRuneSize = -1
+
+	for b.w-b.r < n && b.w-b.r < len(b.buf) && b.err == nil {
+		b.fill() // b.w-b.r < len(b.buf) => buffer is not full
+	}
+
+	if n > len(b.buf) {
+		return b.buf[b.r:b.w], ErrBufferFull
+	}
+
+	// 0 <= n <= len(b.buf)
+	var err error
+	if avail := b.w - b.r; avail < n {
+		// not enough data in buffer
+		n = avail
+		err = b.readErr()
+		if err == nil {
+			err = ErrBufferFull
+		}
+	}
+	return b.buf[b.r : b.r+n], err
+}
+
+// Discard skips the next n bytes, returning the number of bytes discarded.
+//
+// If Discard skips fewer than n bytes, it also returns an error.
+// If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without
+// reading from the underlying io.Reader.
+func (b *Reader) Discard(n int) (discarded int, err error) {
+	if n < 0 {
+		return 0, ErrNegativeCount
+	}
+	if n == 0 {
+		return
+	}
+
+	b.lastByte = -1
+	b.lastRuneSize = -1
+
+	remain := n
+	for {
+		skip := b.Buffered()
+		if skip == 0 {
+			b.fill()
+			skip = b.Buffered()
+		}
+		if skip > remain {
+			skip = remain
+		}
+		b.r += skip
+		remain -= skip
+		if remain == 0 {
+			return n, nil
+		}
+		if b.err != nil {
+			return n - remain, b.readErr()
+		}
+	}
+}
+
+// Read reads data into p.
+// It returns the number of bytes read into p.
+// The bytes are taken from at most one Read on the underlying Reader,
+// hence n may be less than len(p).
+// To read exactly len(p) bytes, use io.ReadFull(b, p).
+// At EOF, the count will be zero and err will be io.EOF.
+func (b *Reader) Read(p []byte) (n int, err error) {
+	n = len(p)
+	if n == 0 {
+		if b.Buffered() > 0 {
+			return 0, nil
+		}
+		return 0, b.readErr()
+	}
+	if b.r == b.w {
+		if b.err != nil {
+			return 0, b.readErr()
+		}
+		if len(p) >= len(b.buf) {
+			// Large read, empty buffer.
+			// Read directly into p to avoid copy.
+			n, b.err = b.rd.Read(p)
+			if n < 0 {
+				panic(errNegativeRead)
+			}
+			if n > 0 {
+				b.lastByte = int(p[n-1])
+				b.lastRuneSize = -1
+			}
+			return n, b.readErr()
+		}
+		// One read.
+		// Do not use b.fill, which will loop.
+		b.r = 0
+		b.w = 0
+		n, b.err = b.rd.Read(b.buf)
+		if n < 0 {
+			panic(errNegativeRead)
+		}
+		if n == 0 {
+			return 0, b.readErr()
+		}
+		b.w += n
+	}
+
+	// copy as much as we can
+	// Note: if the slice panics here, it is probably because
+	// the underlying reader returned a bad count. See issue 49795.
+	n = copy(p, b.buf[b.r:b.w])
+	b.r += n
+	b.lastByte = int(b.buf[b.r-1])
+	b.lastRuneSize = -1
+	return n, nil
+}
+
+// ReadByte reads and returns a single byte.
+// If no byte is available, returns an error.
+func (b *Reader) ReadByte() (byte, error) {
+	b.lastRuneSize = -1
+	for b.r == b.w {
+		if b.err != nil {
+			return 0, b.readErr()
+		}
+		b.fill() // buffer is empty
+	}
+	c := b.buf[b.r]
+	b.r++
+	b.lastByte = int(c)
+	return c, nil
+}
+
+// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
+//
+// UnreadByte returns an error if the most recent method called on the
+// Reader was not a read operation. Notably, Peek, Discard, and WriteTo are not
+// considered read operations.
+func (b *Reader) UnreadByte() error {
+	if b.lastByte < 0 || b.r == 0 && b.w > 0 {
+		return ErrInvalidUnreadByte
+	}
+	// b.r > 0 || b.w == 0
+	if b.r > 0 {
+		b.r--
+	} else {
+		// b.r == 0 && b.w == 0
+		b.w = 1
+	}
+	b.buf[b.r] = byte(b.lastByte)
+	b.lastByte = -1
+	b.lastRuneSize = -1
+	return nil
+}
+
+// ReadRune reads a single UTF-8 encoded Unicode character and returns the
+// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
+// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
+func (b *Reader) ReadRune() (r rune, size int, err error) {
+	for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) {
+		b.fill() // b.w-b.r < len(buf) => buffer is not full
+	}
+	b.lastRuneSize = -1
+	if b.r == b.w {
+		return 0, 0, b.readErr()
+	}
+	r, size = rune(b.buf[b.r]), 1
+	if r >= utf8.RuneSelf {
+		r, size = utf8.DecodeRune(b.buf[b.r:b.w])
+	}
+	b.r += size
+	b.lastByte = int(b.buf[b.r-1])
+	b.lastRuneSize = size
+	return r, size, nil
+}
+
+// UnreadRune unreads the last rune. If the most recent method called on
+// the Reader was not a ReadRune, UnreadRune returns an error. (In this
+// regard it is stricter than UnreadByte, which will unread the last byte
+// from any read operation.)
+func (b *Reader) UnreadRune() error {
+	if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
+		return ErrInvalidUnreadRune
+	}
+	b.r -= b.lastRuneSize
+	b.lastByte = -1
+	b.lastRuneSize = -1
+	return nil
+}
+
+// Buffered returns the number of bytes that can be read from the current buffer.
+func (b *Reader) Buffered() int { return b.w - b.r }
+
+// ReadSlice reads until the first occurrence of delim in the input,
+// returning a slice pointing at the bytes in the buffer.
+// The bytes stop being valid at the next read.
+// If ReadSlice encounters an error before finding a delimiter,
+// it returns all the data in the buffer and the error itself (often io.EOF).
+// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
+// Because the data returned from ReadSlice will be overwritten
+// by the next I/O operation, most clients should use
+// ReadBytes or ReadString instead.
+// ReadSlice returns err != nil if and only if line does not end in delim.
+func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
+	s := 0 // search start index
+	for {
+		// Search buffer.
+		if i := bytes.IndexByte(b.buf[b.r+s:b.w], delim); i >= 0 {
+			i += s
+			line = b.buf[b.r : b.r+i+1]
+			b.r += i + 1
+			break
+		}
+
+		// Pending error?
+		if b.err != nil {
+			line = b.buf[b.r:b.w]
+			b.r = b.w
+			err = b.readErr()
+			break
+		}
+
+		// Buffer full?
+		if b.Buffered() >= len(b.buf) {
+			b.r = b.w
+			line = b.buf
+			err = ErrBufferFull
+			break
+		}
+
+		s = b.w - b.r // do not rescan area we scanned before
+
+		b.fill() // buffer is not full
+	}
+
+	// Handle last byte, if any.
+	if i := len(line) - 1; i >= 0 {
+		b.lastByte = int(line[i])
+		b.lastRuneSize = -1
+	}
+
+	return
+}
+
+// ReadLine is a low-level line-reading primitive. Most callers should use
+// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
+//
+// ReadLine tries to return a single line, not including the end-of-line bytes.
+// If the line was too long for the buffer then isPrefix is set and the
+// beginning of the line is returned. The rest of the line will be returned
+// from future calls. isPrefix will be false when returning the last fragment
+// of the line. The returned buffer is only valid until the next call to
+// ReadLine. ReadLine either returns a non-nil line or it returns an error,
+// never both.
+//
+// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
+// No indication or error is given if the input ends without a final line end.
+// Calling UnreadByte after ReadLine will always unread the last byte read
+// (possibly a character belonging to the line end) even if that byte is not
+// part of the line returned by ReadLine.
+func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
+	line, err = b.ReadSlice('\n')
+	if err == ErrBufferFull {
+		// Handle the case where "\r\n" straddles the buffer.
+		if len(line) > 0 && line[len(line)-1] == '\r' {
+			// Put the '\r' back on buf and drop it from line.
+			// Let the next call to ReadLine check for "\r\n".
+			if b.r == 0 {
+				// should be unreachable
+				panic("bufio: tried to rewind past start of buffer")
+			}
+			b.r--
+			line = line[:len(line)-1]
+		}
+		return line, true, nil
+	}
+
+	if len(line) == 0 {
+		if err != nil {
+			line = nil
+		}
+		return
+	}
+	err = nil
+
+	if line[len(line)-1] == '\n' {
+		drop := 1
+		if len(line) > 1 && line[len(line)-2] == '\r' {
+			drop = 2
+		}
+		line = line[:len(line)-drop]
+	}
+	return
+}
+
+// collectFragments reads until the first occurrence of delim in the input. It
+// returns (slice of full buffers, remaining bytes before delim, total number
+// of bytes in the combined first two elements, error).
+// The complete result is equal to
+// `bytes.Join(append(fullBuffers, finalFragment), nil)`, which has a
+// length of `totalLen`. The result is structured in this way to allow callers
+// to minimize allocations and copies.
+func (b *Reader) collectFragments(delim byte) (fullBuffers [][]byte, finalFragment []byte, totalLen int, err error) {
+	var frag []byte
+	// Use ReadSlice to look for delim, accumulating full buffers.
+	for {
+		var e error
+		frag, e = b.ReadSlice(delim)
+		if e == nil { // got final fragment
+			break
+		}
+		if e != ErrBufferFull { // unexpected error
+			err = e
+			break
+		}
+
+		// Make a copy of the buffer.
+		buf := make([]byte, len(frag))
+		copy(buf, frag)
+		fullBuffers = append(fullBuffers, buf)
+		totalLen += len(buf)
+	}
+
+	totalLen += len(frag)
+	return fullBuffers, frag, totalLen, err
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+// For simple uses, a Scanner may be more convenient.
+func (b *Reader) ReadBytes(delim byte) ([]byte, error) {
+	full, frag, n, err := b.collectFragments(delim)
+	// Allocate new buffer to hold the full pieces and the fragment.
+	buf := make([]byte, n)
+	n = 0
+	// Copy full pieces and fragment in.
+	for i := range full {
+		n += copy(buf[n:], full[i])
+	}
+	copy(buf[n:], frag)
+	return buf, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end in
+// delim.
+// For simple uses, a Scanner may be more convenient.
+func (b *Reader) ReadString(delim byte) (string, error) {
+	full, frag, n, err := b.collectFragments(delim)
+	// Allocate new buffer to hold the full pieces and the fragment.
+	var buf strings.Builder
+	buf.Grow(n)
+	// Copy full pieces and fragment in.
+	for _, fb := range full {
+		buf.Write(fb)
+	}
+	buf.Write(frag)
+	return buf.String(), err
+}
+
+// WriteTo implements io.WriterTo.
+// This may make multiple calls to the Read method of the underlying Reader.
+// If the underlying reader supports the WriteTo method,
+// this calls the underlying WriteTo without buffering.
+func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
+	b.lastByte = -1
+	b.lastRuneSize = -1
+
+	n, err = b.writeBuf(w)
+	if err != nil {
+		return
+	}
+
+	if r, ok := b.rd.(io.WriterTo); ok {
+		m, err := r.WriteTo(w)
+		n += m
+		return n, err
+	}
+
+	if w, ok := w.(io.ReaderFrom); ok {
+		m, err := w.ReadFrom(b.rd)
+		n += m
+		return n, err
+	}
+
+	if b.w-b.r < len(b.buf) {
+		b.fill() // buffer not full
+	}
+
+	for b.r < b.w {
+		// b.r < b.w => buffer is not empty
+		m, err := b.writeBuf(w)
+		n += m
+		if err != nil {
+			return n, err
+		}
+		b.fill() // buffer is empty
+	}
+
+	if b.err == io.EOF {
+		b.err = nil
+	}
+
+	return n, b.readErr()
+}
+
+var errNegativeWrite = errors.New("bufio: writer returned negative count from Write")
+
+// writeBuf writes the Reader's buffer to the writer.
+func (b *Reader) writeBuf(w io.Writer) (int64, error) {
+	n, err := w.Write(b.buf[b.r:b.w])
+	if n < 0 {
+		panic(errNegativeWrite)
+	}
+	b.r += n
+	return int64(n), err
+}
+
+// buffered output
+
+// Writer implements buffering for an io.Writer object.
+// If an error occurs writing to a Writer, no more data will be
+// accepted and all subsequent writes, and Flush, will return the error.
+// After all data has been written, the client should call the
+// Flush method to guarantee all data has been forwarded to
+// the underlying io.Writer.
+type Writer struct {
+	err error
+	buf []byte
+	n   int
+	wr  io.Writer
+}
+
+// NewWriterSize returns a new Writer whose buffer has at least the specified
+// size. If the argument io.Writer is already a Writer with large enough
+// size, it returns the underlying Writer.
+func NewWriterSize(w io.Writer, size int) *Writer {
+	// Is it already a Writer?
+	b, ok := w.(*Writer)
+	if ok && len(b.buf) >= size {
+		return b
+	}
+	if size <= 0 {
+		size = defaultBufSize
+	}
+	return &Writer{
+		buf: make([]byte, size),
+		wr:  w,
+	}
+}
+
+// NewWriter returns a new Writer whose buffer has the default size.
+// If the argument io.Writer is already a Writer with large enough buffer size,
+// it returns the underlying Writer.
+func NewWriter(w io.Writer) *Writer {
+	return NewWriterSize(w, defaultBufSize)
+}
+
+// Size returns the size of the underlying buffer in bytes.
+func (b *Writer) Size() int { return len(b.buf) }
+
+// Reset discards any unflushed buffered data, clears any error, and
+// resets b to write its output to w.
+// Calling Reset on the zero value of Writer initializes the internal buffer
+// to the default size.
+func (b *Writer) Reset(w io.Writer) {
+	if b.buf == nil {
+		b.buf = make([]byte, defaultBufSize)
+	}
+	b.err = nil
+	b.n = 0
+	b.wr = w
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (b *Writer) Flush() error {
+	if b.err != nil {
+		return b.err
+	}
+	if b.n == 0 {
+		return nil
+	}
+	n, err := b.wr.Write(b.buf[0:b.n])
+	if n < b.n && err == nil {
+		err = io.ErrShortWrite
+	}
+	if err != nil {
+		if n > 0 && n < b.n {
+			copy(b.buf[0:b.n-n], b.buf[n:b.n])
+		}
+		b.n -= n
+		b.err = err
+		return err
+	}
+	b.n = 0
+	return nil
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (b *Writer) Available() int { return len(b.buf) - b.n }
+
+// AvailableBuffer returns an empty buffer with b.Available() capacity.
+// This buffer is intended to be appended to and
+// passed to an immediately succeeding Write call.
+// The buffer is only valid until the next write operation on b.
+func (b *Writer) AvailableBuffer() []byte {
+	return b.buf[b.n:][:0]
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (b *Writer) Buffered() int { return b.n }
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (b *Writer) Write(p []byte) (nn int, err error) {
+	for len(p) > b.Available() && b.err == nil {
+		var n int
+		if b.Buffered() == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, b.err = b.wr.Write(p)
+		} else {
+			n = copy(b.buf[b.n:], p)
+			b.n += n
+			b.Flush()
+		}
+		nn += n
+		p = p[n:]
+	}
+	if b.err != nil {
+		return nn, b.err
+	}
+	n := copy(b.buf[b.n:], p)
+	b.n += n
+	nn += n
+	return nn, nil
+}
+
+// WriteByte writes a single byte.
+func (b *Writer) WriteByte(c byte) error {
+	if b.err != nil {
+		return b.err
+	}
+	if b.Available() <= 0 && b.Flush() != nil {
+		return b.err
+	}
+	b.buf[b.n] = c
+	b.n++
+	return nil
+}
+
+// WriteRune writes a single Unicode code point, returning
+// the number of bytes written and any error.
+func (b *Writer) WriteRune(r rune) (size int, err error) {
+	// Compare as uint32 to correctly handle negative runes.
+	if uint32(r) < utf8.RuneSelf {
+		err = b.WriteByte(byte(r))
+		if err != nil {
+			return 0, err
+		}
+		return 1, nil
+	}
+	if b.err != nil {
+		return 0, b.err
+	}
+	n := b.Available()
+	if n < utf8.UTFMax {
+		if b.Flush(); b.err != nil {
+			return 0, b.err
+		}
+		n = b.Available()
+		if n < utf8.UTFMax {
+			// Can only happen if the buffer is smaller than utf8.UTFMax.
+			return b.WriteString(string(r))
+		}
+	}
+	size = utf8.EncodeRune(b.buf[b.n:], r)
+	b.n += size
+	return size, nil
+}
+
+// WriteString writes a string.
+// It returns the number of bytes written.
+// If the count is less than len(s), it also returns an error explaining
+// why the write is short.
+func (b *Writer) WriteString(s string) (int, error) {
+	nn := 0
+	for len(s) > b.Available() && b.err == nil {
+		n := copy(b.buf[b.n:], s)
+		b.n += n
+		nn += n
+		s = s[n:]
+		b.Flush()
+	}
+	if b.err != nil {
+		return nn, b.err
+	}
+	n := copy(b.buf[b.n:], s)
+	b.n += n
+	nn += n
+	return nn, nil
+}
+
+// ReadFrom implements io.ReaderFrom. If the underlying writer
+// supports the ReadFrom method, this calls the underlying ReadFrom.
+// If there is buffered data and an underlying ReadFrom, this fills
+// the buffer and writes it before calling ReadFrom.
+func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+	if b.err != nil {
+		return 0, b.err
+	}
+	readerFrom, readerFromOK := b.wr.(io.ReaderFrom)
+	var m int
+	for {
+		if b.Available() == 0 {
+			if err1 := b.Flush(); err1 != nil {
+				return n, err1
+			}
+		}
+		if readerFromOK && b.Buffered() == 0 {
+			nn, err := readerFrom.ReadFrom(r)
+			b.err = err
+			n += nn
+			return n, err
+		}
+		nr := 0
+		for nr < maxConsecutiveEmptyReads {
+			m, err = r.Read(b.buf[b.n:])
+			if m != 0 || err != nil {
+				break
+			}
+			nr++
+		}
+		if nr == maxConsecutiveEmptyReads {
+			return n, io.ErrNoProgress
+		}
+		b.n += m
+		n += int64(m)
+		if err != nil {
+			break
+		}
+	}
+	if err == io.EOF {
+		// If we filled the buffer exactly, flush preemptively.
+		if b.Available() == 0 {
+			err = b.Flush()
+		} else {
+			err = nil
+		}
+	}
+	return n, err
+}
+
+// buffered input and output
+
+// ReadWriter stores pointers to a Reader and a Writer.
+// It implements io.ReadWriter.
+type ReadWriter struct {
+	*Reader
+	*Writer
+}
+
+// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
+func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
+	return &ReadWriter{r, w}
+}

+ 420 - 0
contrib/go/_std_1.18/src/bufio/scan.go

@@ -0,0 +1,420 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bufio
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"unicode/utf8"
+)
+
+// Scanner provides a convenient interface for reading data such as
+// a file of newline-delimited lines of text. Successive calls to
+// the Scan method will step through the 'tokens' of a file, skipping
+// the bytes between the tokens. The specification of a token is
+// defined by a split function of type SplitFunc; the default split
+// function breaks the input into lines with line termination stripped. Split
+// functions are defined in this package for scanning a file into
+// lines, bytes, UTF-8-encoded runes, and space-delimited words. The
+// client may instead provide a custom split function.
+//
+// Scanning stops unrecoverably at EOF, the first I/O error, or a token too
+// large to fit in the buffer. When a scan stops, the reader may have
+// advanced arbitrarily far past the last token. Programs that need more
+// control over error handling or large tokens, or must run sequential scans
+// on a reader, should use bufio.Reader instead.
+type Scanner struct {
+	r            io.Reader // The reader provided by the client.
+	split        SplitFunc // The function to split the tokens.
+	maxTokenSize int       // Maximum size of a token; modified by tests.
+	token        []byte    // Last token returned by split.
+	buf          []byte    // Buffer used as argument to split.
+	start        int       // First non-processed byte in buf.
+	end          int       // End of data in buf.
+	err          error     // Sticky error.
+	empties      int       // Count of successive empty tokens.
+	scanCalled   bool      // Scan has been called; buffer is in use.
+	done         bool      // Scan has finished.
+}
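
The common case described above, sketched as a complete program (editorial illustration): scanning newline-delimited text with the default ScanLines split.

	package main

	import (
		"bufio"
		"fmt"
		"strings"
	)

	func main() {
		s := bufio.NewScanner(strings.NewReader("one\ntwo\nthree\n"))
		for s.Scan() {
			fmt.Println(s.Text()) // each line with the trailing `\r?\n` stripped
		}
		if err := s.Err(); err != nil { // io.EOF is reported as nil
			fmt.Println("scan error:", err)
		}
	}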
+
+// SplitFunc is the signature of the split function used to tokenize the
+// input. The arguments are an initial substring of the remaining unprocessed
+// data and a flag, atEOF, that reports whether the Reader has no more data
+// to give. The return values are the number of bytes to advance the input
+// and the next token to return to the user, if any, plus an error, if any.
+//
+// Scanning stops if the function returns an error, in which case some of
+// the input may be discarded. If that error is ErrFinalToken, scanning
+// stops with no error.
+//
+// Otherwise, the Scanner advances the input. If the token is not nil,
+// the Scanner returns it to the user. If the token is nil, the
+// Scanner reads more data and continues scanning; if there is no more
+// data--if atEOF was true--the Scanner returns. If the data does not
+// yet hold a complete token, for instance if it has no newline while
+// scanning lines, a SplitFunc can return (0, nil, nil) to signal the
+// Scanner to read more data into the slice and try again with a
+// longer slice starting at the same point in the input.
+//
+// The function is never called with an empty data slice unless atEOF
+// is true. If atEOF is true, however, data may be non-empty and,
+// as always, holds unprocessed text.
+type SplitFunc func(data []byte, atEOF bool) (advance int, token []byte, err error)
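
A minimal custom SplitFunc following this contract (editorial sketch; splitComma is an illustrative name): advance past the delimiter, return (0, nil, nil) to request more data, and emit the final field when atEOF is true.

	package main

	import (
		"bufio"
		"bytes"
		"fmt"
		"strings"
	)

	// splitComma tokenizes the input into the fields of a comma-separated list.
	func splitComma(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if atEOF && len(data) == 0 {
			return 0, nil, nil
		}
		if i := bytes.IndexByte(data, ','); i >= 0 {
			return i + 1, data[:i], nil // advance past the comma, token excludes it
		}
		if atEOF {
			return len(data), data, nil // final, unterminated field
		}
		return 0, nil, nil // request more data
	}

	func main() {
		s := bufio.NewScanner(strings.NewReader("a,b,c"))
		s.Split(splitComma)
		for s.Scan() {
			fmt.Printf("%q\n", s.Text()) // "a", "b", "c"
		}
	}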
+
+// Errors returned by Scanner.
+var (
+	ErrTooLong         = errors.New("bufio.Scanner: token too long")
+	ErrNegativeAdvance = errors.New("bufio.Scanner: SplitFunc returns negative advance count")
+	ErrAdvanceTooFar   = errors.New("bufio.Scanner: SplitFunc returns advance count beyond input")
+	ErrBadReadCount    = errors.New("bufio.Scanner: Read returned impossible count")
+)
+
+const (
+	// MaxScanTokenSize is the maximum size used to buffer a token
+	// unless the user provides an explicit buffer with Scanner.Buffer.
+	// The actual maximum token size may be smaller as the buffer
+	// may need to include, for instance, a newline.
+	MaxScanTokenSize = 64 * 1024
+
+	startBufSize = 4096 // Size of initial allocation for buffer.
+)
+
+// NewScanner returns a new Scanner to read from r.
+// The split function defaults to ScanLines.
+func NewScanner(r io.Reader) *Scanner {
+	return &Scanner{
+		r:            r,
+		split:        ScanLines,
+		maxTokenSize: MaxScanTokenSize,
+	}
+}
+
+// Err returns the first non-EOF error that was encountered by the Scanner.
+func (s *Scanner) Err() error {
+	if s.err == io.EOF {
+		return nil
+	}
+	return s.err
+}
+
+// Bytes returns the most recent token generated by a call to Scan.
+// The underlying array may point to data that will be overwritten
+// by a subsequent call to Scan. It does no allocation.
+func (s *Scanner) Bytes() []byte {
+	return s.token
+}
+
+// Text returns the most recent token generated by a call to Scan
+// as a newly allocated string holding its bytes.
+func (s *Scanner) Text() string {
+	return string(s.token)
+}
+
+// ErrFinalToken is a special sentinel error value. It is intended to be
+// returned by a Split function to indicate that the token being delivered
+// with the error is the last token and scanning should stop after this one.
+// After ErrFinalToken is received by Scan, scanning stops with no error.
+// The value is useful to stop processing early or when it is necessary to
+// deliver a final empty token. One could achieve the same behavior
+// with a custom error value but providing one here is tidier.
+// See the emptyFinalToken example for a use of this value.
+var ErrFinalToken = errors.New("final token")
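
A sketch in the spirit of that example (editorial illustration; onSTOP and the sentinel "STOP" are invented for the sketch, and the bufio and strings imports are assumed): a wrapper split function that ends the scan at a sentinel word.

	onSTOP := func(data []byte, atEOF bool) (int, []byte, error) {
		advance, token, err := bufio.ScanWords(data, atEOF)
		if err == nil && token != nil && string(token) == "STOP" {
			return advance, token, bufio.ErrFinalToken
		}
		return advance, token, err
	}
	s := bufio.NewScanner(strings.NewReader("go go STOP ignored"))
	s.Split(onSTOP)
	// Scan yields "go", "go", "STOP", then returns false with Err() == nil.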
+
+// Scan advances the Scanner to the next token, which will then be
+// available through the Bytes or Text method. It returns false when the
+// scan stops, either by reaching the end of the input or an error.
+// After Scan returns false, the Err method will return any error that
+// occurred during scanning, except that if it was io.EOF, Err
+// will return nil.
+// Scan panics if the split function returns too many empty
+// tokens without advancing the input. This is a common error mode for
+// scanners.
+func (s *Scanner) Scan() bool {
+	if s.done {
+		return false
+	}
+	s.scanCalled = true
+	// Loop until we have a token.
+	for {
+		// See if we can get a token with what we already have.
+		// If we've run out of data but have an error, give the split function
+		// a chance to recover any remaining, possibly empty token.
+		if s.end > s.start || s.err != nil {
+			advance, token, err := s.split(s.buf[s.start:s.end], s.err != nil)
+			if err != nil {
+				if err == ErrFinalToken {
+					s.token = token
+					s.done = true
+					return true
+				}
+				s.setErr(err)
+				return false
+			}
+			if !s.advance(advance) {
+				return false
+			}
+			s.token = token
+			if token != nil {
+				if s.err == nil || advance > 0 {
+					s.empties = 0
+				} else {
+					// Returning tokens not advancing input at EOF.
+					s.empties++
+					if s.empties > maxConsecutiveEmptyReads {
+						panic("bufio.Scan: too many empty tokens without progressing")
+					}
+				}
+				return true
+			}
+		}
+		// We cannot generate a token with what we are holding.
+		// If we've already hit EOF or an I/O error, we are done.
+		if s.err != nil {
+			// Shut it down.
+			s.start = 0
+			s.end = 0
+			return false
+		}
+		// Must read more data.
+		// First, shift data to beginning of buffer if there's lots of empty space
+		// or space is needed.
+		if s.start > 0 && (s.end == len(s.buf) || s.start > len(s.buf)/2) {
+			copy(s.buf, s.buf[s.start:s.end])
+			s.end -= s.start
+			s.start = 0
+		}
+		// Is the buffer full? If so, resize.
+		if s.end == len(s.buf) {
+			// Guarantee no overflow in the multiplication below.
+			const maxInt = int(^uint(0) >> 1)
+			if len(s.buf) >= s.maxTokenSize || len(s.buf) > maxInt/2 {
+				s.setErr(ErrTooLong)
+				return false
+			}
+			newSize := len(s.buf) * 2
+			if newSize == 0 {
+				newSize = startBufSize
+			}
+			if newSize > s.maxTokenSize {
+				newSize = s.maxTokenSize
+			}
+			newBuf := make([]byte, newSize)
+			copy(newBuf, s.buf[s.start:s.end])
+			s.buf = newBuf
+			s.end -= s.start
+			s.start = 0
+		}
+		// Finally we can read some input. Make sure we don't get stuck with
+		// a misbehaving Reader. Officially we don't need to do this, but let's
+		// be extra careful: Scanner is for safe, simple jobs.
+		for loop := 0; ; {
+			n, err := s.r.Read(s.buf[s.end:len(s.buf)])
+			if n < 0 || len(s.buf)-s.end < n {
+				s.setErr(ErrBadReadCount)
+				break
+			}
+			s.end += n
+			if err != nil {
+				s.setErr(err)
+				break
+			}
+			if n > 0 {
+				s.empties = 0
+				break
+			}
+			loop++
+			if loop > maxConsecutiveEmptyReads {
+				s.setErr(io.ErrNoProgress)
+				break
+			}
+		}
+	}
+}
+
+// advance consumes n bytes of the buffer. It reports whether the advance was legal.
+func (s *Scanner) advance(n int) bool {
+	if n < 0 {
+		s.setErr(ErrNegativeAdvance)
+		return false
+	}
+	if n > s.end-s.start {
+		s.setErr(ErrAdvanceTooFar)
+		return false
+	}
+	s.start += n
+	return true
+}
+
+// setErr records the first error encountered.
+func (s *Scanner) setErr(err error) {
+	if s.err == nil || s.err == io.EOF {
+		s.err = err
+	}
+}
+
+// Buffer sets the initial buffer to use when scanning and the maximum
+// size of buffer that may be allocated during scanning. The maximum
+// token size is the larger of max and cap(buf). If max <= cap(buf),
+// Scan will use this buffer only and do no allocation.
+//
+// By default, Scan uses an internal buffer and sets the
+// maximum token size to MaxScanTokenSize.
+//
+// Buffer panics if it is called after scanning has started.
+func (s *Scanner) Buffer(buf []byte, max int) {
+	if s.scanCalled {
+		panic("Buffer called after Scan")
+	}
+	s.buf = buf[0:cap(buf)]
+	s.maxTokenSize = max
+}
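
For example, to accept tokens of up to 1 MiB instead of the 64 KiB default (editorial sketch; r stands for any io.Reader, and Buffer must be called before the first Scan):

	s := bufio.NewScanner(r)
	s.Buffer(make([]byte, 0, 64*1024), 1024*1024) // start at 64 KiB, allow tokens up to 1 MiB
	for s.Scan() {
		// process s.Bytes() or s.Text()
	}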
+
+// Split sets the split function for the Scanner.
+// The default split function is ScanLines.
+//
+// Split panics if it is called after scanning has started.
+func (s *Scanner) Split(split SplitFunc) {
+	if s.scanCalled {
+		panic("Split called after Scan")
+	}
+	s.split = split
+}
+
+// Split functions
+
+// ScanBytes is a split function for a Scanner that returns each byte as a token.
+func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+	return 1, data[0:1], nil
+}
+
+var errorRune = []byte(string(utf8.RuneError))
+
+// ScanRunes is a split function for a Scanner that returns each
+// UTF-8-encoded rune as a token. The sequence of runes returned is
+// equivalent to that from a range loop over the input as a string, which
+// means that erroneous UTF-8 encodings translate to U+FFFD = "\xef\xbf\xbd".
+// Because of the Scan interface, this makes it impossible for the client to
+// distinguish correctly encoded replacement runes from encoding errors.
+func ScanRunes(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+
+	// Fast path 1: ASCII.
+	if data[0] < utf8.RuneSelf {
+		return 1, data[0:1], nil
+	}
+
+	// Fast path 2: Correct UTF-8 decode without error.
+	_, width := utf8.DecodeRune(data)
+	if width > 1 {
+		// It's a valid encoding. Width cannot be one for a correctly encoded
+		// non-ASCII rune.
+		return width, data[0:width], nil
+	}
+
+	// We know it's an error: we have width==1 and implicitly r==utf8.RuneError.
+	// Is the error because there wasn't a full rune to be decoded?
+	// FullRune distinguishes correctly between erroneous and incomplete encodings.
+	if !atEOF && !utf8.FullRune(data) {
+		// Incomplete; get more bytes.
+		return 0, nil, nil
+	}
+
+	// We have a real UTF-8 encoding error. Return a properly encoded error rune
+	// but advance only one byte. This matches the behavior of a range loop over
+	// an incorrectly encoded string.
+	return 1, errorRune, nil
+}
+
+// dropCR drops a terminal \r from the data.
+func dropCR(data []byte) []byte {
+	if len(data) > 0 && data[len(data)-1] == '\r' {
+		return data[0 : len(data)-1]
+	}
+	return data
+}
+
+// ScanLines is a split function for a Scanner that returns each line of
+// text, stripped of any trailing end-of-line marker. The returned line may
+// be empty. The end-of-line marker is one optional carriage return followed
+// by one mandatory newline. In regular expression notation, it is `\r?\n`.
+// The last non-empty line of input will be returned even if it has no
+// newline.
+func ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+	if i := bytes.IndexByte(data, '\n'); i >= 0 {
+		// We have a full newline-terminated line.
+		return i + 1, dropCR(data[0:i]), nil
+	}
+	// If we're at EOF, we have a final, non-terminated line. Return it.
+	if atEOF {
+		return len(data), dropCR(data), nil
+	}
+	// Request more data.
+	return 0, nil, nil
+}
+
+// isSpace reports whether the character is a Unicode white space character.
+// We avoid dependency on the unicode package, but check validity of the implementation
+// in the tests.
+func isSpace(r rune) bool {
+	if r <= '\u00FF' {
+		// Obvious ASCII ones: \t through \r plus space. Plus two Latin-1 oddballs.
+		switch r {
+		case ' ', '\t', '\n', '\v', '\f', '\r':
+			return true
+		case '\u0085', '\u00A0':
+			return true
+		}
+		return false
+	}
+	// High-valued ones.
+	if '\u2000' <= r && r <= '\u200a' {
+		return true
+	}
+	switch r {
+	case '\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000':
+		return true
+	}
+	return false
+}
+
+// ScanWords is a split function for a Scanner that returns each
+// space-separated word of text, with surrounding spaces deleted. It will
+// never return an empty string. The definition of space is set by
+// unicode.IsSpace.
+func ScanWords(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	// Skip leading spaces.
+	start := 0
+	for width := 0; start < len(data); start += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[start:])
+		if !isSpace(r) {
+			break
+		}
+	}
+	// Scan until space, marking end of word.
+	for width, i := 0, start; i < len(data); i += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[i:])
+		if isSpace(r) {
+			return i + width, data[start:i], nil
+		}
+	}
+	// If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
+	if atEOF && len(data) > start {
+		return len(data), data[start:], nil
+	}
+	// Request more data.
+	return start, nil, nil
+}
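
A short word-count program using this split function (editorial example, not part of the vendored file):

	package main

	import (
		"bufio"
		"fmt"
		"strings"
	)

	func main() {
		s := bufio.NewScanner(strings.NewReader("  lorem ipsum\tdolor\n"))
		s.Split(bufio.ScanWords)
		n := 0
		for s.Scan() {
			n++
		}
		fmt.Println(n, "words") // 3 words
	}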

+ 461 - 0
contrib/go/_std_1.18/src/bytes/buffer.go

@@ -0,0 +1,461 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytes
+
+// Simple byte buffer for marshaling data.
+
+import (
+	"errors"
+	"io"
+	"unicode/utf8"
+)
+
+// smallBufferSize is the minimal capacity for a Buffer's first allocation.
+const smallBufferSize = 64
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+	buf      []byte // contents are the bytes buf[off : len(buf)]
+	off      int    // read at &buf[off], write at &buf[len(buf)]
+	lastRead readOp // last read operation, so that Unread* can work correctly.
+}
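
Because the zero value is ready to use, no constructor is needed in the common case (editorial sketch):

	package main

	import (
		"bytes"
		"fmt"
	)

	func main() {
		var b bytes.Buffer // the zero value is an empty buffer ready to use
		b.WriteString("Hello, ")
		b.WriteString("buffer!")
		fmt.Println(b.String()) // Hello, buffer!
	}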
+
+// The readOp constants describe the last action performed on
+// the buffer, so that UnreadRune and UnreadByte can check for
+// invalid usage. opReadRuneX constants are chosen such that
+// converted to int they correspond to the rune size that was read.
+type readOp int8
+
+// Don't use iota for these, as the values need to correspond with the
+// names and comments, which is easier to see when being explicit.
+const (
+	opRead      readOp = -1 // Any other read operation.
+	opInvalid   readOp = 0  // Non-read operation.
+	opReadRune1 readOp = 1  // Read rune of size 1.
+	opReadRune2 readOp = 2  // Read rune of size 2.
+	opReadRune3 readOp = 3  // Read rune of size 3.
+	opReadRune4 readOp = 4  // Read rune of size 4.
+)
+
+// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
+var ErrTooLarge = errors.New("bytes.Buffer: too large")
+var errNegativeRead = errors.New("bytes.Buffer: reader returned negative count from Read")
+
+const maxInt = int(^uint(0) >> 1)
+
+// Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
+// The slice is valid for use only until the next buffer modification (that is,
+// only until the next call to a method like Read, Write, Reset, or Truncate).
+// The slice aliases the buffer content at least until the next buffer modification,
+// so immediate changes to the slice will affect the result of future reads.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "<nil>".
+//
+// To build strings more efficiently, see the strings.Builder type.
+func (b *Buffer) String() string {
+	if b == nil {
+		// Special case, useful in debugging.
+		return "<nil>"
+	}
+	return string(b.buf[b.off:])
+}
+
+// empty reports whether the unread portion of the buffer is empty.
+func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Cap returns the capacity of the buffer's underlying byte slice, that is, the
+// total space allocated for the buffer's data.
+func (b *Buffer) Cap() int { return cap(b.buf) }
+
+// Truncate discards all but the first n unread bytes from the buffer
+// but continues to use the same allocated storage.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+	if n == 0 {
+		b.Reset()
+		return
+	}
+	b.lastRead = opInvalid
+	if n < 0 || n > b.Len() {
+		panic("bytes.Buffer: truncation out of range")
+	}
+	b.buf = b.buf[:b.off+n]
+}
+
+// Reset resets the buffer to be empty,
+// but it retains the underlying storage for use by future writes.
+// Reset is the same as Truncate(0).
+func (b *Buffer) Reset() {
+	b.buf = b.buf[:0]
+	b.off = 0
+	b.lastRead = opInvalid
+}
+
+// tryGrowByReslice is an inlineable version of grow for the fast case where the
+// internal buffer only needs to be resliced.
+// It returns the index where bytes should be written and whether it succeeded.
+func (b *Buffer) tryGrowByReslice(n int) (int, bool) {
+	if l := len(b.buf); n <= cap(b.buf)-l {
+		b.buf = b.buf[:l+n]
+		return l, true
+	}
+	return 0, false
+}
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+	m := b.Len()
+	// If buffer is empty, reset to recover space.
+	if m == 0 && b.off != 0 {
+		b.Reset()
+	}
+	// Try to grow by means of a reslice.
+	if i, ok := b.tryGrowByReslice(n); ok {
+		return i
+	}
+	if b.buf == nil && n <= smallBufferSize {
+		b.buf = make([]byte, n, smallBufferSize)
+		return 0
+	}
+	c := cap(b.buf)
+	if n <= c/2-m {
+		// We can slide things down instead of allocating a new
+		// slice. We only need m+n <= c to slide, but
+		// we instead let capacity get twice as large so we
+		// don't spend all our time copying.
+		copy(b.buf, b.buf[b.off:])
+	} else if c > maxInt-c-n {
+		panic(ErrTooLarge)
+	} else {
+		// Not enough space anywhere, we need to allocate.
+		buf := makeSlice(2*c + n)
+		copy(buf, b.buf[b.off:])
+		b.buf = buf
+	}
+	// Restore b.off and len(b.buf).
+	b.off = 0
+	b.buf = b.buf[:m+n]
+	return m
+}
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+	if n < 0 {
+		panic("bytes.Buffer.Grow: negative count")
+	}
+	m := b.grow(n)
+	b.buf = b.buf[:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+	b.lastRead = opInvalid
+	m, ok := b.tryGrowByReslice(len(p))
+	if !ok {
+		m = b.grow(len(p))
+	}
+	return copy(b.buf[m:], p), nil
+}
+
+// WriteString appends the contents of s to the buffer, growing the buffer as
+// needed. The return value n is the length of s; err is always nil. If the
+// buffer becomes too large, WriteString will panic with ErrTooLarge.
+func (b *Buffer) WriteString(s string) (n int, err error) {
+	b.lastRead = opInvalid
+	m, ok := b.tryGrowByReslice(len(s))
+	if !ok {
+		m = b.grow(len(s))
+	}
+	return copy(b.buf[m:], s), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const MinRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+	b.lastRead = opInvalid
+	for {
+		i := b.grow(MinRead)
+		b.buf = b.buf[:i]
+		m, e := r.Read(b.buf[i:cap(b.buf)])
+		if m < 0 {
+			panic(errNegativeRead)
+		}
+
+		b.buf = b.buf[:i+m]
+		n += int64(m)
+		if e == io.EOF {
+			return n, nil // e is EOF, so return nil explicitly
+		}
+		if e != nil {
+			return n, e
+		}
+	}
+}
+
+// makeSlice allocates a slice of size n. If the allocation fails, it panics
+// with ErrTooLarge.
+func makeSlice(n int) []byte {
+	// If the make fails, give a known error.
+	defer func() {
+		if recover() != nil {
+			panic(ErrTooLarge)
+		}
+	}()
+	return make([]byte, n)
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+	b.lastRead = opInvalid
+	if nBytes := b.Len(); nBytes > 0 {
+		m, e := w.Write(b.buf[b.off:])
+		if m > nBytes {
+			panic("bytes.Buffer.WriteTo: invalid Write count")
+		}
+		b.off += m
+		n = int64(m)
+		if e != nil {
+			return n, e
+		}
+		// all bytes should have been written, by definition of
+		// Write method in io.Writer
+		if m != nBytes {
+			return n, io.ErrShortWrite
+		}
+	}
+	// Buffer is now empty; reset.
+	b.Reset()
+	return n, nil
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+	b.lastRead = opInvalid
+	m, ok := b.tryGrowByReslice(1)
+	if !ok {
+		m = b.grow(1)
+	}
+	b.buf[m] = c
+	return nil
+}
+
+// WriteRune appends the UTF-8 encoding of Unicode code point r to the
+// buffer, returning its length and an error, which is always nil but is
+// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
+// if it becomes too large, WriteRune will panic with ErrTooLarge.
+func (b *Buffer) WriteRune(r rune) (n int, err error) {
+	// Compare as uint32 to correctly handle negative runes.
+	if uint32(r) < utf8.RuneSelf {
+		b.WriteByte(byte(r))
+		return 1, nil
+	}
+	b.lastRead = opInvalid
+	m, ok := b.tryGrowByReslice(utf8.UTFMax)
+	if !ok {
+		m = b.grow(utf8.UTFMax)
+	}
+	n = utf8.EncodeRune(b.buf[m:m+utf8.UTFMax], r)
+	b.buf = b.buf[:m+n]
+	return n, nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+	b.lastRead = opInvalid
+	if b.empty() {
+		// Buffer is empty, reset to recover space.
+		b.Reset()
+		if len(p) == 0 {
+			return 0, nil
+		}
+		return 0, io.EOF
+	}
+	n = copy(p, b.buf[b.off:])
+	b.off += n
+	if n > 0 {
+		b.lastRead = opRead
+	}
+	return n, nil
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+	b.lastRead = opInvalid
+	m := b.Len()
+	if n > m {
+		n = m
+	}
+	data := b.buf[b.off : b.off+n]
+	b.off += n
+	if n > 0 {
+		b.lastRead = opRead
+	}
+	return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (byte, error) {
+	if b.empty() {
+		// Buffer is empty, reset to recover space.
+		b.Reset()
+		return 0, io.EOF
+	}
+	c := b.buf[b.off]
+	b.off++
+	b.lastRead = opRead
+	return c, nil
+}
+
+// ReadRune reads and returns the next UTF-8-encoded
+// Unicode code point from the buffer.
+// If no bytes are available, the error returned is io.EOF.
+// If the bytes are an erroneous UTF-8 encoding, it
+// consumes one byte and returns U+FFFD, 1.
+func (b *Buffer) ReadRune() (r rune, size int, err error) {
+	if b.empty() {
+		// Buffer is empty, reset to recover space.
+		b.Reset()
+		return 0, 0, io.EOF
+	}
+	c := b.buf[b.off]
+	if c < utf8.RuneSelf {
+		b.off++
+		b.lastRead = opReadRune1
+		return rune(c), 1, nil
+	}
+	r, n := utf8.DecodeRune(b.buf[b.off:])
+	b.off += n
+	b.lastRead = readOp(n)
+	return r, n, nil
+}
+
+// UnreadRune unreads the last rune returned by ReadRune.
+// If the most recent read or write operation on the buffer was
+// not a successful ReadRune, UnreadRune returns an error.  (In this regard
+// it is stricter than UnreadByte, which will unread the last byte
+// from any read operation.)
+func (b *Buffer) UnreadRune() error {
+	if b.lastRead <= opInvalid {
+		return errors.New("bytes.Buffer: UnreadRune: previous operation was not a successful ReadRune")
+	}
+	if b.off >= int(b.lastRead) {
+		b.off -= int(b.lastRead)
+	}
+	b.lastRead = opInvalid
+	return nil
+}
+
+var errUnreadByte = errors.New("bytes.Buffer: UnreadByte: previous operation was not a successful read")
+
+// UnreadByte unreads the last byte returned by the most recent successful
+// read operation that read at least one byte. If a write has happened since
+// the last read, if the last read returned an error, or if the read read zero
+// bytes, UnreadByte returns an error.
+func (b *Buffer) UnreadByte() error {
+	if b.lastRead == opInvalid {
+		return errUnreadByte
+	}
+	b.lastRead = opInvalid
+	if b.off > 0 {
+		b.off--
+	}
+	return nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+	slice, err := b.readSlice(delim)
+	// return a copy of slice. The buffer's backing array may
+	// be overwritten by later calls.
+	line = append(line, slice...)
+	return line, err
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+	i := IndexByte(b.buf[b.off:], delim)
+	end := b.off + i + 1
+	if i < 0 {
+		end = len(b.buf)
+		err = io.EOF
+	}
+	line = b.buf[b.off:end]
+	b.off = end
+	b.lastRead = opRead
+	return line, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end
+// in delim.
+func (b *Buffer) ReadString(delim byte) (line string, err error) {
+	slice, err := b.readSlice(delim)
+	return string(slice), err
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its
+// initial contents. The new Buffer takes ownership of buf, and the
+// caller should not use buf after this call. NewBuffer is intended to
+// prepare a Buffer to read existing data. It can also be used to set
+// the initial size of the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
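
Both intended uses, sketched as fragments (editorial illustration):

	r := bytes.NewBuffer([]byte("existing data")) // prepare to read existing bytes
	w := bytes.NewBuffer(make([]byte, 0, 1024))   // preallocate 1 KiB for writing: capacity set, length zero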
+
+// NewBufferString creates and initializes a new Buffer using string s as its
+// initial contents. It is intended to prepare a buffer to read an existing
+// string.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBufferString(s string) *Buffer {
+	return &Buffer{buf: []byte(s)}
+}

+ 1296 - 0
contrib/go/_std_1.18/src/bytes/bytes.go

@@ -0,0 +1,1296 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bytes implements functions for the manipulation of byte slices.
+// It is analogous to the facilities of the strings package.
+package bytes
+
+import (
+	"internal/bytealg"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Equal reports whether a and b
+// are the same length and contain the same bytes.
+// A nil argument is equivalent to an empty slice.
+func Equal(a, b []byte) bool {
+	// Neither cmd/compile nor gccgo allocates for these string conversions.
+	return string(a) == string(b)
+}
+
+// Compare returns an integer comparing two byte slices lexicographically.
+// The result will be 0 if a == b, -1 if a < b, and +1 if a > b.
+// A nil argument is equivalent to an empty slice.
+func Compare(a, b []byte) int {
+	return bytealg.Compare(a, b)
+}
+
+// explode splits s into a slice of UTF-8 sequences, one per Unicode code point (still slices of bytes),
+// up to a maximum of n byte slices. Invalid UTF-8 sequences are chopped into individual bytes.
+func explode(s []byte, n int) [][]byte {
+	if n <= 0 {
+		n = len(s)
+	}
+	a := make([][]byte, n)
+	var size int
+	na := 0
+	for len(s) > 0 {
+		if na+1 >= n {
+			a[na] = s
+			na++
+			break
+		}
+		_, size = utf8.DecodeRune(s)
+		a[na] = s[0:size:size]
+		s = s[size:]
+		na++
+	}
+	return a[0:na]
+}
+
+// Count counts the number of non-overlapping instances of sep in s.
+// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
+func Count(s, sep []byte) int {
+	// special case
+	if len(sep) == 0 {
+		return utf8.RuneCount(s) + 1
+	}
+	if len(sep) == 1 {
+		return bytealg.Count(s, sep[0])
+	}
+	n := 0
+	for {
+		i := Index(s, sep)
+		if i == -1 {
+			return n
+		}
+		n++
+		s = s[i+len(sep):]
+	}
+}
+
+// Contains reports whether subslice is within b.
+func Contains(b, subslice []byte) bool {
+	return Index(b, subslice) != -1
+}
+
+// ContainsAny reports whether any of the UTF-8-encoded code points in chars are within b.
+func ContainsAny(b []byte, chars string) bool {
+	return IndexAny(b, chars) >= 0
+}
+
+// ContainsRune reports whether the rune is contained in the UTF-8-encoded byte slice b.
+func ContainsRune(b []byte, r rune) bool {
+	return IndexRune(b, r) >= 0
+}
+
+// IndexByte returns the index of the first instance of c in b, or -1 if c is not present in b.
+func IndexByte(b []byte, c byte) int {
+	return bytealg.IndexByte(b, c)
+}
+
+func indexBytePortable(s []byte, c byte) int {
+	for i, b := range s {
+		if b == c {
+			return i
+		}
+	}
+	return -1
+}
+
+// LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s.
+func LastIndex(s, sep []byte) int {
+	n := len(sep)
+	switch {
+	case n == 0:
+		return len(s)
+	case n == 1:
+		return LastIndexByte(s, sep[0])
+	case n == len(s):
+		if Equal(s, sep) {
+			return 0
+		}
+		return -1
+	case n > len(s):
+		return -1
+	}
+	// Rabin-Karp search from the end of the string
+	hashss, pow := bytealg.HashStrRevBytes(sep)
+	last := len(s) - n
+	var h uint32
+	for i := len(s) - 1; i >= last; i-- {
+		h = h*bytealg.PrimeRK + uint32(s[i])
+	}
+	if h == hashss && Equal(s[last:], sep) {
+		return last
+	}
+	for i := last - 1; i >= 0; i-- {
+		h *= bytealg.PrimeRK
+		h += uint32(s[i])
+		h -= pow * uint32(s[i+n])
+		if h == hashss && Equal(s[i:i+n], sep) {
+			return i
+		}
+	}
+	return -1
+}
+
+// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
+func LastIndexByte(s []byte, c byte) int {
+	for i := len(s) - 1; i >= 0; i-- {
+		if s[i] == c {
+			return i
+		}
+	}
+	return -1
+}
+
+// IndexRune interprets s as a sequence of UTF-8-encoded code points.
+// It returns the byte index of the first occurrence in s of the given rune.
+// It returns -1 if rune is not present in s.
+// If r is utf8.RuneError, it returns the first instance of any
+// invalid UTF-8 byte sequence.
+func IndexRune(s []byte, r rune) int {
+	switch {
+	case 0 <= r && r < utf8.RuneSelf:
+		return IndexByte(s, byte(r))
+	case r == utf8.RuneError:
+		for i := 0; i < len(s); {
+			r1, n := utf8.DecodeRune(s[i:])
+			if r1 == utf8.RuneError {
+				return i
+			}
+			i += n
+		}
+		return -1
+	case !utf8.ValidRune(r):
+		return -1
+	default:
+		var b [utf8.UTFMax]byte
+		n := utf8.EncodeRune(b[:], r)
+		return Index(s, b[:n])
+	}
+}
+
+// IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
+// It returns the byte index of the first occurrence in s of any of the Unicode
+// code points in chars. It returns -1 if chars is empty or if there is no code
+// point in common.
+func IndexAny(s []byte, chars string) int {
+	if chars == "" {
+		// Avoid scanning all of s.
+		return -1
+	}
+	if len(s) == 1 {
+		r := rune(s[0])
+		if r >= utf8.RuneSelf {
+			// search utf8.RuneError.
+			for _, r = range chars {
+				if r == utf8.RuneError {
+					return 0
+				}
+			}
+			return -1
+		}
+		if bytealg.IndexByteString(chars, s[0]) >= 0 {
+			return 0
+		}
+		return -1
+	}
+	if len(chars) == 1 {
+		r := rune(chars[0])
+		if r >= utf8.RuneSelf {
+			r = utf8.RuneError
+		}
+		return IndexRune(s, r)
+	}
+	if len(s) > 8 {
+		if as, isASCII := makeASCIISet(chars); isASCII {
+			for i, c := range s {
+				if as.contains(c) {
+					return i
+				}
+			}
+			return -1
+		}
+	}
+	var width int
+	for i := 0; i < len(s); i += width {
+		r := rune(s[i])
+		if r < utf8.RuneSelf {
+			if bytealg.IndexByteString(chars, s[i]) >= 0 {
+				return i
+			}
+			width = 1
+			continue
+		}
+		r, width = utf8.DecodeRune(s[i:])
+		if r != utf8.RuneError {
+			// r is 2 to 4 bytes
+			if len(chars) == width {
+				if chars == string(r) {
+					return i
+				}
+				continue
+			}
+			// Use bytealg.IndexString for performance if available.
+			if bytealg.MaxLen >= width {
+				if bytealg.IndexString(chars, string(r)) >= 0 {
+					return i
+				}
+				continue
+			}
+		}
+		for _, ch := range chars {
+			if r == ch {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+// LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
+// points. It returns the byte index of the last occurrence in s of any of
+// the Unicode code points in chars. It returns -1 if chars is empty or if
+// there is no code point in common.
+func LastIndexAny(s []byte, chars string) int {
+	if chars == "" {
+		// Avoid scanning all of s.
+		return -1
+	}
+	if len(s) > 8 {
+		if as, isASCII := makeASCIISet(chars); isASCII {
+			for i := len(s) - 1; i >= 0; i-- {
+				if as.contains(s[i]) {
+					return i
+				}
+			}
+			return -1
+		}
+	}
+	if len(s) == 1 {
+		r := rune(s[0])
+		if r >= utf8.RuneSelf {
+			for _, r = range chars {
+				if r == utf8.RuneError {
+					return 0
+				}
+			}
+			return -1
+		}
+		if bytealg.IndexByteString(chars, s[0]) >= 0 {
+			return 0
+		}
+		return -1
+	}
+	if len(chars) == 1 {
+		cr := rune(chars[0])
+		if cr >= utf8.RuneSelf {
+			cr = utf8.RuneError
+		}
+		for i := len(s); i > 0; {
+			r, size := utf8.DecodeLastRune(s[:i])
+			i -= size
+			if r == cr {
+				return i
+			}
+		}
+		return -1
+	}
+	for i := len(s); i > 0; {
+		r := rune(s[i-1])
+		if r < utf8.RuneSelf {
+			if bytealg.IndexByteString(chars, s[i-1]) >= 0 {
+				return i - 1
+			}
+			i--
+			continue
+		}
+		r, size := utf8.DecodeLastRune(s[:i])
+		i -= size
+		if r != utf8.RuneError {
+			// r is 2 to 4 bytes
+			if len(chars) == size {
+				if chars == string(r) {
+					return i
+				}
+				continue
+			}
+			// Use bytealg.IndexString for performance if available.
+			if bytealg.MaxLen >= size {
+				if bytealg.IndexString(chars, string(r)) >= 0 {
+					return i
+				}
+				continue
+			}
+		}
+		for _, ch := range chars {
+			if r == ch {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+// Generic split: splits after each instance of sep,
+// including sepSave bytes of sep in the subslices.
+func genSplit(s, sep []byte, sepSave, n int) [][]byte {
+	if n == 0 {
+		return nil
+	}
+	if len(sep) == 0 {
+		return explode(s, n)
+	}
+	if n < 0 {
+		n = Count(s, sep) + 1
+	}
+
+	a := make([][]byte, n)
+	n--
+	i := 0
+	for i < n {
+		m := Index(s, sep)
+		if m < 0 {
+			break
+		}
+		a[i] = s[: m+sepSave : m+sepSave]
+		s = s[m+len(sep):]
+		i++
+	}
+	a[i] = s
+	return a[:i+1]
+}
+
+// SplitN slices s into subslices separated by sep and returns a slice of
+// the subslices between those separators.
+// If sep is empty, SplitN splits after each UTF-8 sequence.
+// The count determines the number of subslices to return:
+//   n > 0: at most n subslices; the last subslice will be the unsplit remainder.
+//   n == 0: the result is nil (zero subslices)
+//   n < 0: all subslices
+//
+// To split around the first instance of a separator, see Cut.
+func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
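
Worked examples of the count (editorial illustration):

	bytes.SplitN([]byte("a,b,c"), []byte(","), 2)  // ["a" "b,c"]: the remainder is left unsplit
	bytes.SplitN([]byte("a,b,c"), []byte(","), 0)  // nil
	bytes.SplitN([]byte("a,b,c"), []byte(","), -1) // ["a" "b" "c"]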
+
+// SplitAfterN slices s into subslices after each instance of sep and
+// returns a slice of those subslices.
+// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
+// The count determines the number of subslices to return:
+//   n > 0: at most n subslices; the last subslice will be the unsplit remainder.
+//   n == 0: the result is nil (zero subslices)
+//   n < 0: all subslices
+func SplitAfterN(s, sep []byte, n int) [][]byte {
+	return genSplit(s, sep, len(sep), n)
+}
+
+// Split slices s into all subslices separated by sep and returns a slice of
+// the subslices between those separators.
+// If sep is empty, Split splits after each UTF-8 sequence.
+// It is equivalent to SplitN with a count of -1.
+//
+// To split around the first instance of a separator, see Cut.
+func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }
+
+// SplitAfter slices s into all subslices after each instance of sep and
+// returns a slice of those subslices.
+// If sep is empty, SplitAfter splits after each UTF-8 sequence.
+// It is equivalent to SplitAfterN with a count of -1.
+func SplitAfter(s, sep []byte) [][]byte {
+	return genSplit(s, sep, len(sep), -1)
+}
+
+var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
+
+// Fields interprets s as a sequence of UTF-8-encoded code points.
+// It splits the slice s around each instance of one or more consecutive white space
+// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
+// empty slice if s contains only white space.
+func Fields(s []byte) [][]byte {
+	// First count the fields.
+	// This is an exact count if s is ASCII, otherwise it is an approximation.
+	n := 0
+	wasSpace := 1
+	// setBits is used to track which bits are set in the bytes of s.
+	setBits := uint8(0)
+	for i := 0; i < len(s); i++ {
+		r := s[i]
+		setBits |= r
+		isSpace := int(asciiSpace[r])
+		n += wasSpace & ^isSpace
+		wasSpace = isSpace
+	}
+
+	if setBits >= utf8.RuneSelf {
+		// Some runes in the input slice are not ASCII.
+		return FieldsFunc(s, unicode.IsSpace)
+	}
+
+	// ASCII fast path
+	a := make([][]byte, n)
+	na := 0
+	fieldStart := 0
+	i := 0
+	// Skip spaces in the front of the input.
+	for i < len(s) && asciiSpace[s[i]] != 0 {
+		i++
+	}
+	fieldStart = i
+	for i < len(s) {
+		if asciiSpace[s[i]] == 0 {
+			i++
+			continue
+		}
+		a[na] = s[fieldStart:i:i]
+		na++
+		i++
+		// Skip spaces in between fields.
+		for i < len(s) && asciiSpace[s[i]] != 0 {
+			i++
+		}
+		fieldStart = i
+	}
+	if fieldStart < len(s) { // Last field might end at EOF.
+		a[na] = s[fieldStart:len(s):len(s)]
+	}
+	return a
+}
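
For example (editorial sketch):

	bytes.Fields([]byte("  foo bar  baz   ")) // ["foo" "bar" "baz"]

Note that the returned subslices alias s rather than copy it: each field is capped at its own end (s[fieldStart:i:i]), so appending to one field cannot clobber the next.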
+
+// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
+// It splits the slice s at each run of code points c satisfying f(c) and
+// returns a slice of subslices of s. If all code points in s satisfy f(c), or
+// len(s) == 0, an empty slice is returned.
+//
+// FieldsFunc makes no guarantees about the order in which it calls f(c)
+// and assumes that f always returns the same value for a given c.
+func FieldsFunc(s []byte, f func(rune) bool) [][]byte {
+	// A span is used to record a slice of s of the form s[start:end].
+	// The start index is inclusive and the end index is exclusive.
+	type span struct {
+		start int
+		end   int
+	}
+	spans := make([]span, 0, 32)
+
+	// Find the field start and end indices.
+	// Doing this in a separate pass (rather than slicing the string s
+	// and collecting the result substrings right away) is significantly
+	// more efficient, possibly due to cache effects.
+	start := -1 // valid span start if >= 0
+	for i := 0; i < len(s); {
+		size := 1
+		r := rune(s[i])
+		if r >= utf8.RuneSelf {
+			r, size = utf8.DecodeRune(s[i:])
+		}
+		if f(r) {
+			if start >= 0 {
+				spans = append(spans, span{start, i})
+				start = -1
+			}
+		} else {
+			if start < 0 {
+				start = i
+			}
+		}
+		i += size
+	}
+
+	// Last field might end at EOF.
+	if start >= 0 {
+		spans = append(spans, span{start, len(s)})
+	}
+
+	// Create subslices from recorded field indices.
+	a := make([][]byte, len(spans))
+	for i, span := range spans {
+		a[i] = s[span.start:span.end:span.end]
+	}
+
+	return a
+}
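
A sketch with a custom predicate (editorial example; f is an illustrative name, and the unicode import is assumed): split on anything that is not a letter or a number.

	f := func(c rune) bool { return !unicode.IsLetter(c) && !unicode.IsNumber(c) }
	bytes.FieldsFunc([]byte("foo1;bar2,baz3..."), f) // ["foo1" "bar2" "baz3"]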
+
+// Join concatenates the elements of s to create a new byte slice. The separator
+// sep is placed between elements in the resulting slice.
+func Join(s [][]byte, sep []byte) []byte {
+	if len(s) == 0 {
+		return []byte{}
+	}
+	if len(s) == 1 {
+		// Just return a copy.
+		return append([]byte(nil), s[0]...)
+	}
+	n := len(sep) * (len(s) - 1)
+	for _, v := range s {
+		n += len(v)
+	}
+
+	b := make([]byte, n)
+	bp := copy(b, s[0])
+	for _, v := range s[1:] {
+		bp += copy(b[bp:], sep)
+		bp += copy(b[bp:], v)
+	}
+	return b
+}
+
+// HasPrefix tests whether the byte slice s begins with prefix.
+func HasPrefix(s, prefix []byte) bool {
+	return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
+}
+
+// HasSuffix tests whether the byte slice s ends with suffix.
+func HasSuffix(s, suffix []byte) bool {
+	return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
+}
+
+// Map returns a copy of the byte slice s with all its characters modified
+// according to the mapping function. If mapping returns a negative value, the character is
+// dropped from the byte slice with no replacement. The characters in s and the
+// output are interpreted as UTF-8-encoded code points.
+func Map(mapping func(r rune) rune, s []byte) []byte {
+	// In the worst case, the slice can grow when mapped, making
+	// things unpleasant. But it's so rare we barge in assuming it's
+	// fine. It could also shrink but that falls out naturally.
+	maxbytes := len(s) // length of b
+	nbytes := 0        // number of bytes encoded in b
+	b := make([]byte, maxbytes)
+	for i := 0; i < len(s); {
+		wid := 1
+		r := rune(s[i])
+		if r >= utf8.RuneSelf {
+			r, wid = utf8.DecodeRune(s[i:])
+		}
+		r = mapping(r)
+		if r >= 0 {
+			rl := utf8.RuneLen(r)
+			if rl < 0 {
+				rl = len(string(utf8.RuneError))
+			}
+			if nbytes+rl > maxbytes {
+				// Grow the buffer.
+				maxbytes = maxbytes*2 + utf8.UTFMax
+				nb := make([]byte, maxbytes)
+				copy(nb, b[0:nbytes])
+				b = nb
+			}
+			nbytes += utf8.EncodeRune(b[nbytes:maxbytes], r)
+		}
+		i += wid
+	}
+	return b[0:nbytes]
+}
+
+// Repeat returns a new byte slice consisting of count copies of b.
+//
+// It panics if count is negative or if
+// the result of (len(b) * count) overflows.
+func Repeat(b []byte, count int) []byte {
+	if count == 0 {
+		return []byte{}
+	}
+	// Since we cannot return an error on overflow,
+	// we should panic if the repeat will generate
+	// an overflow.
+	// See Issue golang.org/issue/16237.
+	if count < 0 {
+		panic("bytes: negative Repeat count")
+	} else if len(b)*count/count != len(b) {
+		panic("bytes: Repeat count causes overflow")
+	}
+
+	nb := make([]byte, len(b)*count)
+	bp := copy(nb, b)
+	for bp < len(nb) {
+		copy(nb[bp:], nb[:bp])
+		bp *= 2
+	}
+	return nb
+}
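
The loop above doubles the copied prefix each pass, so the result is filled in O(log count) copy calls. For example (editorial sketch):

	bytes.Repeat([]byte("na"), 4) // []byte("nananana"): initial copy of 2 bytes, then doubling copies of 2 and 4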
+
+// ToUpper returns a copy of the byte slice s with all Unicode letters mapped to
+// their upper case.
+func ToUpper(s []byte) []byte {
+	isASCII, hasLower := true, false
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c >= utf8.RuneSelf {
+			isASCII = false
+			break
+		}
+		hasLower = hasLower || ('a' <= c && c <= 'z')
+	}
+
+	if isASCII { // optimize for ASCII-only byte slices.
+		if !hasLower {
+			// Just return a copy.
+			return append([]byte(""), s...)
+		}
+		b := make([]byte, len(s))
+		for i := 0; i < len(s); i++ {
+			c := s[i]
+			if 'a' <= c && c <= 'z' {
+				c -= 'a' - 'A'
+			}
+			b[i] = c
+		}
+		return b
+	}
+	return Map(unicode.ToUpper, s)
+}
+
+// ToLower returns a copy of the byte slice s with all Unicode letters mapped to
+// their lower case.
+func ToLower(s []byte) []byte {
+	isASCII, hasUpper := true, false
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c >= utf8.RuneSelf {
+			isASCII = false
+			break
+		}
+		hasUpper = hasUpper || ('A' <= c && c <= 'Z')
+	}
+
+	if isASCII { // optimize for ASCII-only byte slices.
+		if !hasUpper {
+			return append([]byte(""), s...)
+		}
+		b := make([]byte, len(s))
+		for i := 0; i < len(s); i++ {
+			c := s[i]
+			if 'A' <= c && c <= 'Z' {
+				c += 'a' - 'A'
+			}
+			b[i] = c
+		}
+		return b
+	}
+	return Map(unicode.ToLower, s)
+}
+
+// ToTitle treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their title case.
+func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) }
+
+// ToUpperSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
+// upper case, giving priority to the special casing rules.
+func ToUpperSpecial(c unicode.SpecialCase, s []byte) []byte {
+	return Map(c.ToUpper, s)
+}
+
+// ToLowerSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
+// lower case, giving priority to the special casing rules.
+func ToLowerSpecial(c unicode.SpecialCase, s []byte) []byte {
+	return Map(c.ToLower, s)
+}
+
+// ToTitleSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
+// title case, giving priority to the special casing rules.
+func ToTitleSpecial(c unicode.SpecialCase, s []byte) []byte {
+	return Map(c.ToTitle, s)
+}
+
+// ToValidUTF8 treats s as UTF-8-encoded bytes and returns a copy with each run of bytes
+// representing invalid UTF-8 replaced with the bytes in replacement, which may be empty.
+func ToValidUTF8(s, replacement []byte) []byte {
+	b := make([]byte, 0, len(s)+len(replacement))
+	invalid := false // previous byte was from an invalid UTF-8 sequence
+	for i := 0; i < len(s); {
+		c := s[i]
+		if c < utf8.RuneSelf {
+			i++
+			invalid = false
+			b = append(b, c)
+			continue
+		}
+		_, wid := utf8.DecodeRune(s[i:])
+		if wid == 1 {
+			i++
+			if !invalid {
+				invalid = true
+				b = append(b, replacement...)
+			}
+			continue
+		}
+		invalid = false
+		b = append(b, s[i:i+wid]...)
+		i += wid
+	}
+	return b
+}
+
+// isSeparator reports whether the rune could mark a word boundary.
+// TODO: update when package unicode captures more of the properties.
+func isSeparator(r rune) bool {
+	// ASCII alphanumerics and underscore are not separators
+	if r <= 0x7F {
+		switch {
+		case '0' <= r && r <= '9':
+			return false
+		case 'a' <= r && r <= 'z':
+			return false
+		case 'A' <= r && r <= 'Z':
+			return false
+		case r == '_':
+			return false
+		}
+		return true
+	}
+	// Letters and digits are not separators
+	if unicode.IsLetter(r) || unicode.IsDigit(r) {
+		return false
+	}
+	// Otherwise, all we can do for now is treat spaces as separators.
+	return unicode.IsSpace(r)
+}
+
+// Title treats s as UTF-8-encoded bytes and returns a copy with all Unicode letters that begin
+// words mapped to their title case.
+//
+// Deprecated: The rule Title uses for word boundaries does not handle Unicode
+// punctuation properly. Use golang.org/x/text/cases instead.
+func Title(s []byte) []byte {
+	// Use a closure here to remember state.
+	// Hackish but effective. Depends on Map scanning in order and calling
+	// the closure once per rune.
+	prev := ' '
+	return Map(
+		func(r rune) rune {
+			if isSeparator(prev) {
+				prev = r
+				return unicode.ToTitle(r)
+			}
+			prev = r
+			return r
+		},
+		s)
+}
+
+// TrimLeftFunc treats s as UTF-8-encoded bytes and returns a subslice of s by slicing off
+// all leading UTF-8-encoded code points c that satisfy f(c).
+func TrimLeftFunc(s []byte, f func(r rune) bool) []byte {
+	i := indexFunc(s, f, false)
+	if i == -1 {
+		return nil
+	}
+	return s[i:]
+}
+
+// TrimRightFunc returns a subslice of s by slicing off all trailing
+// UTF-8-encoded code points c that satisfy f(c).
+func TrimRightFunc(s []byte, f func(r rune) bool) []byte {
+	i := lastIndexFunc(s, f, false)
+	if i >= 0 && s[i] >= utf8.RuneSelf {
+		_, wid := utf8.DecodeRune(s[i:])
+		i += wid
+	} else {
+		i++
+	}
+	return s[0:i]
+}
+
+// TrimFunc returns a subslice of s by slicing off all leading and trailing
+// UTF-8-encoded code points c that satisfy f(c).
+func TrimFunc(s []byte, f func(r rune) bool) []byte {
+	return TrimRightFunc(TrimLeftFunc(s, f), f)
+}
+
+// TrimPrefix returns s without the provided leading prefix string.
+// If s doesn't start with prefix, s is returned unchanged.
+func TrimPrefix(s, prefix []byte) []byte {
+	if HasPrefix(s, prefix) {
+		return s[len(prefix):]
+	}
+	return s
+}
+
+// TrimSuffix returns s without the provided trailing suffix string.
+// If s doesn't end with suffix, s is returned unchanged.
+func TrimSuffix(s, suffix []byte) []byte {
+	if HasSuffix(s, suffix) {
+		return s[:len(s)-len(suffix)]
+	}
+	return s
+}
+
+// IndexFunc interprets s as a sequence of UTF-8-encoded code points.
+// It returns the byte index in s of the first Unicode
+// code point satisfying f(c), or -1 if none do.
+func IndexFunc(s []byte, f func(r rune) bool) int {
+	return indexFunc(s, f, true)
+}
+
+// LastIndexFunc interprets s as a sequence of UTF-8-encoded code points.
+// It returns the byte index in s of the last Unicode
+// code point satisfying f(c), or -1 if none do.
+func LastIndexFunc(s []byte, f func(r rune) bool) int {
+	return lastIndexFunc(s, f, true)
+}
+
+// indexFunc is the same as IndexFunc except that if
+// truth==false, the sense of the predicate function is
+// inverted.
+func indexFunc(s []byte, f func(r rune) bool, truth bool) int {
+	start := 0
+	for start < len(s) {
+		wid := 1
+		r := rune(s[start])
+		if r >= utf8.RuneSelf {
+			r, wid = utf8.DecodeRune(s[start:])
+		}
+		if f(r) == truth {
+			return start
+		}
+		start += wid
+	}
+	return -1
+}
+
+// lastIndexFunc is the same as LastIndexFunc except that if
+// truth==false, the sense of the predicate function is
+// inverted.
+func lastIndexFunc(s []byte, f func(r rune) bool, truth bool) int {
+	for i := len(s); i > 0; {
+		r, size := rune(s[i-1]), 1
+		if r >= utf8.RuneSelf {
+			r, size = utf8.DecodeLastRune(s[0:i])
+		}
+		i -= size
+		if f(r) == truth {
+			return i
+		}
+	}
+	return -1
+}
+
+// asciiSet is a 32-byte value, where each bit represents the presence of a
+// given ASCII character in the set. The 128-bits of the lower 16 bytes,
+// starting with the least-significant bit of the lowest word to the
+// most-significant bit of the highest word, map to the full range of all
+// 128 ASCII characters. The 128-bits of the upper 16 bytes will be zeroed,
+// ensuring that any non-ASCII character will be reported as not in the set.
+// This allocates a total of 32 bytes even though the upper half
+// is unused to avoid bounds checks in asciiSet.contains.
+type asciiSet [8]uint32
+
+// makeASCIISet creates a set of ASCII characters and reports whether all
+// characters in chars are ASCII.
+func makeASCIISet(chars string) (as asciiSet, ok bool) {
+	for i := 0; i < len(chars); i++ {
+		c := chars[i]
+		if c >= utf8.RuneSelf {
+			return as, false
+		}
+		as[c/32] |= 1 << (c % 32)
+	}
+	return as, true
+}
+
+// contains reports whether c is inside the set.
+func (as *asciiSet) contains(c byte) bool {
+	return (as[c/32] & (1 << (c % 32))) != 0
+}
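
A worked instance of the bit arithmetic (in-package sketch, since asciiSet is unexported): byte 'a' is 97, so it lives at bit 97%32 == 1 of word 97/32 == 3, while any byte >= 0x80 selects one of the always-zero upper words.

	as, _ := makeASCIISet("abc")
	as.contains('a')  // true: makeASCIISet set word 3, bit 1
	as.contains('z')  // false: that bit was never set
	as.contains(0xE9) // false: word 7 is always zero, so non-ASCII bytes are never members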
+
+// containsRune is a simplified version of strings.ContainsRune
+// to avoid importing the strings package.
+// We avoid bytes.ContainsRune to avoid allocating a temporary copy of s.
+func containsRune(s string, r rune) bool {
+	for _, c := range s {
+		if c == r {
+			return true
+		}
+	}
+	return false
+}
+
+// Trim returns a subslice of s by slicing off all leading and
+// trailing UTF-8-encoded code points contained in cutset.
+func Trim(s []byte, cutset string) []byte {
+	if len(s) == 0 {
+		// This is what we've historically done.
+		return nil
+	}
+	if cutset == "" {
+		return s
+	}
+	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
+		return trimLeftByte(trimRightByte(s, cutset[0]), cutset[0])
+	}
+	if as, ok := makeASCIISet(cutset); ok {
+		return trimLeftASCII(trimRightASCII(s, &as), &as)
+	}
+	return trimLeftUnicode(trimRightUnicode(s, cutset), cutset)
+}
+
+// TrimLeft returns a subslice of s by slicing off all leading
+// UTF-8-encoded code points contained in cutset.
+func TrimLeft(s []byte, cutset string) []byte {
+	if len(s) == 0 {
+		// This is what we've historically done.
+		return nil
+	}
+	if cutset == "" {
+		return s
+	}
+	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
+		return trimLeftByte(s, cutset[0])
+	}
+	if as, ok := makeASCIISet(cutset); ok {
+		return trimLeftASCII(s, &as)
+	}
+	return trimLeftUnicode(s, cutset)
+}
+
+func trimLeftByte(s []byte, c byte) []byte {
+	for len(s) > 0 && s[0] == c {
+		s = s[1:]
+	}
+	if len(s) == 0 {
+		// This is what we've historically done.
+		return nil
+	}
+	return s
+}
+
+func trimLeftASCII(s []byte, as *asciiSet) []byte {
+	for len(s) > 0 {
+		if !as.contains(s[0]) {
+			break
+		}
+		s = s[1:]
+	}
+	if len(s) == 0 {
+		// This is what we've historically done.
+		return nil
+	}
+	return s
+}
+
+func trimLeftUnicode(s []byte, cutset string) []byte {
+	for len(s) > 0 {
+		r, n := rune(s[0]), 1
+		if r >= utf8.RuneSelf {
+			r, n = utf8.DecodeRune(s)
+		}
+		if !containsRune(cutset, r) {
+			break
+		}
+		s = s[n:]
+	}
+	if len(s) == 0 {
+		// This is what we've historically done.
+		return nil
+	}
+	return s
+}
+
+// TrimRight returns a subslice of s by slicing off all trailing
+// UTF-8-encoded code points that are contained in cutset.
+func TrimRight(s []byte, cutset string) []byte {
+	if len(s) == 0 || cutset == "" {
+		return s
+	}
+	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
+		return trimRightByte(s, cutset[0])
+	}
+	if as, ok := makeASCIISet(cutset); ok {
+		return trimRightASCII(s, &as)
+	}
+	return trimRightUnicode(s, cutset)
+}
+
+func trimRightByte(s []byte, c byte) []byte {
+	for len(s) > 0 && s[len(s)-1] == c {
+		s = s[:len(s)-1]
+	}
+	return s
+}
+
+func trimRightASCII(s []byte, as *asciiSet) []byte {
+	for len(s) > 0 {
+		if !as.contains(s[len(s)-1]) {
+			break
+		}
+		s = s[:len(s)-1]
+	}
+	return s
+}
+
+func trimRightUnicode(s []byte, cutset string) []byte {
+	for len(s) > 0 {
+		r, n := rune(s[len(s)-1]), 1
+		if r >= utf8.RuneSelf {
+			r, n = utf8.DecodeLastRune(s)
+		}
+		if !containsRune(cutset, r) {
+			break
+		}
+		s = s[:len(s)-n]
+	}
+	return s
+}
+
+// TrimSpace returns a subslice of s by slicing off all leading and
+// trailing white space, as defined by Unicode.
+func TrimSpace(s []byte) []byte {
+	// Fast path for ASCII: look for the first ASCII non-space byte
+	start := 0
+	for ; start < len(s); start++ {
+		c := s[start]
+		if c >= utf8.RuneSelf {
+			// If we run into a non-ASCII byte, fall back to the
+			// slower unicode-aware method on the remaining bytes
+			return TrimFunc(s[start:], unicode.IsSpace)
+		}
+		if asciiSpace[c] == 0 {
+			break
+		}
+	}
+
+	// Now look for the first ASCII non-space byte from the end
+	stop := len(s)
+	for ; stop > start; stop-- {
+		c := s[stop-1]
+		if c >= utf8.RuneSelf {
+			return TrimFunc(s[start:stop], unicode.IsSpace)
+		}
+		if asciiSpace[c] == 0 {
+			break
+		}
+	}
+
+	// At this point s[start:stop] starts and ends with ASCII
+	// non-space bytes, so we're done. Non-ASCII cases have already
+	// been handled above.
+	if start == stop {
+		// Special case to preserve previous TrimLeftFunc behavior,
+		// returning nil instead of empty slice if all spaces.
+		return nil
+	}
+	return s[start:stop]
+}
+
+// Runes interprets s as a sequence of UTF-8-encoded code points.
+// It returns a slice of runes (Unicode code points) equivalent to s.
+func Runes(s []byte) []rune {
+	t := make([]rune, utf8.RuneCount(s))
+	i := 0
+	for len(s) > 0 {
+		r, l := utf8.DecodeRune(s)
+		t[i] = r
+		i++
+		s = s[l:]
+	}
+	return t
+}
+
+// Replace returns a copy of the slice s with the first n
+// non-overlapping instances of old replaced by new.
+// If old is empty, it matches at the beginning of the slice
+// and after each UTF-8 sequence, yielding up to k+1 replacements
+// for a k-rune slice.
+// If n < 0, there is no limit on the number of replacements.
+func Replace(s, old, new []byte, n int) []byte {
+	m := 0
+	if n != 0 {
+		// Compute number of replacements.
+		m = Count(s, old)
+	}
+	if m == 0 {
+		// Just return a copy.
+		return append([]byte(nil), s...)
+	}
+	if n < 0 || m < n {
+		n = m
+	}
+
+	// Apply replacements to buffer.
+	t := make([]byte, len(s)+n*(len(new)-len(old)))
+	w := 0
+	start := 0
+	for i := 0; i < n; i++ {
+		j := start
+		if len(old) == 0 {
+			if i > 0 {
+				_, wid := utf8.DecodeRune(s[start:])
+				j += wid
+			}
+		} else {
+			j += Index(s[start:], old)
+		}
+		w += copy(t[w:], s[start:j])
+		w += copy(t[w:], new)
+		start = j + len(old)
+	}
+	w += copy(t[w:], s[start:])
+	return t[0:w]
+}
+
+// ReplaceAll returns a copy of the slice s with all
+// non-overlapping instances of old replaced by new.
+// If old is empty, it matches at the beginning of the slice
+// and after each UTF-8 sequence, yielding up to k+1 replacements
+// for a k-rune slice.
+func ReplaceAll(s, old, new []byte) []byte {
+	return Replace(s, old, new, -1)
+}
+
+// EqualFold reports whether s and t, interpreted as UTF-8 strings,
+// are equal under Unicode case-folding, which is a more general
+// form of case-insensitivity.
+func EqualFold(s, t []byte) bool {
+	for len(s) != 0 && len(t) != 0 {
+		// Extract first rune from each.
+		var sr, tr rune
+		if s[0] < utf8.RuneSelf {
+			sr, s = rune(s[0]), s[1:]
+		} else {
+			r, size := utf8.DecodeRune(s)
+			sr, s = r, s[size:]
+		}
+		if t[0] < utf8.RuneSelf {
+			tr, t = rune(t[0]), t[1:]
+		} else {
+			r, size := utf8.DecodeRune(t)
+			tr, t = r, t[size:]
+		}
+
+		// If they match, keep going; if not, return false.
+
+		// Easy case.
+		if tr == sr {
+			continue
+		}
+
+		// Make sr < tr to simplify what follows.
+		if tr < sr {
+			tr, sr = sr, tr
+		}
+		// Fast check for ASCII.
+		if tr < utf8.RuneSelf {
+			// ASCII only, sr/tr must be upper/lower case
+			if 'A' <= sr && sr <= 'Z' && tr == sr+'a'-'A' {
+				continue
+			}
+			return false
+		}
+
+		// General case. SimpleFold(x) returns the next equivalent rune > x
+		// or wraps around to smaller values.
+		r := unicode.SimpleFold(sr)
+		for r != sr && r < tr {
+			r = unicode.SimpleFold(r)
+		}
+		if r == tr {
+			continue
+		}
+		return false
+	}
+
+	// One string is empty. Are both?
+	return len(s) == len(t)
+}
+
+// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
+func Index(s, sep []byte) int {
+	n := len(sep)
+	switch {
+	case n == 0:
+		return 0
+	case n == 1:
+		return IndexByte(s, sep[0])
+	case n == len(s):
+		if Equal(sep, s) {
+			return 0
+		}
+		return -1
+	case n > len(s):
+		return -1
+	case n <= bytealg.MaxLen:
+		// Use brute force when both s and sep are small.
+		if len(s) <= bytealg.MaxBruteForce {
+			return bytealg.Index(s, sep)
+		}
+		c0 := sep[0]
+		c1 := sep[1]
+		i := 0
+		t := len(s) - n + 1
+		fails := 0
+		for i < t {
+			if s[i] != c0 {
+				// IndexByte is faster than bytealg.Index, so use it as long as
+				// we're not getting lots of false positives.
+				o := IndexByte(s[i+1:t], c0)
+				if o < 0 {
+					return -1
+				}
+				i += o + 1
+			}
+			if s[i+1] == c1 && Equal(s[i:i+n], sep) {
+				return i
+			}
+			fails++
+			i++
+			// Switch to bytealg.Index when IndexByte produces too many false positives.
+			if fails > bytealg.Cutover(i) {
+				r := bytealg.Index(s[i:], sep)
+				if r >= 0 {
+					return r + i
+				}
+				return -1
+			}
+		}
+		return -1
+	}
+	c0 := sep[0]
+	c1 := sep[1]
+	i := 0
+	fails := 0
+	t := len(s) - n + 1
+	for i < t {
+		if s[i] != c0 {
+			o := IndexByte(s[i+1:t], c0)
+			if o < 0 {
+				break
+			}
+			i += o + 1
+		}
+		if s[i+1] == c1 && Equal(s[i:i+n], sep) {
+			return i
+		}
+		i++
+		fails++
+		if fails >= 4+i>>4 && i < t {
+			// Give up on IndexByte, it isn't skipping ahead
+			// far enough to be better than Rabin-Karp.
+			// Experiments (using IndexPeriodic) suggest
+			// the cutover is about 16 byte skips.
+			// TODO: if large prefixes of sep are matching
+			// we should cutover at even larger average skips,
+			// because Equal becomes that much more expensive.
+			// This code does not take that effect into account.
+			j := bytealg.IndexRabinKarpBytes(s[i:], sep)
+			if j < 0 {
+				return -1
+			}
+			return i + j
+		}
+	}
+	return -1
+}
+
+// Cut slices s around the first instance of sep,
+// returning the text before and after sep.
+// The found result reports whether sep appears in s.
+// If sep does not appear in s, Cut returns s, nil, false.
+//
+// Cut returns slices of the original slice s, not copies.
+func Cut(s, sep []byte) (before, after []byte, found bool) {
+	if i := Index(s, sep); i >= 0 {
+		return s[:i], s[i+len(sep):], true
+	}
+	return s, nil, false
+}
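
As a quick orientation for reviewers, here is a minimal usage sketch of the trimming, folding, and cutting helpers vendored above. It exercises only the exported bytes APIs that appear in this diff (Cut is new in Go 1.18); it is illustrative only and not part of the synced sources.

package main

import (
	"bytes"
	"fmt"
	"unicode"
)

func main() {
	// TrimSpace takes the ASCII fast path until it meets a non-ASCII byte.
	fmt.Printf("%q\n", bytes.TrimSpace([]byte("  héllo, wörld  "))) // "héllo, wörld"

	// TrimFunc trims leading and trailing runes satisfying the predicate.
	fmt.Printf("%q\n", bytes.TrimFunc([]byte("123abc456"), unicode.IsDigit)) // "abc"

	// A single ASCII byte in the cutset hits the trimLeftByte/trimRightByte path.
	fmt.Printf("%q\n", bytes.Trim([]byte("xxhixx"), "x")) // "hi"

	// EqualFold compares under simple Unicode case folding.
	fmt.Println(bytes.EqualFold([]byte("Go"), []byte("gO"))) // true

	// Cut splits around the first separator and reports whether it was found.
	before, after, found := bytes.Cut([]byte("key=value"), []byte("="))
	fmt.Printf("%q %q %v\n", before, after, found) // "key" "value" true
}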

+ 160 - 0
contrib/go/_std_1.18/src/bytes/reader.go

@@ -0,0 +1,160 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytes
+
+import (
+	"errors"
+	"io"
+	"unicode/utf8"
+)
+
+// A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker,
+// io.ByteScanner, and io.RuneScanner interfaces by reading from
+// a byte slice.
+// Unlike a Buffer, a Reader is read-only and supports seeking.
+// The zero value for Reader operates like a Reader of an empty slice.
+type Reader struct {
+	s        []byte
+	i        int64 // current reading index
+	prevRune int   // index of previous rune; or < 0
+}
+
+// Len returns the number of bytes of the unread portion of the
+// slice.
+func (r *Reader) Len() int {
+	if r.i >= int64(len(r.s)) {
+		return 0
+	}
+	return int(int64(len(r.s)) - r.i)
+}
+
+// Size returns the original length of the underlying byte slice.
+// Size is the number of bytes available for reading via ReadAt.
+// The returned value is always the same and is not affected by calls
+// to any other method.
+func (r *Reader) Size() int64 { return int64(len(r.s)) }
+
+// Read implements the io.Reader interface.
+func (r *Reader) Read(b []byte) (n int, err error) {
+	if r.i >= int64(len(r.s)) {
+		return 0, io.EOF
+	}
+	r.prevRune = -1
+	n = copy(b, r.s[r.i:])
+	r.i += int64(n)
+	return
+}
+
+// ReadAt implements the io.ReaderAt interface.
+func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
+	// cannot modify state - see io.ReaderAt
+	if off < 0 {
+		return 0, errors.New("bytes.Reader.ReadAt: negative offset")
+	}
+	if off >= int64(len(r.s)) {
+		return 0, io.EOF
+	}
+	n = copy(b, r.s[off:])
+	if n < len(b) {
+		err = io.EOF
+	}
+	return
+}
+
+// ReadByte implements the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+	r.prevRune = -1
+	if r.i >= int64(len(r.s)) {
+		return 0, io.EOF
+	}
+	b := r.s[r.i]
+	r.i++
+	return b, nil
+}
+
+// UnreadByte complements ReadByte in implementing the io.ByteScanner interface.
+func (r *Reader) UnreadByte() error {
+	if r.i <= 0 {
+		return errors.New("bytes.Reader.UnreadByte: at beginning of slice")
+	}
+	r.prevRune = -1
+	r.i--
+	return nil
+}
+
+// ReadRune implements the io.RuneReader interface.
+func (r *Reader) ReadRune() (ch rune, size int, err error) {
+	if r.i >= int64(len(r.s)) {
+		r.prevRune = -1
+		return 0, 0, io.EOF
+	}
+	r.prevRune = int(r.i)
+	if c := r.s[r.i]; c < utf8.RuneSelf {
+		r.i++
+		return rune(c), 1, nil
+	}
+	ch, size = utf8.DecodeRune(r.s[r.i:])
+	r.i += int64(size)
+	return
+}
+
+// UnreadRune complements ReadRune in implementing the io.RuneScanner interface.
+func (r *Reader) UnreadRune() error {
+	if r.i <= 0 {
+		return errors.New("bytes.Reader.UnreadRune: at beginning of slice")
+	}
+	if r.prevRune < 0 {
+		return errors.New("bytes.Reader.UnreadRune: previous operation was not ReadRune")
+	}
+	r.i = int64(r.prevRune)
+	r.prevRune = -1
+	return nil
+}
+
+// Seek implements the io.Seeker interface.
+func (r *Reader) Seek(offset int64, whence int) (int64, error) {
+	r.prevRune = -1
+	var abs int64
+	switch whence {
+	case io.SeekStart:
+		abs = offset
+	case io.SeekCurrent:
+		abs = r.i + offset
+	case io.SeekEnd:
+		abs = int64(len(r.s)) + offset
+	default:
+		return 0, errors.New("bytes.Reader.Seek: invalid whence")
+	}
+	if abs < 0 {
+		return 0, errors.New("bytes.Reader.Seek: negative position")
+	}
+	r.i = abs
+	return abs, nil
+}
+
+// WriteTo implements the io.WriterTo interface.
+func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
+	r.prevRune = -1
+	if r.i >= int64(len(r.s)) {
+		return 0, nil
+	}
+	b := r.s[r.i:]
+	m, err := w.Write(b)
+	if m > len(b) {
+		panic("bytes.Reader.WriteTo: invalid Write count")
+	}
+	r.i += int64(m)
+	n = int64(m)
+	if m != len(b) && err == nil {
+		err = io.ErrShortWrite
+	}
+	return
+}
+
+// Reset resets the Reader to be reading from b.
+func (r *Reader) Reset(b []byte) { *r = Reader{b, 0, -1} }
+
+// NewReader returns a new Reader reading from b.
+func NewReader(b []byte) *Reader { return &Reader{b, 0, -1} }
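
A similar sketch for the Reader type above, covering the seeking and rune-scanning behavior that the prevRune bookkeeping enables. Again illustrative only, using just the methods defined in this file (error returns are ignored for brevity).

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	r := bytes.NewReader([]byte("héllo")) // 5 runes, 6 bytes

	// ReadRune decodes one UTF-8 code point and records its start in prevRune.
	ch, size, _ := r.ReadRune()
	fmt.Printf("%c %d\n", ch, size) // h 1

	// UnreadRune rewinds to that recorded index; it fails if the
	// previous operation was not ReadRune.
	_ = r.UnreadRune()

	// Seek repositions the read index; io.SeekEnd counts back from len(s).
	pos, _ := r.Seek(-2, io.SeekEnd)
	fmt.Println(pos) // 4

	// WriteTo drains the unread portion into w in a single Write call.
	_, _ = r.Seek(0, io.SeekStart)
	_, _ = r.WriteTo(os.Stdout) // héllo
	fmt.Println()

	// Len reports the unread bytes; Size the full slice length.
	fmt.Println(r.Len(), r.Size()) // 0 6
}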

Some files were not shown because too many files changed in this diff