Browse Source

Change "devtools/contrib/piglet/projects/ydblib/config.yaml"
e790478457ac44c1468389d3b31b067a90df8e4e

rekby 8 months ago
parent
commit
d024de4c41

+ 122 - 0
contrib/python/docker/.dist-info/METADATA

@@ -0,0 +1,122 @@
+Metadata-Version: 2.3
+Name: docker
+Version: 7.1.0
+Summary: A Python library for the Docker Engine API.
+Project-URL: Changelog, https://docker-py.readthedocs.io/en/stable/change-log.html
+Project-URL: Documentation, https://docker-py.readthedocs.io
+Project-URL: Homepage, https://github.com/docker/docker-py
+Project-URL: Source, https://github.com/docker/docker-py
+Project-URL: Tracker, https://github.com/docker/docker-py/issues
+Maintainer-email: "Docker Inc." <no-reply@docker.com>
+License-Expression: Apache-2.0
+License-File: LICENSE
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Other Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development
+Classifier: Topic :: Utilities
+Requires-Python: >=3.8
+Requires-Dist: pywin32>=304; sys_platform == 'win32'
+Requires-Dist: requests>=2.26.0
+Requires-Dist: urllib3>=1.26.0
+Provides-Extra: dev
+Requires-Dist: coverage==7.2.7; extra == 'dev'
+Requires-Dist: pytest-cov==4.1.0; extra == 'dev'
+Requires-Dist: pytest-timeout==2.1.0; extra == 'dev'
+Requires-Dist: pytest==7.4.2; extra == 'dev'
+Requires-Dist: ruff==0.1.8; extra == 'dev'
+Provides-Extra: docs
+Requires-Dist: myst-parser==0.18.0; extra == 'docs'
+Requires-Dist: sphinx==5.1.1; extra == 'docs'
+Provides-Extra: ssh
+Requires-Dist: paramiko>=2.4.3; extra == 'ssh'
+Provides-Extra: tls
+Provides-Extra: websockets
+Requires-Dist: websocket-client>=1.3.0; extra == 'websockets'
+Description-Content-Type: text/markdown
+
+# Docker SDK for Python
+
+[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg)](https://github.com/docker/docker-py/actions/workflows/ci.yml)
+
+A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
+
+## Installation
+
+The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Install with pip:
+
+    pip install docker
+
+> Older versions (< 6.0) required installing `docker[tls]` for SSL/TLS support.
+> This is no longer necessary and is a no-op, but is supported for backwards compatibility.
+
+## Usage
+
+Connect to Docker using the default socket or the configuration in your environment:
+
+```python
+import docker
+client = docker.from_env()
+```
+
+You can run containers:
+
+```python
+>>> client.containers.run("ubuntu:latest", "echo hello world")
+'hello world\n'
+```
+
+You can run containers in the background:
+
+```python
+>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+<Container '45e6d2de7c54'>
+```
+
+You can manage containers:
+
+```python
+>>> client.containers.list()
+[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+>>> container = client.containers.get('45e6d2de7c54')
+
+>>> container.attrs['Config']['Image']
+"bfirsh/reticulate-splines"
+
+>>> container.logs()
+"Reticulating spline 1...\n"
+
+>>> container.stop()
+```
+
+You can stream logs:
+
+```python
+>>> for line in container.logs(stream=True):
+...   print(line.strip())
+Reticulating spline 2...
+Reticulating spline 3...
+...
+```
+
+You can manage images:
+
+```python
+>>> client.images.pull('nginx')
+<Image 'nginx'>
+
+>>> client.images.list()
+[<Image 'ubuntu'>, <Image 'nginx'>, ...]
+```
+
+[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.

+ 1 - 0
contrib/python/docker/.dist-info/top_level.txt

@@ -0,0 +1 @@
+docker

+ 191 - 0
contrib/python/docker/LICENSE

@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2016 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 76 - 0
contrib/python/docker/README.md

@@ -0,0 +1,76 @@
+# Docker SDK for Python
+
+[![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg)](https://github.com/docker/docker-py/actions/workflows/ci.yml)
+
+A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
+
+## Installation
+
+The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Install with pip:
+
+    pip install docker
+
+> Older versions (< 6.0) required installing `docker[tls]` for SSL/TLS support.
+> This is no longer necessary and is a no-op, but is supported for backwards compatibility.
+
+## Usage
+
+Connect to Docker using the default socket or the configuration in your environment:
+
+```python
+import docker
+client = docker.from_env()
+```
+
+You can run containers:
+
+```python
+>>> client.containers.run("ubuntu:latest", "echo hello world")
+'hello world\n'
+```
+
+You can run containers in the background:
+
+```python
+>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
+<Container '45e6d2de7c54'>
+```
+
+You can manage containers:
+
+```python
+>>> client.containers.list()
+[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
+
+>>> container = client.containers.get('45e6d2de7c54')
+
+>>> container.attrs['Config']['Image']
+"bfirsh/reticulate-splines"
+
+>>> container.logs()
+"Reticulating spline 1...\n"
+
+>>> container.stop()
+```
+
+You can stream logs:
+
+```python
+>>> for line in container.logs(stream=True):
+...   print(line.strip())
+Reticulating spline 2...
+Reticulating spline 3...
+...
+```
+
+You can manage images:
+
+```python
+>>> client.images.pull('nginx')
+<Image 'nginx'>
+
+>>> client.images.list()
+[<Image 'ubuntu'>, <Image 'nginx'>, ...]
+```
+
+[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.

+ 7 - 0
contrib/python/docker/docker/__init__.py

@@ -0,0 +1,7 @@
+from .api import APIClient
+from .client import DockerClient, from_env
+from .context import Context, ContextAPI
+from .tls import TLSConfig
+from .version import __version__
+
+__title__ = 'docker'

+ 16 - 0
contrib/python/docker/docker/_version.py

@@ -0,0 +1,16 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple, Union
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+    VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '7.1.0'
+__version_tuple__ = version_tuple = (7, 1, 0)

+ 1 - 0
contrib/python/docker/docker/api/__init__.py

@@ -0,0 +1 @@
+from .client import APIClient

+ 382 - 0
contrib/python/docker/docker/api/build.py

@@ -0,0 +1,382 @@
+import json
+import logging
+import os
+import random
+
+from .. import auth, constants, errors, utils
+
+log = logging.getLogger(__name__)
+
+
+class BuildApiMixin:
+    def build(self, path=None, tag=None, quiet=False, fileobj=None,
+              nocache=False, rm=False, timeout=None,
+              custom_context=False, encoding=None, pull=False,
+              forcerm=False, dockerfile=None, container_limits=None,
+              decode=False, buildargs=None, gzip=False, shmsize=None,
+              labels=None, cache_from=None, target=None, network_mode=None,
+              squash=None, extra_hosts=None, platform=None, isolation=None,
+              use_config_proxy=True):
+        """
+        Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
+        needs to be set. ``path`` can be a local path (to a directory
+        containing a Dockerfile) or a remote URL. ``fileobj`` must be a
+        readable file-like object to a Dockerfile.
+
+        If you have a tar file for the Docker build context (including a
+        Dockerfile) already, pass a readable file-like object to ``fileobj``
+        and also pass ``custom_context=True``. If the stream is compressed
+        also, set ``encoding`` to the correct value (e.g ``gzip``).
+
+        Example:
+            >>> from io import BytesIO
+            >>> from docker import APIClient
+            >>> dockerfile = '''
+            ... # Shared Volume
+            ... FROM busybox:buildroot-2014.02
+            ... VOLUME /data
+            ... CMD ["/bin/sh"]
+            ... '''
+            >>> f = BytesIO(dockerfile.encode('utf-8'))
+            >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
+            >>> response = [line for line in cli.build(
+            ...     fileobj=f, rm=True, tag='yourname/volume'
+            ... )]
+            >>> response
+            ['{"stream":" ---\\u003e a9eb17255234\\n"}',
+             '{"stream":"Step 1 : VOLUME /data\\n"}',
+             '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
+             '{"stream":" ---\\u003e 713bca62012e\\n"}',
+             '{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
+             '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
+             '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
+             '{"stream":" ---\\u003e 032b8b2855fc\\n"}',
+             '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
+             '{"stream":"Successfully built 032b8b2855fc\\n"}']
+
+        Args:
+            path (str): Path to the directory containing the Dockerfile
+            fileobj: A file object to use as the Dockerfile. (Or a file-like
+                object)
+            tag (str): A tag to add to the final image
+            quiet (bool): Whether to return the status
+            nocache (bool): Don't use the cache when set to ``True``
+            rm (bool): Remove intermediate containers. The ``docker build``
+                command now defaults to ``--rm=true``, but we have kept the old
+                default of `False` to preserve backward compatibility
+            timeout (int): HTTP timeout
+            custom_context (bool): Optional if using ``fileobj``
+            encoding (str): The encoding for a stream. Set to ``gzip`` for
+                compressing
+            pull (bool): Downloads any updates to the FROM image in Dockerfiles
+            forcerm (bool): Always remove intermediate containers, even after
+                unsuccessful builds
+            dockerfile (str): path within the build context to the Dockerfile
+            gzip (bool): If set to ``True``, gzip compression/encoding is used
+            buildargs (dict): A dictionary of build arguments
+            container_limits (dict): A dictionary of limits applied to each
+                container created by the build process. Valid keys:
+
+                - memory (int): set memory limit for build
+                - memswap (int): Total memory (memory + swap), -1 to disable
+                    swap
+                - cpushares (int): CPU shares (relative weight)
+                - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+                    ``"0-3"``, ``"0,1"``
+            decode (bool): If set to ``True``, the returned stream will be
+                decoded into dicts on the fly. Default ``False``
+            shmsize (int): Size of `/dev/shm` in bytes. The size must be
+                greater than 0. If omitted the system uses 64MB
+            labels (dict): A dictionary of labels to set on the image
+            cache_from (:py:class:`list`): A list of images used for build
+                cache resolution
+            target (str): Name of the build-stage to build in a multi-stage
+                Dockerfile
+            network_mode (str): networking mode for the run commands during
+                build
+            squash (bool): Squash the resulting images layers into a
+                single layer.
+            extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+                containers, as a mapping of hostname to IP address.
+            platform (str): Platform in the format ``os[/arch[/variant]]``
+            isolation (str): Isolation technology used during build.
+                Default: `None`.
+            use_config_proxy (bool): If ``True``, and if the docker client
+                configuration file (``~/.docker/config.json`` by default)
+                contains a proxy configuration, the corresponding environment
+                variables will be set in the container being built.
+
+        Returns:
+            A generator for the build output.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+            ``TypeError``
+                If neither ``path`` nor ``fileobj`` is specified.
+        """
+        remote = context = None
+        headers = {}
+        container_limits = container_limits or {}
+        buildargs = buildargs or {}
+        if path is None and fileobj is None:
+            raise TypeError("Either path or fileobj needs to be provided.")
+        if gzip and encoding is not None:
+            raise errors.DockerException(
+                'Can not use custom encoding if gzip is enabled'
+            )
+        if tag is not None:
+            if not utils.match_tag(tag):
+                raise errors.DockerException(
+                    f"invalid tag '{tag}': invalid reference format"
+                )
+        for key in container_limits.keys():
+            if key not in constants.CONTAINER_LIMITS_KEYS:
+                raise errors.DockerException(
+                    f"Invalid container_limits key {key}"
+                )
+        if custom_context:
+            if not fileobj:
+                raise TypeError("You must specify fileobj with custom_context")
+            context = fileobj
+        elif fileobj is not None:
+            context = utils.mkbuildcontext(fileobj)
+        elif path.startswith(('http://', 'https://',
+                              'git://', 'github.com/', 'git@')):
+            remote = path
+        elif not os.path.isdir(path):
+            raise TypeError("You must specify a directory to build in path")
+        else:
+            dockerignore = os.path.join(path, '.dockerignore')
+            exclude = None
+            if os.path.exists(dockerignore):
+                with open(dockerignore) as f:
+                    exclude = list(filter(
+                        lambda x: x != '' and x[0] != '#',
+                        [line.strip() for line in f.read().splitlines()]
+                    ))
+            dockerfile = process_dockerfile(dockerfile, path)
+            context = utils.tar(
+                path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
+            )
+            encoding = 'gzip' if gzip else encoding
+
+        u = self._url('/build')
+        params = {
+            't': tag,
+            'remote': remote,
+            'q': quiet,
+            'nocache': nocache,
+            'rm': rm,
+            'forcerm': forcerm,
+            'pull': pull,
+            'dockerfile': dockerfile,
+        }
+        params.update(container_limits)
+
+        if use_config_proxy:
+            proxy_args = self._proxy_configs.get_environment()
+            for k, v in proxy_args.items():
+                buildargs.setdefault(k, v)
+        if buildargs:
+            params.update({'buildargs': json.dumps(buildargs)})
+
+        if shmsize:
+            if utils.version_gte(self._version, '1.22'):
+                params.update({'shmsize': shmsize})
+            else:
+                raise errors.InvalidVersion(
+                    'shmsize was only introduced in API version 1.22'
+                )
+
+        if labels:
+            if utils.version_gte(self._version, '1.23'):
+                params.update({'labels': json.dumps(labels)})
+            else:
+                raise errors.InvalidVersion(
+                    'labels was only introduced in API version 1.23'
+                )
+
+        if cache_from:
+            if utils.version_gte(self._version, '1.25'):
+                params.update({'cachefrom': json.dumps(cache_from)})
+            else:
+                raise errors.InvalidVersion(
+                    'cache_from was only introduced in API version 1.25'
+                )
+
+        if target:
+            if utils.version_gte(self._version, '1.29'):
+                params.update({'target': target})
+            else:
+                raise errors.InvalidVersion(
+                    'target was only introduced in API version 1.29'
+                )
+
+        if network_mode:
+            if utils.version_gte(self._version, '1.25'):
+                params.update({'networkmode': network_mode})
+            else:
+                raise errors.InvalidVersion(
+                    'network_mode was only introduced in API version 1.25'
+                )
+
+        if squash:
+            if utils.version_gte(self._version, '1.25'):
+                params.update({'squash': squash})
+            else:
+                raise errors.InvalidVersion(
+                    'squash was only introduced in API version 1.25'
+                )
+
+        if extra_hosts is not None:
+            if utils.version_lt(self._version, '1.27'):
+                raise errors.InvalidVersion(
+                    'extra_hosts was only introduced in API version 1.27'
+                )
+
+            if isinstance(extra_hosts, dict):
+                extra_hosts = utils.format_extra_hosts(extra_hosts)
+            params.update({'extrahosts': extra_hosts})
+
+        if platform is not None:
+            if utils.version_lt(self._version, '1.32'):
+                raise errors.InvalidVersion(
+                    'platform was only introduced in API version 1.32'
+                )
+            params['platform'] = platform
+
+        if isolation is not None:
+            if utils.version_lt(self._version, '1.24'):
+                raise errors.InvalidVersion(
+                    'isolation was only introduced in API version 1.24'
+                )
+            params['isolation'] = isolation
+
+        if context is not None:
+            headers = {'Content-Type': 'application/tar'}
+            if encoding:
+                headers['Content-Encoding'] = encoding
+
+        self._set_auth_headers(headers)
+
+        response = self._post(
+            u,
+            data=context,
+            params=params,
+            headers=headers,
+            stream=True,
+            timeout=timeout,
+        )
+
+        if context is not None and not custom_context:
+            context.close()
+
+        return self._stream_helper(response, decode=decode)
+
+    @utils.minimum_version('1.31')
+    def prune_builds(self, filters=None, keep_storage=None, all=None):
+        """
+        Delete the builder cache
+
+        Args:
+            filters (dict): Filters to process on the prune list.
+                Needs Docker API v1.39+
+                Available filters:
+                - dangling (bool):  When set to true (or 1), prune only
+                unused and untagged images.
+                - until (str): Can be Unix timestamps, date formatted
+                timestamps, or Go duration strings (e.g. 10m, 1h30m) computed
+                relative to the daemon's local time.
+            keep_storage (int): Amount of disk space in bytes to keep for cache.
+                Needs Docker API v1.39+
+            all (bool): Remove all types of build cache.
+                Needs Docker API v1.39+
+
+        Returns:
+            (dict): A dictionary containing information about the operation's
+                    result. The ``SpaceReclaimed`` key indicates the amount of
+                    bytes of disk space reclaimed.
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        url = self._url("/build/prune")
+        if (filters, keep_storage, all) != (None, None, None) \
+                and utils.version_lt(self._version, '1.39'):
+            raise errors.InvalidVersion(
+                '`filters`, `keep_storage`, and `all` args are only available '
+                'for API version > 1.38'
+            )
+        params = {}
+        if filters is not None:
+            params['filters'] = utils.convert_filters(filters)
+        if keep_storage is not None:
+            params['keep-storage'] = keep_storage
+        if all is not None:
+            params['all'] = all
+        return self._result(self._post(url, params=params), True)
+
+    def _set_auth_headers(self, headers):
+        log.debug('Looking for auth config')
+
+        # If we don't have any auth data so far, try reloading the config
+        # file one more time in case anything showed up in there.
+        if not self._auth_configs or self._auth_configs.is_empty:
+            log.debug("No auth config in memory - loading from filesystem")
+            self._auth_configs = auth.load_config(
+                credstore_env=self.credstore_env
+            )
+
+        # Send the full auth configuration (if any exists), since the build
+        # could use any (or all) of the registries.
+        if self._auth_configs:
+            auth_data = self._auth_configs.get_all_credentials()
+
+            # See https://github.com/docker/docker-py/issues/1683
+            if (auth.INDEX_URL not in auth_data and
+                    auth.INDEX_NAME in auth_data):
+                auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
+
+            log.debug(
+                "Sending auth config (%s)",
+                ', '.join(repr(k) for k in auth_data),
+            )
+
+            if auth_data:
+                headers['X-Registry-Config'] = auth.encode_header(
+                    auth_data
+                )
+        else:
+            log.debug('No auth config found')
+
+
+def process_dockerfile(dockerfile, path):
+    if not dockerfile:
+        return (None, None)
+
+    abs_dockerfile = dockerfile
+    if not os.path.isabs(dockerfile):
+        abs_dockerfile = os.path.join(path, dockerfile)
+        if constants.IS_WINDOWS_PLATFORM and path.startswith(
+                constants.WINDOWS_LONGPATH_PREFIX):
+            normpath = os.path.normpath(
+                abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):])
+            abs_dockerfile = f'{constants.WINDOWS_LONGPATH_PREFIX}{normpath}'
+    if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
+            os.path.relpath(abs_dockerfile, path).startswith('..')):
+        # Dockerfile not in context - read data to insert into tar later
+        with open(abs_dockerfile) as df:
+            return (
+                f'.dockerfile.{random.getrandbits(160):x}',
+                df.read()
+            )
+
+    # Dockerfile is inside the context - return path relative to context root
+    if dockerfile == abs_dockerfile:
+        # Only calculate relpath if necessary to avoid errors
+        # on Windows client -> Linux Docker
+        # see https://github.com/docker/compose/issues/5969
+        dockerfile = os.path.relpath(abs_dockerfile, path)
+    return (dockerfile, None)

+ 536 - 0
contrib/python/docker/docker/api/client.py

@@ -0,0 +1,536 @@
+import json
+import struct
+import urllib
+from functools import partial
+
+import requests
+import requests.adapters
+import requests.exceptions
+
+from .. import auth
+from ..constants import (
+    DEFAULT_DOCKER_API_VERSION,
+    DEFAULT_MAX_POOL_SIZE,
+    DEFAULT_NUM_POOLS,
+    DEFAULT_NUM_POOLS_SSH,
+    DEFAULT_TIMEOUT_SECONDS,
+    DEFAULT_USER_AGENT,
+    IS_WINDOWS_PLATFORM,
+    MINIMUM_DOCKER_API_VERSION,
+    STREAM_HEADER_SIZE_BYTES,
+)
+from ..errors import (
+    DockerException,
+    InvalidVersion,
+    TLSParameterError,
+    create_api_error_from_http_exception,
+)
+from ..tls import TLSConfig
+from ..transport import UnixHTTPAdapter
+from ..utils import check_resource, config, update_headers, utils
+from ..utils.json_stream import json_stream
+from ..utils.proxy import ProxyConfig
+from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
+from .build import BuildApiMixin
+from .config import ConfigApiMixin
+from .container import ContainerApiMixin
+from .daemon import DaemonApiMixin
+from .exec_api import ExecApiMixin
+from .image import ImageApiMixin
+from .network import NetworkApiMixin
+from .plugin import PluginApiMixin
+from .secret import SecretApiMixin
+from .service import ServiceApiMixin
+from .swarm import SwarmApiMixin
+from .volume import VolumeApiMixin
+
+try:
+    from ..transport import NpipeHTTPAdapter
+except ImportError:
+    pass
+
+try:
+    from ..transport import SSHHTTPAdapter
+except ImportError:
+    pass
+
+
class APIClient(
        requests.Session,
        BuildApiMixin,
        ConfigApiMixin,
        ContainerApiMixin,
        DaemonApiMixin,
        ExecApiMixin,
        ImageApiMixin,
        NetworkApiMixin,
        PluginApiMixin,
        SecretApiMixin,
        ServiceApiMixin,
        SwarmApiMixin,
        VolumeApiMixin):
    """
    A low-level client for the Docker Engine API.

    Example:

        >>> import docker
        >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
        >>> client.version()
        {u'ApiVersion': u'1.33',
         u'Arch': u'amd64',
         u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
         u'GitCommit': u'f4ffd2511c',
         u'GoVersion': u'go1.9.2',
         u'KernelVersion': u'4.14.3-1-ARCH',
         u'MinAPIVersion': u'1.12',
         u'Os': u'linux',
         u'Version': u'17.10.0-ce'}

    Args:
        base_url (str): URL to the Docker server. For example,
            ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
        version (str): The version of the API to use. Set to ``auto`` to
            automatically detect the server's version. Default: ``1.35``
        timeout (int): Default timeout for API calls, in seconds.
        tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
            ``True`` to enable it with default options, or pass a
            :py:class:`~docker.tls.TLSConfig` object to use custom
            configuration.
        user_agent (str): Set a custom user agent for requests to the server.
        credstore_env (dict): Override environment variables when calling the
            credential store process.
        use_ssh_client (bool): If set to `True`, an ssh connection is made
            via shelling out to the ssh client. Ensure the ssh client is
            installed and configured on the host.
        max_pool_size (int): The maximum number of connections
            to save in the pool.
    """

    __attrs__ = requests.Session.__attrs__ + ['_auth_configs',
                                              '_general_configs',
                                              '_version',
                                              'base_url',
                                              'timeout']

    def __init__(self, base_url=None, version=None,
                 timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
                 user_agent=DEFAULT_USER_AGENT, num_pools=None,
                 credstore_env=None, use_ssh_client=False,
                 max_pool_size=DEFAULT_MAX_POOL_SIZE):
        super().__init__()

        # Always define the attribute so get_adapter() can test it safely;
        # it is only assigned a real adapter on the unix/npipe/ssh/tls paths
        # below, and a plain http(s) base_url would otherwise leave it unset
        # (AttributeError instead of the intended InvalidSchema re-raise).
        self._custom_adapter = None

        if tls and not base_url:
            raise TLSParameterError(
                'If using TLS, the base_url argument must be provided.'
            )

        self.base_url = base_url
        self.timeout = timeout
        self.headers['User-Agent'] = user_agent

        self._general_configs = config.load_general_config()

        proxy_config = self._general_configs.get('proxies', {})
        try:
            proxies = proxy_config[base_url]
        except KeyError:
            proxies = proxy_config.get('default', {})

        self._proxy_configs = ProxyConfig.from_dict(proxies)

        self._auth_configs = auth.load_config(
            config_dict=self._general_configs, credstore_env=credstore_env,
        )
        self.credstore_env = credstore_env

        base_url = utils.parse_host(
            base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
        )
        # SSH has a different default for num_pools to all other adapters
        num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
            base_url.startswith('ssh://') else DEFAULT_NUM_POOLS

        if base_url.startswith('http+unix://'):
            self._custom_adapter = UnixHTTPAdapter(
                base_url, timeout, pool_connections=num_pools,
                max_pool_size=max_pool_size
            )
            self.mount('http+docker://', self._custom_adapter)
            self._unmount('http://', 'https://')
            # host part of URL should be unused, but is resolved by requests
            # module in proxy_bypass_macosx_sysconf()
            self.base_url = 'http+docker://localhost'
        elif base_url.startswith('npipe://'):
            if not IS_WINDOWS_PLATFORM:
                raise DockerException(
                    'The npipe:// protocol is only supported on Windows'
                )
            try:
                self._custom_adapter = NpipeHTTPAdapter(
                    base_url, timeout, pool_connections=num_pools,
                    max_pool_size=max_pool_size
                )
            except NameError as err:
                # NpipeHTTPAdapter import is optional (see module top).
                raise DockerException(
                    'Install pypiwin32 package to enable npipe:// support'
                ) from err
            self.mount('http+docker://', self._custom_adapter)
            self.base_url = 'http+docker://localnpipe'
        elif base_url.startswith('ssh://'):
            try:
                self._custom_adapter = SSHHTTPAdapter(
                    base_url, timeout, pool_connections=num_pools,
                    max_pool_size=max_pool_size, shell_out=use_ssh_client
                )
            except NameError as err:
                # SSHHTTPAdapter import is optional (see module top).
                raise DockerException(
                    'Install paramiko package to enable ssh:// support'
                ) from err
            self.mount('http+docker://ssh', self._custom_adapter)
            self._unmount('http://', 'https://')
            self.base_url = 'http+docker://ssh'
        else:
            # Use SSLAdapter for the ability to specify SSL version
            if isinstance(tls, TLSConfig):
                tls.configure_client(self)
            elif tls:
                self._custom_adapter = requests.adapters.HTTPAdapter(
                    pool_connections=num_pools)
                self.mount('https://', self._custom_adapter)
            self.base_url = base_url

        # version detection needs to be after unix adapter mounting
        if version is None or (isinstance(
                                version,
                                str
                                ) and version.lower() == 'auto'):
            try:
                self._version = self._retrieve_server_version()
            except Exception:
                # Vendored behavior: fall back to the default API version
                # when the daemon cannot be queried, instead of failing.
                # Narrowed from a bare ``except`` so KeyboardInterrupt and
                # SystemExit still propagate.
                self._version = DEFAULT_DOCKER_API_VERSION
        else:
            self._version = version
        if not isinstance(self._version, str):
            raise DockerException(
                'Version parameter must be a string or None. '
                f'Found {type(version).__name__}'
            )
        if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
            raise InvalidVersion(
                f'API versions below {MINIMUM_DOCKER_API_VERSION} are '
                f'no longer supported by this library.'
            )

    def _retrieve_server_version(self):
        """Ask the daemon for its API version; wrap failures uniformly."""
        try:
            return self.version(api_version=False)["ApiVersion"]
        except KeyError as ke:
            raise DockerException(
                'Invalid response from docker daemon: key "ApiVersion"'
                ' is missing.'
            ) from ke
        except Exception as e:
            raise DockerException(
                f'Error while fetching server API version: {e}'
            ) from e

    def _set_request_timeout(self, kwargs):
        """Prepare the kwargs for an HTTP request by inserting the timeout
        parameter, if not already present."""
        kwargs.setdefault('timeout', self.timeout)
        return kwargs

    @update_headers
    def _post(self, url, **kwargs):
        return self.post(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _get(self, url, **kwargs):
        return self.get(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _put(self, url, **kwargs):
        return self.put(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _delete(self, url, **kwargs):
        return self.delete(url, **self._set_request_timeout(kwargs))

    def _url(self, pathfmt, *args, **kwargs):
        """Build a (by default API-versioned) URL from a format string."""
        for arg in args:
            if not isinstance(arg, str):
                raise ValueError(
                    f'Expected a string but found {arg} ({type(arg)}) instead'
                )

        quote_f = partial(urllib.parse.quote, safe="/:")
        args = map(quote_f, args)

        formatted_path = pathfmt.format(*args)
        if kwargs.get('versioned_api', True):
            return f'{self.base_url}/v{self._version}{formatted_path}'
        else:
            return f'{self.base_url}{formatted_path}'

    def _raise_for_status(self, response):
        """Raises stored :class:`APIError`, if one occurred."""
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            raise create_api_error_from_http_exception(e) from e

    def _result(self, response, json=False, binary=False):
        """Return the response payload as JSON, bytes, or text."""
        assert not (json and binary)
        self._raise_for_status(response)

        if json:
            return response.json()
        if binary:
            return response.content
        return response.text

    def _post_json(self, url, data, **kwargs):
        # Go <1.1 can't unserialize null to a string
        # so we do this disgusting thing here.
        data2 = {}
        if data is not None and isinstance(data, dict):
            for k, v in iter(data.items()):
                if v is not None:
                    data2[k] = v
        elif data is not None:
            data2 = data

        if 'headers' not in kwargs:
            kwargs['headers'] = {}
        kwargs['headers']['Content-Type'] = 'application/json'
        return self._post(url, data=json.dumps(data2), **kwargs)

    def _attach_params(self, override=None):
        return override or {
            'stdout': 1,
            'stderr': 1,
            'stream': 1
        }

    @check_resource('container')
    def _attach_websocket(self, container, params=None):
        url = self._url("/containers/{0}/attach/ws", container)
        req = requests.Request("POST", url, params=self._attach_params(params))
        full_url = req.prepare().url
        full_url = full_url.replace("http://", "ws://", 1)
        full_url = full_url.replace("https://", "wss://", 1)
        return self._create_websocket_connection(full_url)

    def _create_websocket_connection(self, url):
        try:
            import websocket
            return websocket.create_connection(url)
        except ImportError as ie:
            raise DockerException(
                'The `websocket-client` library is required '
                'for using websocket connections. '
                'You can install the `docker` library '
                'with the [websocket] extra to install it.'
            ) from ie

    def _get_raw_response_socket(self, response):
        # NOTE: digs into urllib3/requests internals (response.raw._fp)
        # to reach the underlying socket for each transport flavor.
        self._raise_for_status(response)
        if self.base_url == "http+docker://localnpipe":
            sock = response.raw._fp.fp.raw.sock
        elif self.base_url.startswith('http+docker://ssh'):
            sock = response.raw._fp.fp.channel
        else:
            sock = response.raw._fp.fp.raw
            if self.base_url.startswith("https://"):
                sock = sock._sock
        try:
            # Keep a reference to the response to stop it being garbage
            # collected. If the response is garbage collected, it will
            # close TLS sockets.
            sock._response = response
        except AttributeError:
            # UNIX sockets can't have attributes set on them, but that's
            # fine because we won't be doing TLS over them
            pass

        return sock

    def _stream_helper(self, response, decode=False):
        """Generator for data coming from a chunked-encoded HTTP response."""

        if response.raw._fp.chunked:
            if decode:
                yield from json_stream(self._stream_helper(response, False))
            else:
                reader = response.raw
                while not reader.closed:
                    # this read call will block until we get a chunk
                    data = reader.read(1)
                    if not data:
                        break
                    if reader._fp.chunk_left:
                        data += reader.read(reader._fp.chunk_left)
                    yield data
        else:
            # Response isn't chunked, meaning we probably
            # encountered an error immediately
            yield self._result(response, json=decode)

    def _multiplexed_buffer_helper(self, response):
        """A generator of multiplexed data blocks read from a buffered
        response."""
        buf = self._result(response, binary=True)
        buf_length = len(buf)
        walker = 0
        while True:
            if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
                break
            header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
            _, length = struct.unpack_from('>BxxxL', header)
            start = walker + STREAM_HEADER_SIZE_BYTES
            end = start + length
            walker = end
            yield buf[start:end]

    def _multiplexed_response_stream_helper(self, response):
        """A generator of multiplexed data blocks coming from a response
        stream."""

        # Disable timeout on the underlying socket to prevent
        # Read timed out(s) for long running processes
        socket = self._get_raw_response_socket(response)
        self._disable_socket_timeout(socket)

        while True:
            header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
            if not header:
                break
            _, length = struct.unpack('>BxxxL', header)
            if not length:
                continue
            data = response.raw.read(length)
            if not data:
                break
            yield data

    def _stream_raw_result(self, response, chunk_size=1, decode=True):
        ''' Stream result for TTY-enabled container and raw binary data'''
        self._raise_for_status(response)

        # Disable timeout on the underlying socket to prevent
        # Read timed out(s) for long running processes
        socket = self._get_raw_response_socket(response)
        self._disable_socket_timeout(socket)

        yield from response.iter_content(chunk_size, decode)

    def _read_from_socket(self, response, stream, tty=True, demux=False):
        """Consume all data from the socket, close the response and return the
        data. If stream=True, then a generator is returned instead and the
        caller is responsible for closing the response.
        """
        socket = self._get_raw_response_socket(response)

        gen = frames_iter(socket, tty)

        if demux:
            # The generator will output tuples (stdout, stderr)
            gen = (demux_adaptor(*frame) for frame in gen)
        else:
            # The generator will output strings
            gen = (data for (_, data) in gen)

        if stream:
            return gen
        else:
            try:
                # Wait for all frames, concatenate them, and return the result
                return consume_socket_output(gen, demux=demux)
            finally:
                response.close()

    def _disable_socket_timeout(self, socket):
        """ Depending on the combination of python version and whether we're
        connecting over http or https, we might need to access _sock, which
        may or may not exist; or we may need to just settimeout on socket
        itself, which also may or may not have settimeout on it. To avoid
        missing the correct one, we try both.

        We also do not want to set the timeout if it is already disabled, as
        you run the risk of changing a socket that was non-blocking to
        blocking, for example when using gevent.
        """
        sockets = [socket, getattr(socket, '_sock', None)]

        for s in sockets:
            if not hasattr(s, 'settimeout'):
                continue

            timeout = -1

            if hasattr(s, 'gettimeout'):
                timeout = s.gettimeout()

            # Don't change the timeout if it is already disabled.
            if timeout is None or timeout == 0.0:
                continue

            s.settimeout(None)

    @check_resource('container')
    def _check_is_tty(self, container):
        """Return True if the given container was created with a TTY."""
        cont = self.inspect_container(container)
        return cont['Config']['Tty']

    def _get_result(self, container, stream, res):
        return self._get_result_tty(stream, res, self._check_is_tty(container))

    def _get_result_tty(self, stream, res, is_tty):
        # We should also use raw streaming (without keep-alives)
        # if we're dealing with a tty-enabled container.
        if is_tty:
            return self._stream_raw_result(res) if stream else \
                self._result(res, binary=True)

        self._raise_for_status(res)
        sep = b''
        if stream:
            return self._multiplexed_response_stream_helper(res)
        else:
            return sep.join(
                list(self._multiplexed_buffer_helper(res))
            )

    def _unmount(self, *args):
        """Remove the given URL-prefix adapters from this session."""
        for proto in args:
            self.adapters.pop(proto)

    def get_adapter(self, url):
        try:
            return super().get_adapter(url)
        except requests.exceptions.InvalidSchema as e:
            # Fall back to the custom adapter mounted in __init__ (if any)
            # for the non-standard http+docker:// scheme.
            if self._custom_adapter:
                return self._custom_adapter
            else:
                raise e

    @property
    def api_version(self):
        return self._version

    def reload_config(self, dockercfg_path=None):
        """
        Force a reload of the auth configuration

        Args:
            dockercfg_path (str): Use a custom path for the Docker config file
                (default ``$HOME/.docker/config.json`` if present,
                otherwise ``$HOME/.dockercfg``)

        Returns:
            None
        """
        self._auth_configs = auth.load_config(
            dockercfg_path, credstore_env=self.credstore_env
        )

+ 92 - 0
contrib/python/docker/docker/api/config.py

@@ -0,0 +1,92 @@
+import base64
+
+from .. import utils
+
+
class ConfigApiMixin:
    """Mixin implementing the ``/configs`` endpoints of the Engine API."""

    @utils.minimum_version('1.30')
    def create_config(self, name, data, labels=None, templating=None):
        """
            Create a config

            Args:
                name (string): Name of the config
                data (bytes): Config data to be stored
                labels (dict): A mapping of labels to assign to the config
                templating (dict): dictionary containing the name of the
                                   templating driver to be used expressed as
                                   { name: <templating_driver_name>}

            Returns (dict): ID of the newly created config
        """
        # The API expects the payload as base64 text; accept str for
        # convenience and encode it to bytes first.
        raw = data if isinstance(data, bytes) else data.encode('utf-8')
        body = {
            'Data': base64.b64encode(raw).decode('ascii'),
            'Name': name,
            'Labels': labels,
            'Templating': templating
        }

        return self._result(
            self._post_json(self._url('/configs/create'), data=body), True
        )

    @utils.minimum_version('1.30')
    @utils.check_resource('id')
    def inspect_config(self, id):
        """
            Retrieve config metadata

            Args:
                id (string): Full ID of the config to inspect

            Returns (dict): A dictionary of metadata

            Raises:
                :py:class:`docker.errors.NotFound`
                    if no config with that ID exists
        """
        return self._result(
            self._get(self._url('/configs/{0}', id)), True
        )

    @utils.minimum_version('1.30')
    @utils.check_resource('id')
    def remove_config(self, id):
        """
            Remove a config

            Args:
                id (string): Full ID of the config to remove

            Returns (boolean): True if successful

            Raises:
                :py:class:`docker.errors.NotFound`
                    if no config with that ID exists
        """
        self._raise_for_status(
            self._delete(self._url('/configs/{0}', id))
        )
        return True

    @utils.minimum_version('1.30')
    def configs(self, filters=None):
        """
            List configs

            Args:
                filters (dict): A map of filters to process on the configs
                list. Available filters: ``names``

            Returns (list): A list of configs
        """
        params = {}
        if filters:
            params['filters'] = utils.convert_filters(filters)
        return self._result(
            self._get(self._url('/configs'), params=params), True
        )

Some files were not shown because too many files changed in this diff