first commit
1
backend/venv/lib/python3.9/site-packages/docker/api/__init__.py
Normal file
@@ -0,0 +1 @@
from .client import APIClient
382
backend/venv/lib/python3.9/site-packages/docker/api/build.py
Normal file
@@ -0,0 +1,382 @@
import json
import logging
import os
import random

from .. import auth, constants, errors, utils

log = logging.getLogger(__name__)


class BuildApiMixin:
    def build(self, path=None, tag=None, quiet=False, fileobj=None,
              nocache=False, rm=False, timeout=None,
              custom_context=False, encoding=None, pull=False,
              forcerm=False, dockerfile=None, container_limits=None,
              decode=False, buildargs=None, gzip=False, shmsize=None,
              labels=None, cache_from=None, target=None, network_mode=None,
              squash=None, extra_hosts=None, platform=None, isolation=None,
              use_config_proxy=True):
        """
        Similar to the ``docker build`` command. Either ``path`` or
        ``fileobj`` needs to be set. ``path`` can be a local path (to a
        directory containing a Dockerfile) or a remote URL. ``fileobj`` must
        be a readable file-like object to a Dockerfile.

        If you already have a tar file for the Docker build context
        (including a Dockerfile), pass a readable file-like object to
        ``fileobj`` and also pass ``custom_context=True``. If the stream is
        also compressed, set ``encoding`` to the correct value (e.g.
        ``gzip``).

        Example:
            >>> from io import BytesIO
            >>> from docker import APIClient
            >>> dockerfile = '''
            ... # Shared Volume
            ... FROM busybox:buildroot-2014.02
            ... VOLUME /data
            ... CMD ["/bin/sh"]
            ... '''
            >>> f = BytesIO(dockerfile.encode('utf-8'))
            >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
            >>> response = [line for line in cli.build(
            ...     fileobj=f, rm=True, tag='yourname/volume'
            ... )]
            >>> response
            ['{"stream":" ---\\u003e a9eb17255234\\n"}',
             '{"stream":"Step 1 : VOLUME /data\\n"}',
             '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
             '{"stream":" ---\\u003e 713bca62012e\\n"}',
             '{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
             '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
             '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
             '{"stream":" ---\\u003e 032b8b2855fc\\n"}',
             '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
             '{"stream":"Successfully built 032b8b2855fc\\n"}']

        Args:
            path (str): Path to the directory containing the Dockerfile
            fileobj: A file object to use as the Dockerfile. (Or a file-like
                object)
            tag (str): A tag to add to the final image
            quiet (bool): Whether to return the status
            nocache (bool): Don't use the cache when set to ``True``
            rm (bool): Remove intermediate containers. The ``docker build``
                command now defaults to ``--rm=true``, but we have kept the
                old default of `False` to preserve backward compatibility
            timeout (int): HTTP timeout
            custom_context (bool): Optional if using ``fileobj``
            encoding (str): The encoding for a stream. Set to ``gzip`` for
                compressing
            pull (bool): Downloads any updates to the FROM image in
                Dockerfiles
            forcerm (bool): Always remove intermediate containers, even after
                unsuccessful builds
            dockerfile (str): path within the build context to the Dockerfile
            gzip (bool): If set to ``True``, gzip compression/encoding is used
            buildargs (dict): A dictionary of build arguments
            container_limits (dict): A dictionary of limits applied to each
                container created by the build process. Valid keys:

                - memory (int): set memory limit for build
                - memswap (int): Total memory (memory + swap), -1 to disable
                    swap
                - cpushares (int): CPU shares (relative weight)
                - cpusetcpus (str): CPUs in which to allow execution, e.g.,
                    ``"0-3"``, ``"0,1"``
            decode (bool): If set to ``True``, the returned stream will be
                decoded into dicts on the fly. Default ``False``
            shmsize (int): Size of `/dev/shm` in bytes. The size must be
                greater than 0. If omitted the system uses 64MB
            labels (dict): A dictionary of labels to set on the image
            cache_from (:py:class:`list`): A list of images used for build
                cache resolution
            target (str): Name of the build-stage to build in a multi-stage
                Dockerfile
            network_mode (str): networking mode for the run commands during
                build
            squash (bool): Squash the resulting image's layers into a
                single layer.
            extra_hosts (dict): Extra hosts to add to /etc/hosts in building
                containers, as a mapping of hostname to IP address.
            platform (str): Platform in the format ``os[/arch[/variant]]``
            isolation (str): Isolation technology used during build.
                Default: `None`.
            use_config_proxy (bool): If ``True``, and if the docker client
                configuration file (``~/.docker/config.json`` by default)
                contains a proxy configuration, the corresponding environment
                variables will be set in the container being built.

        Returns:
            A generator for the build output.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
            ``TypeError``
                If neither ``path`` nor ``fileobj`` is specified.
        """
        remote = context = None
        headers = {}
        container_limits = container_limits or {}
        buildargs = buildargs or {}
        if path is None and fileobj is None:
            raise TypeError("Either path or fileobj needs to be provided.")
        if gzip and encoding is not None:
            raise errors.DockerException(
                'Can not use custom encoding if gzip is enabled'
            )
        if tag is not None:
            if not utils.match_tag(tag):
                raise errors.DockerException(
                    f"invalid tag '{tag}': invalid reference format"
                )
        for key in container_limits.keys():
            if key not in constants.CONTAINER_LIMITS_KEYS:
                # Report the offending limits key, not the tag.
                raise errors.DockerException(
                    f"invalid container_limits key: {key}"
                )
        if custom_context:
            if not fileobj:
                raise TypeError("You must specify fileobj with custom_context")
            context = fileobj
        elif fileobj is not None:
            context = utils.mkbuildcontext(fileobj)
        elif path.startswith(('http://', 'https://',
                              'git://', 'github.com/', 'git@')):
            remote = path
        elif not os.path.isdir(path):
            raise TypeError("You must specify a directory to build in path")
        else:
            dockerignore = os.path.join(path, '.dockerignore')
            exclude = None
            if os.path.exists(dockerignore):
                with open(dockerignore) as f:
                    exclude = list(filter(
                        lambda x: x != '' and x[0] != '#',
                        [line.strip() for line in f.read().splitlines()]
                    ))
            dockerfile = process_dockerfile(dockerfile, path)
            context = utils.tar(
                path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
            )
            encoding = 'gzip' if gzip else encoding

        u = self._url('/build')
        params = {
            't': tag,
            'remote': remote,
            'q': quiet,
            'nocache': nocache,
            'rm': rm,
            'forcerm': forcerm,
            'pull': pull,
            'dockerfile': dockerfile,
        }
        params.update(container_limits)

        if use_config_proxy:
            proxy_args = self._proxy_configs.get_environment()
            for k, v in proxy_args.items():
                buildargs.setdefault(k, v)
        if buildargs:
            params.update({'buildargs': json.dumps(buildargs)})

        if shmsize:
            if utils.version_gte(self._version, '1.22'):
                params.update({'shmsize': shmsize})
            else:
                raise errors.InvalidVersion(
                    'shmsize was only introduced in API version 1.22'
                )

        if labels:
            if utils.version_gte(self._version, '1.23'):
                params.update({'labels': json.dumps(labels)})
            else:
                raise errors.InvalidVersion(
                    'labels was only introduced in API version 1.23'
                )

        if cache_from:
            if utils.version_gte(self._version, '1.25'):
                params.update({'cachefrom': json.dumps(cache_from)})
            else:
                raise errors.InvalidVersion(
                    'cache_from was only introduced in API version 1.25'
                )

        if target:
            if utils.version_gte(self._version, '1.29'):
                params.update({'target': target})
            else:
                raise errors.InvalidVersion(
                    'target was only introduced in API version 1.29'
                )

        if network_mode:
            if utils.version_gte(self._version, '1.25'):
                params.update({'networkmode': network_mode})
            else:
                raise errors.InvalidVersion(
                    'network_mode was only introduced in API version 1.25'
                )

        if squash:
            if utils.version_gte(self._version, '1.25'):
                params.update({'squash': squash})
            else:
                raise errors.InvalidVersion(
                    'squash was only introduced in API version 1.25'
                )

        if extra_hosts is not None:
            if utils.version_lt(self._version, '1.27'):
                raise errors.InvalidVersion(
                    'extra_hosts was only introduced in API version 1.27'
                )

            if isinstance(extra_hosts, dict):
                extra_hosts = utils.format_extra_hosts(extra_hosts)
            params.update({'extrahosts': extra_hosts})

        if platform is not None:
            if utils.version_lt(self._version, '1.32'):
                raise errors.InvalidVersion(
                    'platform was only introduced in API version 1.32'
                )
            params['platform'] = platform

        if isolation is not None:
            if utils.version_lt(self._version, '1.24'):
                raise errors.InvalidVersion(
                    'isolation was only introduced in API version 1.24'
                )
            params['isolation'] = isolation

        if context is not None:
            headers = {'Content-Type': 'application/tar'}
            if encoding:
                headers['Content-Encoding'] = encoding

        self._set_auth_headers(headers)

        response = self._post(
            u,
            data=context,
            params=params,
            headers=headers,
            stream=True,
            timeout=timeout,
        )

        if context is not None and not custom_context:
            context.close()

        return self._stream_helper(response, decode=decode)

    @utils.minimum_version('1.31')
    def prune_builds(self, filters=None, keep_storage=None, all=None):
        """
        Delete the builder cache

        Args:
            filters (dict): Filters to process on the prune list.
                Needs Docker API v1.39+
                Available filters:
                - dangling (bool): When set to true (or 1), prune only
                    unused and untagged images.
                - until (str): Can be Unix timestamps, date formatted
                    timestamps, or Go duration strings (e.g. 10m, 1h30m)
                    computed relative to the daemon's local time.
            keep_storage (int): Amount of disk space in bytes to keep for
                cache. Needs Docker API v1.39+
            all (bool): Remove all types of build cache.
                Needs Docker API v1.39+

        Returns:
            (dict): A dictionary containing information about the operation's
                result. The ``SpaceReclaimed`` key indicates the amount of
                bytes of disk space reclaimed.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        url = self._url("/build/prune")
        if (filters, keep_storage, all) != (None, None, None) \
                and utils.version_lt(self._version, '1.39'):
            raise errors.InvalidVersion(
                '`filters`, `keep_storage`, and `all` args are only available '
                'for API version > 1.38'
            )
        params = {}
        if filters is not None:
            params['filters'] = utils.convert_filters(filters)
        if keep_storage is not None:
            params['keep-storage'] = keep_storage
        if all is not None:
            params['all'] = all
        return self._result(self._post(url, params=params), True)

    def _set_auth_headers(self, headers):
        log.debug('Looking for auth config')

        # If we don't have any auth data so far, try reloading the config
        # file one more time in case anything showed up in there.
        if not self._auth_configs or self._auth_configs.is_empty:
            log.debug("No auth config in memory - loading from filesystem")
            self._auth_configs = auth.load_config(
                credstore_env=self.credstore_env
            )

        # Send the full auth configuration (if any exists), since the build
        # could use any (or all) of the registries.
        if self._auth_configs:
            auth_data = self._auth_configs.get_all_credentials()

            # See https://github.com/docker/docker-py/issues/1683
            if (auth.INDEX_URL not in auth_data and
                    auth.INDEX_NAME in auth_data):
                auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})

            log.debug(
                "Sending auth config (%s)",
                ', '.join(repr(k) for k in auth_data),
            )

            if auth_data:
                headers['X-Registry-Config'] = auth.encode_header(
                    auth_data
                )
        else:
            log.debug('No auth config found')


def process_dockerfile(dockerfile, path):
    if not dockerfile:
        return (None, None)

    abs_dockerfile = dockerfile
    if not os.path.isabs(dockerfile):
        abs_dockerfile = os.path.join(path, dockerfile)
        if constants.IS_WINDOWS_PLATFORM and path.startswith(
                constants.WINDOWS_LONGPATH_PREFIX):
            normpath = os.path.normpath(
                abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):])
            abs_dockerfile = f'{constants.WINDOWS_LONGPATH_PREFIX}{normpath}'
    if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
            os.path.relpath(abs_dockerfile, path).startswith('..')):
        # Dockerfile not in context - read data to insert into tar later
        with open(abs_dockerfile) as df:
            return (
                f'.dockerfile.{random.getrandbits(160):x}',
                df.read()
            )

    # Dockerfile is inside the context - return path relative to context root
    if dockerfile == abs_dockerfile:
        # Only calculate relpath if necessary to avoid errors
        # on Windows client -> Linux Docker
        # see https://github.com/docker/compose/issues/5969
        dockerfile = os.path.relpath(abs_dockerfile, path)
    return (dockerfile, None)
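For reference, the build() mixin above is normally driven through docker.APIClient. A minimal usage sketch follows; the context directory, tag, and build argument are illustrative and not part of this commit:

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')

# Build a local context directory; decode=True yields one dict per status line.
for chunk in client.build(
        path='./app',                 # hypothetical directory with a Dockerfile
        tag='example/app:latest',     # hypothetical tag
        rm=True,                      # remove intermediate containers
        buildargs={'APP_ENV': 'production'},
        decode=True):
    if 'stream' in chunk:
        print(chunk['stream'], end='')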
532
backend/venv/lib/python3.9/site-packages/docker/api/client.py
Normal file
@@ -0,0 +1,532 @@
import json
import struct
import urllib
from functools import partial

import requests
import requests.adapters
import requests.exceptions

from .. import auth
from ..constants import (
    DEFAULT_MAX_POOL_SIZE,
    DEFAULT_NUM_POOLS,
    DEFAULT_NUM_POOLS_SSH,
    DEFAULT_TIMEOUT_SECONDS,
    DEFAULT_USER_AGENT,
    IS_WINDOWS_PLATFORM,
    MINIMUM_DOCKER_API_VERSION,
    STREAM_HEADER_SIZE_BYTES,
)
from ..errors import (
    DockerException,
    InvalidVersion,
    TLSParameterError,
    create_api_error_from_http_exception,
)
from ..tls import TLSConfig
from ..transport import UnixHTTPAdapter
from ..utils import check_resource, config, update_headers, utils
from ..utils.json_stream import json_stream
from ..utils.proxy import ProxyConfig
from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
from .build import BuildApiMixin
from .config import ConfigApiMixin
from .container import ContainerApiMixin
from .daemon import DaemonApiMixin
from .exec_api import ExecApiMixin
from .image import ImageApiMixin
from .network import NetworkApiMixin
from .plugin import PluginApiMixin
from .secret import SecretApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin

try:
    from ..transport import NpipeHTTPAdapter
except ImportError:
    pass

try:
    from ..transport import SSHHTTPAdapter
except ImportError:
    pass


class APIClient(
        requests.Session,
        BuildApiMixin,
        ConfigApiMixin,
        ContainerApiMixin,
        DaemonApiMixin,
        ExecApiMixin,
        ImageApiMixin,
        NetworkApiMixin,
        PluginApiMixin,
        SecretApiMixin,
        ServiceApiMixin,
        SwarmApiMixin,
        VolumeApiMixin):
    """
    A low-level client for the Docker Engine API.

    Example:

        >>> import docker
        >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
        >>> client.version()
        {u'ApiVersion': u'1.33',
         u'Arch': u'amd64',
         u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
         u'GitCommit': u'f4ffd2511c',
         u'GoVersion': u'go1.9.2',
         u'KernelVersion': u'4.14.3-1-ARCH',
         u'MinAPIVersion': u'1.12',
         u'Os': u'linux',
         u'Version': u'17.10.0-ce'}

    Args:
        base_url (str): URL to the Docker server. For example,
            ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
        version (str): The version of the API to use. Set to ``auto`` to
            automatically detect the server's version. Default: ``1.35``
        timeout (int): Default timeout for API calls, in seconds.
        tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
            ``True`` to enable it with default options, or pass a
            :py:class:`~docker.tls.TLSConfig` object to use custom
            configuration.
        user_agent (str): Set a custom user agent for requests to the server.
        credstore_env (dict): Override environment variables when calling the
            credential store process.
        use_ssh_client (bool): If set to `True`, an ssh connection is made
            via shelling out to the ssh client. Ensure the ssh client is
            installed and configured on the host.
        max_pool_size (int): The maximum number of connections
            to save in the pool.
    """

    __attrs__ = requests.Session.__attrs__ + ['_auth_configs',
                                              '_general_configs',
                                              '_version',
                                              'base_url',
                                              'timeout']

    def __init__(self, base_url=None, version=None,
                 timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
                 user_agent=DEFAULT_USER_AGENT, num_pools=None,
                 credstore_env=None, use_ssh_client=False,
                 max_pool_size=DEFAULT_MAX_POOL_SIZE):
        super().__init__()

        if tls and not base_url:
            raise TLSParameterError(
                'If using TLS, the base_url argument must be provided.'
            )

        self.base_url = base_url
        self.timeout = timeout
        self.headers['User-Agent'] = user_agent

        self._general_configs = config.load_general_config()

        proxy_config = self._general_configs.get('proxies', {})
        try:
            proxies = proxy_config[base_url]
        except KeyError:
            proxies = proxy_config.get('default', {})

        self._proxy_configs = ProxyConfig.from_dict(proxies)

        self._auth_configs = auth.load_config(
            config_dict=self._general_configs, credstore_env=credstore_env,
        )
        self.credstore_env = credstore_env

        base_url = utils.parse_host(
            base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
        )
        # SSH has a different default for num_pools to all other adapters.
        # Parenthesized so a caller-supplied num_pools is honored for
        # non-SSH URLs too (a bare `a or b if c else d` binds as
        # `(a or b) if c else d`).
        num_pools = num_pools or (
            DEFAULT_NUM_POOLS_SSH if base_url.startswith('ssh://')
            else DEFAULT_NUM_POOLS
        )
        if base_url.startswith('http+unix://'):
            self._custom_adapter = UnixHTTPAdapter(
                base_url, timeout, pool_connections=num_pools,
                max_pool_size=max_pool_size
            )
            self.mount('http+docker://', self._custom_adapter)
            self._unmount('http://', 'https://')
            # host part of URL should be unused, but is resolved by requests
            # module in proxy_bypass_macosx_sysconf()
            self.base_url = 'http+docker://localhost'
        elif base_url.startswith('npipe://'):
            if not IS_WINDOWS_PLATFORM:
                raise DockerException(
                    'The npipe:// protocol is only supported on Windows'
                )
            try:
                self._custom_adapter = NpipeHTTPAdapter(
                    base_url, timeout, pool_connections=num_pools,
                    max_pool_size=max_pool_size
                )
            except NameError as err:
                raise DockerException(
                    'Install pypiwin32 package to enable npipe:// support'
                ) from err
            self.mount('http+docker://', self._custom_adapter)
            self.base_url = 'http+docker://localnpipe'
        elif base_url.startswith('ssh://'):
            try:
                self._custom_adapter = SSHHTTPAdapter(
                    base_url, timeout, pool_connections=num_pools,
                    max_pool_size=max_pool_size, shell_out=use_ssh_client
                )
            except NameError as err:
                raise DockerException(
                    'Install paramiko package to enable ssh:// support'
                ) from err
            self.mount('http+docker://ssh', self._custom_adapter)
            self._unmount('http://', 'https://')
            self.base_url = 'http+docker://ssh'
        else:
            # Use SSLAdapter for the ability to specify SSL version
            if isinstance(tls, TLSConfig):
                tls.configure_client(self)
            elif tls:
                self._custom_adapter = requests.adapters.HTTPAdapter(
                    pool_connections=num_pools)
                self.mount('https://', self._custom_adapter)
            self.base_url = base_url

        # version detection needs to be after unix adapter mounting
        if version is None or (isinstance(
                version,
                str
        ) and version.lower() == 'auto'):
            self._version = self._retrieve_server_version()
        else:
            self._version = version
        if not isinstance(self._version, str):
            raise DockerException(
                'Version parameter must be a string or None. '
                f'Found {type(version).__name__}'
            )
        if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
            raise InvalidVersion(
                f'API versions below {MINIMUM_DOCKER_API_VERSION} are '
                f'no longer supported by this library.'
            )

    def _retrieve_server_version(self):
        try:
            return self.version(api_version=False)["ApiVersion"]
        except KeyError as ke:
            raise DockerException(
                'Invalid response from docker daemon: key "ApiVersion"'
                ' is missing.'
            ) from ke
        except Exception as e:
            raise DockerException(
                f'Error while fetching server API version: {e}'
            ) from e

    def _set_request_timeout(self, kwargs):
        """Prepare the kwargs for an HTTP request by inserting the timeout
        parameter, if not already present."""
        kwargs.setdefault('timeout', self.timeout)
        return kwargs

    @update_headers
    def _post(self, url, **kwargs):
        return self.post(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _get(self, url, **kwargs):
        return self.get(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _put(self, url, **kwargs):
        return self.put(url, **self._set_request_timeout(kwargs))

    @update_headers
    def _delete(self, url, **kwargs):
        return self.delete(url, **self._set_request_timeout(kwargs))

    def _url(self, pathfmt, *args, **kwargs):
        for arg in args:
            if not isinstance(arg, str):
                raise ValueError(
                    f'Expected a string but found {arg} ({type(arg)}) instead'
                )

        quote_f = partial(urllib.parse.quote, safe="/:")
        args = map(quote_f, args)

        formatted_path = pathfmt.format(*args)
        if kwargs.get('versioned_api', True):
            return f'{self.base_url}/v{self._version}{formatted_path}'
        else:
            return f'{self.base_url}{formatted_path}'

    def _raise_for_status(self, response):
        """Raises stored :class:`APIError`, if one occurred."""
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            raise create_api_error_from_http_exception(e) from e

    def _result(self, response, json=False, binary=False):
        assert not (json and binary)
        self._raise_for_status(response)

        if json:
            return response.json()
        if binary:
            return response.content
        return response.text

    def _post_json(self, url, data, **kwargs):
        # Go <1.1 can't unserialize null to a string
        # so we do this disgusting thing here.
        data2 = {}
        if data is not None and isinstance(data, dict):
            for k, v in iter(data.items()):
                if v is not None:
                    data2[k] = v
        elif data is not None:
            data2 = data

        if 'headers' not in kwargs:
            kwargs['headers'] = {}
        kwargs['headers']['Content-Type'] = 'application/json'
        return self._post(url, data=json.dumps(data2), **kwargs)

    def _attach_params(self, override=None):
        return override or {
            'stdout': 1,
            'stderr': 1,
            'stream': 1
        }

    @check_resource('container')
    def _attach_websocket(self, container, params=None):
        url = self._url("/containers/{0}/attach/ws", container)
        req = requests.Request("POST", url, params=self._attach_params(params))
        full_url = req.prepare().url
        full_url = full_url.replace("http://", "ws://", 1)
        full_url = full_url.replace("https://", "wss://", 1)
        return self._create_websocket_connection(full_url)

    def _create_websocket_connection(self, url):
        try:
            import websocket
            return websocket.create_connection(url)
        except ImportError as ie:
            raise DockerException(
                'The `websocket-client` library is required '
                'for using websocket connections. '
                'You can install the `docker` library '
                'with the [websocket] extra to install it.'
            ) from ie

    def _get_raw_response_socket(self, response):
        self._raise_for_status(response)
        if self.base_url == "http+docker://localnpipe":
            sock = response.raw._fp.fp.raw.sock
        elif self.base_url.startswith('http+docker://ssh'):
            sock = response.raw._fp.fp.channel
        else:
            sock = response.raw._fp.fp.raw
            if self.base_url.startswith("https://"):
                sock = sock._sock
        try:
            # Keep a reference to the response to stop it being garbage
            # collected. If the response is garbage collected, it will
            # close TLS sockets.
            sock._response = response
        except AttributeError:
            # UNIX sockets can't have attributes set on them, but that's
            # fine because we won't be doing TLS over them
            pass

        return sock

    def _stream_helper(self, response, decode=False):
        """Generator for data coming from a chunked-encoded HTTP response."""

        if response.raw._fp.chunked:
            if decode:
                yield from json_stream(self._stream_helper(response, False))
            else:
                reader = response.raw
                while not reader.closed:
                    # this read call will block until we get a chunk
                    data = reader.read(1)
                    if not data:
                        break
                    if reader._fp.chunk_left:
                        data += reader.read(reader._fp.chunk_left)
                    yield data
        else:
            # Response isn't chunked, meaning we probably
            # encountered an error immediately
            yield self._result(response, json=decode)

    def _multiplexed_buffer_helper(self, response):
        """A generator of multiplexed data blocks read from a buffered
        response."""
        buf = self._result(response, binary=True)
        buf_length = len(buf)
        walker = 0
        while True:
            if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
                break
            header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
            _, length = struct.unpack_from('>BxxxL', header)
            start = walker + STREAM_HEADER_SIZE_BYTES
            end = start + length
            walker = end
            yield buf[start:end]

    def _multiplexed_response_stream_helper(self, response):
        """A generator of multiplexed data blocks coming from a response
        stream."""

        # Disable timeout on the underlying socket to prevent
        # Read timed out(s) for long running processes
        socket = self._get_raw_response_socket(response)
        self._disable_socket_timeout(socket)

        while True:
            header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
            if not header:
                break
            _, length = struct.unpack('>BxxxL', header)
            if not length:
                continue
            data = response.raw.read(length)
            if not data:
                break
            yield data

    def _stream_raw_result(self, response, chunk_size=1, decode=True):
        ''' Stream result for TTY-enabled container and raw binary data'''
        self._raise_for_status(response)

        # Disable timeout on the underlying socket to prevent
        # Read timed out(s) for long running processes
        socket = self._get_raw_response_socket(response)
        self._disable_socket_timeout(socket)

        yield from response.iter_content(chunk_size, decode)

    def _read_from_socket(self, response, stream, tty=True, demux=False):
        """Consume all data from the socket, close the response and return the
        data. If stream=True, then a generator is returned instead and the
        caller is responsible for closing the response.
        """
        socket = self._get_raw_response_socket(response)

        gen = frames_iter(socket, tty)

        if demux:
            # The generator will output tuples (stdout, stderr)
            gen = (demux_adaptor(*frame) for frame in gen)
        else:
            # The generator will output strings
            gen = (data for (_, data) in gen)

        if stream:
            return gen
        else:
            try:
                # Wait for all frames, concatenate them, and return the result
                return consume_socket_output(gen, demux=demux)
            finally:
                response.close()

    def _disable_socket_timeout(self, socket):
        """ Depending on the combination of python version and whether we're
        connecting over http or https, we might need to access _sock, which
        may or may not exist; or we may need to just settimeout on socket
        itself, which also may or may not have settimeout on it. To avoid
        missing the correct one, we try both.

        We also do not want to set the timeout if it is already disabled, as
        you run the risk of changing a socket that was non-blocking to
        blocking, for example when using gevent.
        """
        sockets = [socket, getattr(socket, '_sock', None)]

        for s in sockets:
            if not hasattr(s, 'settimeout'):
                continue

            timeout = -1

            if hasattr(s, 'gettimeout'):
                timeout = s.gettimeout()

            # Don't change the timeout if it is already disabled.
            if timeout is None or timeout == 0.0:
                continue

            s.settimeout(None)

    @check_resource('container')
    def _check_is_tty(self, container):
        cont = self.inspect_container(container)
        return cont['Config']['Tty']

    def _get_result(self, container, stream, res):
        return self._get_result_tty(stream, res, self._check_is_tty(container))

    def _get_result_tty(self, stream, res, is_tty):
        # We should also use raw streaming (without keep-alives)
        # if we're dealing with a tty-enabled container.
        if is_tty:
            return self._stream_raw_result(res) if stream else \
                self._result(res, binary=True)

        self._raise_for_status(res)
        sep = b''
        if stream:
            return self._multiplexed_response_stream_helper(res)
        else:
            return sep.join(
                list(self._multiplexed_buffer_helper(res))
            )

    def _unmount(self, *args):
        for proto in args:
            self.adapters.pop(proto)

    def get_adapter(self, url):
        try:
            return super().get_adapter(url)
        except requests.exceptions.InvalidSchema as e:
            if self._custom_adapter:
                return self._custom_adapter
            else:
                raise e

    @property
    def api_version(self):
        return self._version

    def reload_config(self, dockercfg_path=None):
        """
        Force a reload of the auth configuration

        Args:
            dockercfg_path (str): Use a custom path for the Docker config file
                (default ``$HOME/.docker/config.json`` if present,
                otherwise ``$HOME/.dockercfg``)

        Returns:
            None
        """
        self._auth_configs = auth.load_config(
            dockercfg_path, credstore_env=self.credstore_env
        )
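A short sketch of constructing the client above with automatic version negotiation, per the ``auto`` option documented in the docstring; the socket path is the Linux default:

import docker

api = docker.APIClient(
    base_url='unix://var/run/docker.sock',
    version='auto',    # negotiate the server's API version at connect time
    timeout=30,
)
print(api.api_version)   # negotiated version string, e.g. '1.41'
print(api.ping())        # True when the daemon answers 'OK'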
92
backend/venv/lib/python3.9/site-packages/docker/api/config.py
Normal file
@@ -0,0 +1,92 @@
import base64

from .. import utils


class ConfigApiMixin:
    @utils.minimum_version('1.30')
    def create_config(self, name, data, labels=None, templating=None):
        """
        Create a config

        Args:
            name (string): Name of the config
            data (bytes): Config data to be stored
            labels (dict): A mapping of labels to assign to the config
            templating (dict): dictionary containing the name of the
                templating driver to be used expressed as
                `{ name: <templating_driver_name>}`

        Returns (dict): ID of the newly created config
        """
        if not isinstance(data, bytes):
            data = data.encode('utf-8')

        data = base64.b64encode(data)
        data = data.decode('ascii')
        body = {
            'Data': data,
            'Name': name,
            'Labels': labels,
            'Templating': templating
        }

        url = self._url('/configs/create')
        return self._result(
            self._post_json(url, data=body), True
        )

    @utils.minimum_version('1.30')
    @utils.check_resource('id')
    def inspect_config(self, id):
        """
        Retrieve config metadata

        Args:
            id (string): Full ID of the config to inspect

        Returns (dict): A dictionary of metadata

        Raises:
            :py:class:`docker.errors.NotFound`
                if no config with that ID exists
        """
        url = self._url('/configs/{0}', id)
        return self._result(self._get(url), True)

    @utils.minimum_version('1.30')
    @utils.check_resource('id')
    def remove_config(self, id):
        """
        Remove a config

        Args:
            id (string): Full ID of the config to remove

        Returns (boolean): True if successful

        Raises:
            :py:class:`docker.errors.NotFound`
                if no config with that ID exists
        """
        url = self._url('/configs/{0}', id)
        res = self._delete(url)
        self._raise_for_status(res)
        return True

    @utils.minimum_version('1.30')
    def configs(self, filters=None):
        """
        List configs

        Args:
            filters (dict): A map of filters to process on the configs
                list. Available filters: ``names``

        Returns (list): A list of configs
        """
        url = self._url('/configs')
        params = {}
        if filters:
            params['filters'] = utils.convert_filters(filters)
        return self._result(self._get(url, params=params), True)
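A hedged sketch of the config lifecycle defined above (requires a Swarm-mode daemon; the config name and payload are hypothetical):

import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock')

created = api.create_config('app-settings', b'key=value\n')   # {'ID': ...}
meta = api.inspect_config(created['ID'])
print(meta['Spec']['Name'])                                   # 'app-settings'
print(api.configs(filters={'names': ['app-settings']}))
api.remove_config(created['ID'])                              # True on success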
1348
backend/venv/lib/python3.9/site-packages/docker/api/container.py
Normal file
File diff suppressed because it is too large
181
backend/venv/lib/python3.9/site-packages/docker/api/daemon.py
Normal file
@@ -0,0 +1,181 @@
import os
from datetime import datetime

from .. import auth, types, utils


class DaemonApiMixin:
    @utils.minimum_version('1.25')
    def df(self):
        """
        Get data usage information.

        Returns:
            (dict): A dictionary representing different resource categories
                and their respective data usage.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        url = self._url('/system/df')
        return self._result(self._get(url), True)

    def events(self, since=None, until=None, filters=None, decode=None):
        """
        Get real-time events from the server. Similar to the ``docker events``
        command.

        Args:
            since (UTC datetime or int): Get events from this point
            until (UTC datetime or int): Get events until this point
            filters (dict): Filter the events by event time, container or
                image
            decode (bool): If set to true, stream will be decoded into dicts
                on the fly. False by default.

        Returns:
            A :py:class:`docker.types.daemon.CancellableStream` generator

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> for event in client.events(decode=True):
            ...     print(event)
            {u'from': u'image/with:tag',
             u'id': u'container-id',
             u'status': u'start',
             u'time': 1423339459}
            ...

            or

            >>> events = client.events()
            >>> for event in events:
            ...     print(event)
            >>> # and cancel from another thread
            >>> events.close()
        """

        if isinstance(since, datetime):
            since = utils.datetime_to_timestamp(since)

        if isinstance(until, datetime):
            until = utils.datetime_to_timestamp(until)

        if filters:
            filters = utils.convert_filters(filters)

        params = {
            'since': since,
            'until': until,
            'filters': filters
        }
        url = self._url('/events')

        response = self._get(url, params=params, stream=True, timeout=None)
        stream = self._stream_helper(response, decode=decode)

        return types.CancellableStream(stream, response)

    def info(self):
        """
        Display system-wide information. Identical to the ``docker info``
        command.

        Returns:
            (dict): The info as a dict

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self._result(self._get(self._url("/info")), True)

    def login(self, username, password=None, email=None, registry=None,
              reauth=False, dockercfg_path=None):
        """
        Authenticate with a registry. Similar to the ``docker login`` command.

        Args:
            username (str): The registry username
            password (str): The plaintext password
            email (str): The email for the registry account
            registry (str): URL to the registry. E.g.
                ``https://index.docker.io/v1/``
            reauth (bool): Whether or not to refresh existing authentication
                on the Docker server.
            dockercfg_path (str): Use a custom path for the Docker config file
                (default ``$HOME/.docker/config.json`` if present,
                otherwise ``$HOME/.dockercfg``)

        Returns:
            (dict): The response from the login request

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """

        # If we don't have any auth data so far, try reloading the config
        # file one more time in case anything showed up in there.
        # If dockercfg_path is passed check to see if the config file exists,
        # if so load that config.
        if dockercfg_path and os.path.exists(dockercfg_path):
            self._auth_configs = auth.load_config(
                dockercfg_path, credstore_env=self.credstore_env
            )
        elif not self._auth_configs or self._auth_configs.is_empty:
            self._auth_configs = auth.load_config(
                credstore_env=self.credstore_env
            )

        authcfg = self._auth_configs.resolve_authconfig(registry)
        # If we found an existing auth config for this registry and username
        # combination, we can return it immediately unless reauth is
        # requested.
        if authcfg and authcfg.get('username', None) == username \
                and not reauth:
            return authcfg

        req_data = {
            'username': username,
            'password': password,
            'email': email,
            'serveraddress': registry,
        }

        response = self._post_json(self._url('/auth'), data=req_data)
        if response.status_code == 200:
            self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
        return self._result(response, json=True)

    def ping(self):
        """
        Checks that the server is responsive. An exception will be raised if
        it isn't responding.

        Returns:
            (bool) The response from the server.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self._result(self._get(self._url('/_ping'))) == 'OK'

    def version(self, api_version=True):
        """
        Returns version information from the server. Similar to the ``docker
        version`` command.

        Returns:
            (dict): The server version information

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        url = self._url("/version", versioned_api=api_version)
        return self._result(self._get(url), json=True)
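A brief sketch of consuming the events() stream above; the filter values are illustrative:

import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock')

events = api.events(
    filters={'type': 'container', 'event': ['start', 'die']},
    decode=True,              # yield dicts instead of raw JSON lines
)
for event in events:
    print(event.get('status'), event.get('id', '')[:12])
    break                     # stop after the first matching event
events.close()                # cancels the underlying HTTP stream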
176
backend/venv/lib/python3.9/site-packages/docker/api/exec_api.py
Normal file
@@ -0,0 +1,176 @@
from .. import errors, utils
from ..types import CancellableStream


class ExecApiMixin:
    @utils.check_resource('container')
    def exec_create(self, container, cmd, stdout=True, stderr=True,
                    stdin=False, tty=False, privileged=False, user='',
                    environment=None, workdir=None, detach_keys=None):
        """
        Sets up an exec instance in a running container.

        Args:
            container (str): Target container where exec instance will be
                created
            cmd (str or list): Command to be executed
            stdout (bool): Attach to stdout. Default: ``True``
            stderr (bool): Attach to stderr. Default: ``True``
            stdin (bool): Attach to stdin. Default: ``False``
            tty (bool): Allocate a pseudo-TTY. Default: False
            privileged (bool): Run as privileged.
            user (str): User to execute command as. Default: root
            environment (dict or list): A dictionary or a list of strings in
                the following format ``["PASSWORD=xxx"]`` or
                ``{"PASSWORD": "xxx"}``.
            workdir (str): Path to working directory for this exec session
            detach_keys (str): Override the key sequence for detaching
                a container. Format is a single character `[a-Z]`
                or `ctrl-<value>` where `<value>` is one of:
                `a-z`, `@`, `^`, `[`, `,` or `_`.
                ~/.docker/config.json is used by default.

        Returns:
            (dict): A dictionary with an exec ``Id`` key.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """

        if environment is not None and utils.version_lt(self._version, '1.25'):
            raise errors.InvalidVersion(
                'Setting environment for exec is not supported in API < 1.25'
            )

        if isinstance(cmd, str):
            cmd = utils.split_command(cmd)

        if isinstance(environment, dict):
            environment = utils.utils.format_environment(environment)

        data = {
            'Container': container,
            'User': user,
            'Privileged': privileged,
            'Tty': tty,
            'AttachStdin': stdin,
            'AttachStdout': stdout,
            'AttachStderr': stderr,
            'Cmd': cmd,
            'Env': environment,
        }

        if workdir is not None:
            if utils.version_lt(self._version, '1.35'):
                raise errors.InvalidVersion(
                    'workdir is not supported for API version < 1.35'
                )
            data['WorkingDir'] = workdir

        if detach_keys:
            data['detachKeys'] = detach_keys
        elif 'detachKeys' in self._general_configs:
            data['detachKeys'] = self._general_configs['detachKeys']

        url = self._url('/containers/{0}/exec', container)
        res = self._post_json(url, data=data)
        return self._result(res, True)

    def exec_inspect(self, exec_id):
        """
        Return low-level information about an exec command.

        Args:
            exec_id (str): ID of the exec instance

        Returns:
            (dict): Dictionary of values returned by the endpoint.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        if isinstance(exec_id, dict):
            exec_id = exec_id.get('Id')
        res = self._get(self._url("/exec/{0}/json", exec_id))
        return self._result(res, True)

    def exec_resize(self, exec_id, height=None, width=None):
        """
        Resize the tty session used by the specified exec command.

        Args:
            exec_id (str): ID of the exec instance
            height (int): Height of tty session
            width (int): Width of tty session
        """

        if isinstance(exec_id, dict):
            exec_id = exec_id.get('Id')

        params = {'h': height, 'w': width}
        url = self._url("/exec/{0}/resize", exec_id)
        res = self._post(url, params=params)
        self._raise_for_status(res)

    @utils.check_resource('exec_id')
    def exec_start(self, exec_id, detach=False, tty=False, stream=False,
                   socket=False, demux=False):
        """
        Start a previously set up exec instance.

        Args:
            exec_id (str): ID of the exec instance
            detach (bool): If true, detach from the exec command.
                Default: False
            tty (bool): Allocate a pseudo-TTY. Default: False
            stream (bool): Return response data progressively as an iterator
                of strings, rather than a single string.
            socket (bool): Return the connection socket to allow custom
                read/write operations. Must be closed by the caller when done.
            demux (bool): Return stdout and stderr separately

        Returns:

            (generator or str or tuple): If ``stream=True``, a generator
            yielding response chunks. If ``socket=True``, a socket object for
            the connection. A string containing response data otherwise. If
            ``demux=True``, a tuple with two elements of type byte: stdout
            and stderr.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        # we want opened socket if socket == True

        data = {
            'Tty': tty,
            'Detach': detach
        }

        headers = {} if detach else {
            'Connection': 'Upgrade',
            'Upgrade': 'tcp'
        }

        res = self._post_json(
            self._url('/exec/{0}/start', exec_id),
            headers=headers,
            data=data,
            stream=True
        )
        if detach:
            try:
                return self._result(res)
            finally:
                res.close()
        if socket:
            return self._get_raw_response_socket(res)

        output = self._read_from_socket(res, stream, tty=tty, demux=demux)
        if stream:
            return CancellableStream(output, res)
        else:
            return output
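A minimal sketch tying exec_create() and exec_start() together; the container name 'web' is hypothetical:

import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock')

exec_id = api.exec_create(container='web', cmd='ls -l /etc')
stdout, stderr = api.exec_start(exec_id, demux=True)  # split output streams
if stdout:
    print(stdout.decode(), end='')
print(api.exec_inspect(exec_id)['ExitCode'])          # 0 on success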
601
backend/venv/lib/python3.9/site-packages/docker/api/image.py
Normal file
@@ -0,0 +1,601 @@
import logging
import os

from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE

log = logging.getLogger(__name__)


class ImageApiMixin:

    @utils.check_resource('image')
    def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
        """
        Get a tarball of an image. Similar to the ``docker save`` command.

        Args:
            image (str): Image name to get
            chunk_size (int): The number of bytes returned by each iteration
                of the generator. If ``None``, data will be streamed as it is
                received. Default: 2 MB

        Returns:
            (generator): A stream of raw archive data.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> image = client.api.get_image("busybox:latest")
            >>> f = open('/tmp/busybox-latest.tar', 'wb')
            >>> for chunk in image:
            ...     f.write(chunk)
            >>> f.close()
        """
        res = self._get(self._url("/images/{0}/get", image), stream=True)
        return self._stream_raw_result(res, chunk_size, False)

    @utils.check_resource('image')
    def history(self, image):
        """
        Show the history of an image.

        Args:
            image (str): The image to show history for

        Returns:
            (list): The history of the image

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        res = self._get(self._url("/images/{0}/history", image))
        return self._result(res, True)

    def images(self, name=None, quiet=False, all=False, filters=None):
        """
        List images. Similar to the ``docker images`` command.

        Args:
            name (str): Only show images belonging to the repository ``name``
            quiet (bool): Only return numeric IDs as a list.
            all (bool): Show intermediate image layers. By default, these are
                filtered out.
            filters (dict): Filters to be processed on the image list.
                Available filters:
                - ``dangling`` (bool)
                - ``label`` (str|list): format either ``"key"``,
                  ``"key=value"`` or a list of such.

        Returns:
            (dict or list): A list if ``quiet=True``, otherwise a dict.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        params = {
            'only_ids': 1 if quiet else 0,
            'all': 1 if all else 0,
        }
        if name:
            if utils.version_lt(self._version, '1.25'):
                # only use "filter" on API 1.24 and under, as it is deprecated
                params['filter'] = name
            else:
                if filters:
                    filters['reference'] = name
                else:
                    filters = {'reference': name}
        if filters:
            params['filters'] = utils.convert_filters(filters)
        res = self._result(self._get(self._url("/images/json"),
                                     params=params), True)
        if quiet:
            return [x['Id'] for x in res]
        return res
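A short sketch of the images() listing above, using the reference and dangling filters; the repository name is illustrative:

import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock')

ids = api.images(name='busybox', quiet=True)         # list of image IDs
for img in api.images(filters={'dangling': True}):   # full dicts
    print(img['Id'][:19], img['RepoTags'])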
|
||||
def import_image(self, src=None, repository=None, tag=None, image=None,
|
||||
changes=None, stream_src=False):
|
||||
"""
|
||||
Import an image. Similar to the ``docker import`` command.
|
||||
|
||||
If ``src`` is a string or unicode string, it will first be treated as a
|
||||
path to a tarball on the local system. If there is an error reading
|
||||
from that file, ``src`` will be treated as a URL instead to fetch the
|
||||
image from. You can also pass an open file handle as ``src``, in which
|
||||
case the data will be read from that file.
|
||||
|
||||
If ``src`` is unset but ``image`` is set, the ``image`` parameter will
|
||||
be taken as the name of an existing image to import from.
|
||||
|
||||
Args:
|
||||
src (str or file): Path to tarfile, URL, or file-like object
|
||||
repository (str): The repository to create
|
||||
tag (str): The tag to apply
|
||||
image (str): Use another image like the ``FROM`` Dockerfile
|
||||
parameter
|
||||
"""
|
||||
if not (src or image):
|
||||
raise errors.DockerException(
|
||||
'Must specify src or image to import from'
|
||||
)
|
||||
u = self._url('/images/create')
|
||||
|
||||
params = _import_image_params(
|
||||
repository, tag, image,
|
||||
src=(src if isinstance(src, str) else None),
|
||||
changes=changes
|
||||
)
|
||||
headers = {'Content-Type': 'application/tar'}
|
||||
|
||||
if image or params.get('fromSrc') != '-': # from image or URL
|
||||
return self._result(
|
||||
self._post(u, data=None, params=params)
|
||||
)
|
||||
elif isinstance(src, str): # from file path
|
||||
with open(src, 'rb') as f:
|
||||
return self._result(
|
||||
self._post(
|
||||
u, data=f, params=params, headers=headers, timeout=None
|
||||
)
|
||||
)
|
||||
else: # from raw data
|
||||
if stream_src:
|
||||
headers['Transfer-Encoding'] = 'chunked'
|
||||
return self._result(
|
||||
self._post(u, data=src, params=params, headers=headers)
|
||||
)
|
||||
|
||||
def import_image_from_data(self, data, repository=None, tag=None,
|
||||
changes=None):
|
||||
"""
|
||||
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
|
||||
allows importing in-memory bytes data.
|
||||
|
||||
Args:
|
||||
data (bytes collection): Bytes collection containing valid tar data
|
||||
repository (str): The repository to create
|
||||
tag (str): The tag to apply
|
||||
"""
|
||||
|
||||
u = self._url('/images/create')
|
||||
params = _import_image_params(
|
||||
repository, tag, src='-', changes=changes
|
||||
)
|
||||
headers = {'Content-Type': 'application/tar'}
|
||||
return self._result(
|
||||
self._post(
|
||||
u, data=data, params=params, headers=headers, timeout=None
|
||||
)
|
||||
)
|
||||
|
||||
def import_image_from_file(self, filename, repository=None, tag=None,
|
||||
changes=None):
|
||||
"""
|
||||
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
|
||||
supports importing from a tar file on disk.
|
||||
|
||||
Args:
|
||||
filename (str): Full path to a tar file.
|
||||
repository (str): The repository to create
|
||||
tag (str): The tag to apply
|
||||
|
||||
Raises:
|
||||
IOError: File does not exist.
|
||||
"""
|
||||
|
||||
return self.import_image(
|
||||
src=filename, repository=repository, tag=tag, changes=changes
|
||||
)
|
||||
|
||||
def import_image_from_stream(self, stream, repository=None, tag=None,
|
||||
changes=None):
|
||||
return self.import_image(
|
||||
src=stream, stream_src=True, repository=repository, tag=tag,
|
||||
changes=changes
|
||||
)
|
||||
|
||||
def import_image_from_url(self, url, repository=None, tag=None,
|
||||
changes=None):
|
||||
"""
|
||||
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
|
||||
supports importing from a URL.
|
||||
|
||||
Args:
|
||||
url (str): A URL pointing to a tar file.
|
||||
repository (str): The repository to create
|
||||
tag (str): The tag to apply
|
||||
"""
|
||||
return self.import_image(
|
||||
src=url, repository=repository, tag=tag, changes=changes
|
||||
)
|
||||
|
||||
def import_image_from_image(self, image, repository=None, tag=None,
|
||||
changes=None):
|
||||
"""
|
||||
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
|
||||
supports importing from another image, like the ``FROM`` Dockerfile
|
||||
parameter.
|
||||
|
||||
Args:
|
||||
image (str): Image name to import from
|
||||
repository (str): The repository to create
|
||||
tag (str): The tag to apply
|
||||
"""
|
||||
return self.import_image(
|
||||
image=image, repository=repository, tag=tag, changes=changes
|
||||
)
|
||||
|
||||
@utils.check_resource('image')
|
||||
def inspect_image(self, image):
|
||||
"""
|
||||
Get detailed information about an image. Similar to the ``docker
|
||||
inspect`` command, but only for images.
|
||||
|
||||
Args:
|
||||
image (str): The image to inspect
|
||||
|
||||
Returns:
|
||||
(dict): Similar to the output of ``docker inspect``, but as a
|
||||
single dict
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
return self._result(
|
||||
self._get(self._url("/images/{0}/json", image)), True
|
||||
)
|
||||
|
||||
@utils.minimum_version('1.30')
|
||||
@utils.check_resource('image')
|
||||
def inspect_distribution(self, image, auth_config=None):
|
||||
"""
|
||||
Get image digest and platform information by contacting the registry.
|
||||
|
||||
Args:
|
||||
image (str): The image name to inspect
|
||||
auth_config (dict): Override the credentials that are found in the
|
||||
config for this request. ``auth_config`` should contain the
|
||||
``username`` and ``password`` keys to be valid.
|
||||
|
||||
Returns:
|
||||
(dict): A dict containing distribution data
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
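
        Example:
            An illustrative call, assuming ``client`` is a connected
            :py:class:`~docker.APIClient` against API version 1.30+:

            >>> client.api.inspect_distribution('busybox:latest')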
"""
|
||||
registry, _ = auth.resolve_repository_name(image)
|
||||
|
||||
headers = {}
|
||||
if auth_config is None:
|
||||
header = auth.get_config_header(self, registry)
|
||||
if header:
|
||||
headers['X-Registry-Auth'] = header
|
||||
else:
|
||||
log.debug('Sending supplied auth config')
|
||||
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
|
||||
|
||||
url = self._url("/distribution/{0}/json", image)
|
||||
|
||||
return self._result(
|
||||
self._get(url, headers=headers), True
|
||||
)
|
||||
|
||||
def load_image(self, data, quiet=None):
|
||||
"""
|
||||
Load an image that was previously saved using
|
||||
:py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
|
||||
save``). Similar to ``docker load``.
|
||||
|
||||
Args:
|
||||
data (binary): Image data to be loaded.
|
||||
quiet (boolean): Suppress progress details in response.
|
||||
|
||||
Returns:
|
||||
(generator): Progress output as JSON objects. Only available for
|
||||
API version >= 1.23
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
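
        Example:
            A minimal sketch, assuming ``busybox.tar`` was produced by
            ``docker save`` and ``client`` is a connected
            :py:class:`~docker.APIClient`:

            >>> with open('busybox.tar', 'rb') as f:
            ...     for line in client.api.load_image(f.read()):
            ...         print(line)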
"""
|
||||
params = {}
|
||||
|
||||
if quiet is not None:
|
||||
if utils.version_lt(self._version, '1.23'):
|
||||
raise errors.InvalidVersion(
|
||||
'quiet is not supported in API version < 1.23'
|
||||
)
|
||||
params['quiet'] = quiet
|
||||
|
||||
res = self._post(
|
||||
self._url("/images/load"), data=data, params=params, stream=True
|
||||
)
|
||||
if utils.version_gte(self._version, '1.23'):
|
||||
return self._stream_helper(res, decode=True)
|
||||
|
||||
self._raise_for_status(res)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
def prune_images(self, filters=None):
|
||||
"""
|
||||
Delete unused images
|
||||
|
||||
Args:
|
||||
filters (dict): Filters to process on the prune list.
|
||||
Available filters:
|
||||
- dangling (bool): When set to true (or 1), prune only
|
||||
unused and untagged images.
|
||||
|
||||
Returns:
|
||||
(dict): A dict containing a list of deleted image IDs and
|
||||
the amount of disk space reclaimed in bytes.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
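
        Example:
            An illustrative call, assuming ``client`` is a connected
            :py:class:`~docker.APIClient`:

            >>> client.api.prune_images(filters={'dangling': True})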
"""
|
||||
url = self._url("/images/prune")
|
||||
params = {}
|
||||
if filters is not None:
|
||||
params['filters'] = utils.convert_filters(filters)
|
||||
return self._result(self._post(url, params=params), True)
|
||||
|
||||
def pull(self, repository, tag=None, stream=False, auth_config=None,
|
||||
decode=False, platform=None, all_tags=False):
|
||||
"""
|
||||
Pulls an image. Similar to the ``docker pull`` command.
|
||||
|
||||
Args:
|
||||
repository (str): The repository to pull
|
||||
tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it
|
||||
is set to ``latest``.
|
||||
stream (bool): Stream the output as a generator. Make sure to
|
||||
consume the generator, otherwise pull might get cancelled.
|
||||
auth_config (dict): Override the credentials that are found in the
|
||||
config for this request. ``auth_config`` should contain the
|
||||
``username`` and ``password`` keys to be valid.
|
||||
decode (bool): Decode the JSON data from the server into dicts.
|
||||
Only applies with ``stream=True``
|
||||
platform (str): Platform in the format ``os[/arch[/variant]]``
|
||||
all_tags (bool): Pull all image tags, the ``tag`` parameter is
|
||||
ignored.
|
||||
|
||||
Returns:
|
||||
(generator or str): The output
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> resp = client.api.pull('busybox', stream=True, decode=True)
|
||||
... for line in resp:
|
||||
... print(json.dumps(line, indent=4))
|
||||
{
|
||||
"status": "Pulling image (latest) from busybox",
|
||||
"progressDetail": {},
|
||||
"id": "e72ac664f4f0"
|
||||
}
|
||||
{
|
||||
"status": "Pulling image (latest) from busybox, endpoint: ...",
|
||||
"progressDetail": {},
|
||||
"id": "e72ac664f4f0"
|
||||
}
|
||||
|
||||
"""
|
||||
repository, image_tag = utils.parse_repository_tag(repository)
|
||||
tag = tag or image_tag or 'latest'
|
||||
|
||||
if all_tags:
|
||||
tag = None
|
||||
|
||||
registry, repo_name = auth.resolve_repository_name(repository)
|
||||
|
||||
params = {
|
||||
'tag': tag,
|
||||
'fromImage': repository
|
||||
}
|
||||
headers = {}
|
||||
|
||||
if auth_config is None:
|
||||
header = auth.get_config_header(self, registry)
|
||||
if header:
|
||||
headers['X-Registry-Auth'] = header
|
||||
else:
|
||||
log.debug('Sending supplied auth config')
|
||||
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
|
||||
|
||||
if platform is not None:
|
||||
if utils.version_lt(self._version, '1.32'):
|
||||
raise errors.InvalidVersion(
|
||||
'platform was only introduced in API version 1.32'
|
||||
)
|
||||
params['platform'] = platform
|
||||
|
||||
response = self._post(
|
||||
self._url('/images/create'), params=params, headers=headers,
|
||||
stream=stream, timeout=None
|
||||
)
|
||||
|
||||
self._raise_for_status(response)
|
||||
|
||||
if stream:
|
||||
return self._stream_helper(response, decode=decode)
|
||||
|
||||
return self._result(response)
|
||||
|
||||
def push(self, repository, tag=None, stream=False, auth_config=None,
|
||||
decode=False):
|
||||
"""
|
||||
Push an image or a repository to the registry. Similar to the ``docker
|
||||
push`` command.
|
||||
|
||||
Args:
|
||||
repository (str): The repository to push to
|
||||
tag (str): An optional tag to push
|
||||
stream (bool): Stream the output as a blocking generator
|
||||
auth_config (dict): Override the credentials that are found in the
|
||||
config for this request. ``auth_config`` should contain the
|
||||
``username`` and ``password`` keys to be valid.
|
||||
decode (bool): Decode the JSON data from the server into dicts.
|
||||
Only applies with ``stream=True``
|
||||
|
||||
Returns:
|
||||
(generator or str): The output from the server.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
>>> resp = client.api.push(
|
||||
... 'yourname/app',
|
||||
... stream=True,
|
||||
... decode=True,
|
||||
... )
|
||||
... for line in resp:
|
||||
... print(line)
|
||||
{'status': 'Pushing repository yourname/app (1 tags)'}
|
||||
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
|
||||
{'status': 'Image already pushed, skipping', 'progressDetail':{},
|
||||
'id': '511136ea3c5a'}
|
||||
...
|
||||
|
||||
"""
|
||||
if not tag:
|
||||
repository, tag = utils.parse_repository_tag(repository)
|
||||
registry, repo_name = auth.resolve_repository_name(repository)
|
||||
u = self._url("/images/{0}/push", repository)
|
||||
params = {
|
||||
'tag': tag
|
||||
}
|
||||
headers = {}
|
||||
|
||||
if auth_config is None:
|
||||
header = auth.get_config_header(self, registry)
|
||||
if header:
|
||||
headers['X-Registry-Auth'] = header
|
||||
else:
|
||||
log.debug('Sending supplied auth config')
|
||||
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
|
||||
|
||||
response = self._post_json(
|
||||
u, None, headers=headers, stream=stream, params=params
|
||||
)
|
||||
|
||||
self._raise_for_status(response)
|
||||
|
||||
if stream:
|
||||
return self._stream_helper(response, decode=decode)
|
||||
|
||||
return self._result(response)
|
||||
|
||||
@utils.check_resource('image')
|
||||
def remove_image(self, image, force=False, noprune=False):
|
||||
"""
|
||||
Remove an image. Similar to the ``docker rmi`` command.
|
||||
|
||||
Args:
|
||||
image (str): The image to remove
|
||||
force (bool): Force removal of the image
|
||||
noprune (bool): Do not delete untagged parents
|
||||
"""
|
||||
params = {'force': force, 'noprune': noprune}
|
||||
res = self._delete(self._url("/images/{0}", image), params=params)
|
||||
return self._result(res, True)
|
||||
|
||||
def search(self, term, limit=None):
|
||||
"""
|
||||
Search for images on Docker Hub. Similar to the ``docker search``
|
||||
command.
|
||||
|
||||
Args:
|
||||
term (str): A term to search for.
|
||||
limit (int): The maximum number of results to return.
|
||||
|
||||
Returns:
|
||||
(list of dicts): The response of the search.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {'term': term}
|
||||
if limit is not None:
|
||||
params['limit'] = limit
|
||||
|
||||
return self._result(
|
||||
self._get(self._url("/images/search"), params=params),
|
||||
True
|
||||
)
|
||||
|
||||
@utils.check_resource('image')
|
||||
def tag(self, image, repository, tag=None, force=False):
|
||||
"""
|
||||
Tag an image into a repository. Similar to the ``docker tag`` command.
|
||||
|
||||
Args:
|
||||
image (str): The image to tag
|
||||
repository (str): The repository to set for the tag
|
||||
tag (str): The tag name
|
||||
force (bool): Force
|
||||
|
||||
Returns:
|
||||
(bool): ``True`` if successful
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
|
||||
force=True)
|
||||
"""
|
||||
params = {
|
||||
'tag': tag,
|
||||
'repo': repository,
|
||||
'force': 1 if force else 0
|
||||
}
|
||||
url = self._url("/images/{0}/tag", image)
|
||||
res = self._post(url, params=params)
|
||||
self._raise_for_status(res)
|
||||
return res.status_code == 201
|
||||
|
||||
|
||||
def is_file(src):
|
||||
try:
|
||||
return (
|
||||
isinstance(src, str) and
|
||||
os.path.isfile(src)
|
||||
)
|
||||
except TypeError: # a data string will make isfile() raise a TypeError
|
||||
return False
|
||||
|
||||
|
||||
def _import_image_params(repo, tag, image=None, src=None,
|
||||
changes=None):
|
||||
params = {
|
||||
'repo': repo,
|
||||
'tag': tag,
|
||||
}
|
||||
if image:
|
||||
params['fromImage'] = image
|
||||
elif src and not is_file(src):
|
||||
params['fromSrc'] = src
|
||||
else:
|
||||
params['fromSrc'] = '-'
|
||||
|
||||
if changes:
|
||||
params['changes'] = changes
|
||||
|
||||
return params
|
||||
277
backend/venv/lib/python3.9/site-packages/docker/api/network.py
Normal file
277
backend/venv/lib/python3.9/site-packages/docker/api/network.py
Normal file
@@ -0,0 +1,277 @@
from .. import utils
from ..errors import InvalidVersion
from ..utils import check_resource, minimum_version, version_lt


class NetworkApiMixin:
    def networks(self, names=None, ids=None, filters=None):
        """
        List networks. Similar to the ``docker network ls`` command.

        Args:
            names (:py:class:`list`): List of names to filter by
            ids (:py:class:`list`): List of ids to filter by
            filters (dict): Filters to be processed on the network list.
                Available filters:
                - ``driver=[<driver-name>]`` Matches a network's driver.
                - ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of
                    such.
                - ``type=["custom"|"builtin"]`` Filters networks by type.

        Returns:
            (list of dicts): List of network objects.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
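
        Example:
            An illustrative call, assuming ``client`` is a connected
            :py:class:`~docker.APIClient`:

            >>> client.api.networks(filters={'driver': 'bridge'})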
"""
|
||||
|
||||
if filters is None:
|
||||
filters = {}
|
||||
if names:
|
||||
filters['name'] = names
|
||||
if ids:
|
||||
filters['id'] = ids
|
||||
params = {'filters': utils.convert_filters(filters)}
|
||||
url = self._url("/networks")
|
||||
res = self._get(url, params=params)
|
||||
return self._result(res, json=True)
|
||||
|
||||
def create_network(self, name, driver=None, options=None, ipam=None,
|
||||
check_duplicate=None, internal=False, labels=None,
|
||||
enable_ipv6=False, attachable=None, scope=None,
|
||||
ingress=None):
|
||||
"""
|
||||
Create a network. Similar to the ``docker network create``.
|
||||
|
||||
Args:
|
||||
name (str): Name of the network
|
||||
driver (str): Name of the driver used to create the network
|
||||
options (dict): Driver options as a key-value dictionary
|
||||
ipam (IPAMConfig): Optional custom IP scheme for the network.
|
||||
check_duplicate (bool): Request daemon to check for networks with
|
||||
same name. Default: ``None``.
|
||||
internal (bool): Restrict external access to the network. Default
|
||||
``False``.
|
||||
labels (dict): Map of labels to set on the network. Default
|
||||
``None``.
|
||||
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
|
||||
attachable (bool): If enabled, and the network is in the global
|
||||
scope, non-service containers on worker nodes will be able to
|
||||
connect to the network.
|
||||
scope (str): Specify the network's scope (``local``, ``global`` or
|
||||
``swarm``)
|
||||
ingress (bool): If set, create an ingress network which provides
|
||||
the routing-mesh in swarm mode.
|
||||
|
||||
Returns:
|
||||
(dict): The created network reference object
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
A network using the bridge driver:
|
||||
|
||||
>>> client.api.create_network("network1", driver="bridge")
|
||||
|
||||
You can also create more advanced networks with custom IPAM
|
||||
configurations. For example, setting the subnet to
|
||||
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> ipam_pool = docker.types.IPAMPool(
|
||||
subnet='192.168.52.0/24',
|
||||
gateway='192.168.52.254'
|
||||
)
|
||||
>>> ipam_config = docker.types.IPAMConfig(
|
||||
pool_configs=[ipam_pool]
|
||||
)
|
||||
>>> client.api.create_network("network1", driver="bridge",
|
||||
ipam=ipam_config)
|
||||
"""
|
||||
if options is not None and not isinstance(options, dict):
|
||||
raise TypeError('options must be a dictionary')
|
||||
|
||||
data = {
|
||||
'Name': name,
|
||||
'Driver': driver,
|
||||
'Options': options,
|
||||
'IPAM': ipam,
|
||||
'CheckDuplicate': check_duplicate,
|
||||
}
|
||||
|
||||
if labels is not None:
|
||||
if version_lt(self._version, '1.23'):
|
||||
raise InvalidVersion(
|
||||
'network labels were introduced in API 1.23'
|
||||
)
|
||||
if not isinstance(labels, dict):
|
||||
raise TypeError('labels must be a dictionary')
|
||||
data["Labels"] = labels
|
||||
|
||||
if enable_ipv6:
|
||||
if version_lt(self._version, '1.23'):
|
||||
raise InvalidVersion(
|
||||
'enable_ipv6 was introduced in API 1.23'
|
||||
)
|
||||
data['EnableIPv6'] = True
|
||||
|
||||
if internal:
|
||||
if version_lt(self._version, '1.22'):
|
||||
raise InvalidVersion('Internal networks are not '
|
||||
'supported in API version < 1.22')
|
||||
data['Internal'] = True
|
||||
|
||||
if attachable is not None:
|
||||
if version_lt(self._version, '1.24'):
|
||||
raise InvalidVersion(
|
||||
'attachable is not supported in API version < 1.24'
|
||||
)
|
||||
data['Attachable'] = attachable
|
||||
|
||||
if ingress is not None:
|
||||
if version_lt(self._version, '1.29'):
|
||||
raise InvalidVersion(
|
||||
'ingress is not supported in API version < 1.29'
|
||||
)
|
||||
|
||||
data['Ingress'] = ingress
|
||||
|
||||
if scope is not None:
|
||||
if version_lt(self._version, '1.30'):
|
||||
raise InvalidVersion(
|
||||
'scope is not supported in API version < 1.30'
|
||||
)
|
||||
data['Scope'] = scope
|
||||
|
||||
url = self._url("/networks/create")
|
||||
res = self._post_json(url, data=data)
|
||||
return self._result(res, json=True)
|
||||
|
||||
@minimum_version('1.25')
|
||||
def prune_networks(self, filters=None):
|
||||
"""
|
||||
Delete unused networks
|
||||
|
||||
Args:
|
||||
filters (dict): Filters to process on the prune list.
|
||||
|
||||
Returns:
|
||||
(dict): A dict containing a list of deleted network names and
|
||||
the amount of disk space reclaimed in bytes.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {}
|
||||
if filters:
|
||||
params['filters'] = utils.convert_filters(filters)
|
||||
url = self._url('/networks/prune')
|
||||
return self._result(self._post(url, params=params), True)
|
||||
|
||||
@check_resource('net_id')
|
||||
def remove_network(self, net_id):
|
||||
"""
|
||||
Remove a network. Similar to the ``docker network rm`` command.
|
||||
|
||||
Args:
|
||||
net_id (str): The network's id
|
||||
"""
|
||||
url = self._url("/networks/{0}", net_id)
|
||||
res = self._delete(url)
|
||||
self._raise_for_status(res)
|
||||
|
||||
@check_resource('net_id')
|
||||
def inspect_network(self, net_id, verbose=None, scope=None):
|
||||
"""
|
||||
Get detailed information about a network.
|
||||
|
||||
Args:
|
||||
net_id (str): ID of network
|
||||
verbose (bool): Show the service details across the cluster in
|
||||
swarm mode.
|
||||
scope (str): Filter the network by scope (``swarm``, ``global``
|
||||
or ``local``).
|
||||
"""
|
||||
params = {}
|
||||
if verbose is not None:
|
||||
if version_lt(self._version, '1.28'):
|
||||
raise InvalidVersion('verbose was introduced in API 1.28')
|
||||
params['verbose'] = verbose
|
||||
if scope is not None:
|
||||
if version_lt(self._version, '1.31'):
|
||||
raise InvalidVersion('scope was introduced in API 1.31')
|
||||
params['scope'] = scope
|
||||
|
||||
url = self._url("/networks/{0}", net_id)
|
||||
res = self._get(url, params=params)
|
||||
return self._result(res, json=True)
|
||||
|
||||
@check_resource('container')
|
||||
def connect_container_to_network(self, container, net_id,
|
||||
ipv4_address=None, ipv6_address=None,
|
||||
aliases=None, links=None,
|
||||
link_local_ips=None, driver_opt=None,
|
||||
mac_address=None):
|
||||
"""
|
||||
Connect a container to a network.
|
||||
|
||||
Args:
|
||||
container (str): container-id/name to be connected to the network
|
||||
net_id (str): network id
|
||||
aliases (:py:class:`list`): A list of aliases for this endpoint.
|
||||
Names in that list can be used within the network to reach the
|
||||
container. Defaults to ``None``.
|
||||
links (:py:class:`list`): A list of links for this endpoint.
|
||||
Containers declared in this list will be linked to this
|
||||
container. Defaults to ``None``.
|
||||
ipv4_address (str): The IP address of this container on the
|
||||
network, using the IPv4 protocol. Defaults to ``None``.
|
||||
ipv6_address (str): The IP address of this container on the
|
||||
network, using the IPv6 protocol. Defaults to ``None``.
|
||||
link_local_ips (:py:class:`list`): A list of link-local
|
||||
(IPv4/IPv6) addresses.
|
||||
mac_address (str): The MAC address of this container on the
|
||||
network. Defaults to ``None``.
|
||||
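
        Example:
            An illustrative call, assuming ``client`` is a connected
            :py:class:`~docker.APIClient` and both container and network
            already exist:

            >>> client.api.connect_container_to_network(
            ...     container='web', net_id='mynet',
            ...     aliases=['web.internal'],
            ... )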
"""
|
||||
data = {
|
||||
"Container": container,
|
||||
"EndpointConfig": self.create_endpoint_config(
|
||||
aliases=aliases, links=links, ipv4_address=ipv4_address,
|
||||
ipv6_address=ipv6_address, link_local_ips=link_local_ips,
|
||||
driver_opt=driver_opt,
|
||||
mac_address=mac_address
|
||||
),
|
||||
}
|
||||
|
||||
url = self._url("/networks/{0}/connect", net_id)
|
||||
res = self._post_json(url, data=data)
|
||||
self._raise_for_status(res)
|
||||
|
||||
@check_resource('container')
|
||||
def disconnect_container_from_network(self, container, net_id,
|
||||
force=False):
|
||||
"""
|
||||
Disconnect a container from a network.
|
||||
|
||||
Args:
|
||||
container (str): container ID or name to be disconnected from the
|
||||
network
|
||||
net_id (str): network ID
|
||||
force (bool): Force the container to disconnect from a network.
|
||||
Default: ``False``
|
||||
"""
|
||||
data = {"Container": container}
|
||||
if force:
|
||||
if version_lt(self._version, '1.22'):
|
||||
raise InvalidVersion(
|
||||
'Forced disconnect was introduced in API 1.22'
|
||||
)
|
||||
data['Force'] = force
|
||||
url = self._url("/networks/{0}/disconnect", net_id)
|
||||
res = self._post_json(url, data=data)
|
||||
self._raise_for_status(res)
|
||||
261
backend/venv/lib/python3.9/site-packages/docker/api/plugin.py
Normal file
261
backend/venv/lib/python3.9/site-packages/docker/api/plugin.py
Normal file
@@ -0,0 +1,261 @@
from .. import auth, utils


class PluginApiMixin:
    @utils.minimum_version('1.25')
    @utils.check_resource('name')
    def configure_plugin(self, name, options):
        """
        Configure a plugin.

        Args:
            name (string): The name of the plugin. The ``:latest`` tag is
                optional, and is the default if omitted.
            options (dict): A key-value mapping of options

        Returns:
            ``True`` if successful
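
        Example:
            An illustrative call, assuming ``client`` is a connected
            :py:class:`~docker.APIClient` and the plugin is already
            installed:

            >>> client.api.configure_plugin('vieux/sshfs:latest',
            ...                             {'DEBUG': '1'})
            True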
"""
|
||||
url = self._url('/plugins/{0}/set', name)
|
||||
data = options
|
||||
if isinstance(data, dict):
|
||||
data = [f'{k}={v}' for k, v in data.items()]
|
||||
res = self._post_json(url, data=data)
|
||||
self._raise_for_status(res)
|
||||
return True
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
def create_plugin(self, name, plugin_data_dir, gzip=False):
|
||||
"""
|
||||
Create a new plugin.
|
||||
|
||||
Args:
|
||||
name (string): The name of the plugin. The ``:latest`` tag is
|
||||
optional, and is the default if omitted.
|
||||
plugin_data_dir (string): Path to the plugin data directory.
|
||||
Plugin data directory must contain the ``config.json``
|
||||
manifest file and the ``rootfs`` directory.
|
||||
gzip (bool): Compress the context using gzip. Default: False
|
||||
|
||||
Returns:
|
||||
``True`` if successful
|
||||
"""
|
||||
url = self._url('/plugins/create')
|
||||
|
||||
with utils.create_archive(
|
||||
root=plugin_data_dir, gzip=gzip,
|
||||
files=set(utils.build.walk(plugin_data_dir, []))
|
||||
) as archv:
|
||||
res = self._post(url, params={'name': name}, data=archv)
|
||||
self._raise_for_status(res)
|
||||
return True
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
def disable_plugin(self, name, force=False):
|
||||
"""
|
||||
Disable an installed plugin.
|
||||
|
||||
Args:
|
||||
name (string): The name of the plugin. The ``:latest`` tag is
|
||||
optional, and is the default if omitted.
|
||||
force (bool): To enable the force query parameter.
|
||||
|
||||
Returns:
|
||||
``True`` if successful
|
||||
"""
|
||||
url = self._url('/plugins/{0}/disable', name)
|
||||
res = self._post(url, params={'force': force})
|
||||
self._raise_for_status(res)
|
||||
return True
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
def enable_plugin(self, name, timeout=0):
|
||||
"""
|
||||
Enable an installed plugin.
|
||||
|
||||
Args:
|
||||
name (string): The name of the plugin. The ``:latest`` tag is
|
||||
optional, and is the default if omitted.
|
||||
timeout (int): Operation timeout (in seconds). Default: 0
|
||||
|
||||
Returns:
|
||||
``True`` if successful
|
||||
"""
|
||||
url = self._url('/plugins/{0}/enable', name)
|
||||
params = {'timeout': timeout}
|
||||
res = self._post(url, params=params)
|
||||
self._raise_for_status(res)
|
||||
return True
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
def inspect_plugin(self, name):
|
||||
"""
|
||||
Retrieve plugin metadata.
|
||||
|
||||
Args:
|
||||
name (string): The name of the plugin. The ``:latest`` tag is
|
||||
optional, and is the default if omitted.
|
||||
|
||||
Returns:
|
||||
A dict containing plugin info
|
||||
"""
|
||||
url = self._url('/plugins/{0}/json', name)
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
def pull_plugin(self, remote, privileges, name=None):
|
||||
"""
|
||||
Pull and install a plugin. After the plugin is installed, it can be
|
||||
enabled using :py:meth:`~enable_plugin`.
|
||||
|
||||
Args:
|
||||
remote (string): Remote reference for the plugin to install.
|
||||
The ``:latest`` tag is optional, and is the default if
|
||||
omitted.
|
||||
privileges (:py:class:`list`): A list of privileges the user
|
||||
consents to grant to the plugin. Can be retrieved using
|
||||
:py:meth:`~plugin_privileges`.
|
||||
name (string): Local name for the pulled plugin. The
|
||||
``:latest`` tag is optional, and is the default if omitted.
|
||||
|
||||
Returns:
|
||||
An iterable object streaming the decoded API logs
|
||||
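
        Example:
            A minimal sketch, assuming ``client`` is a connected
            :py:class:`~docker.APIClient`:

            >>> privileges = client.api.plugin_privileges('vieux/sshfs')
            >>> for d in client.api.pull_plugin('vieux/sshfs', privileges):
            ...     print(d)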
"""
|
||||
url = self._url('/plugins/pull')
|
||||
params = {
|
||||
'remote': remote,
|
||||
}
|
||||
if name:
|
||||
params['name'] = name
|
||||
|
||||
headers = {}
|
||||
registry, repo_name = auth.resolve_repository_name(remote)
|
||||
header = auth.get_config_header(self, registry)
|
||||
if header:
|
||||
headers['X-Registry-Auth'] = header
|
||||
response = self._post_json(
|
||||
url, params=params, headers=headers, data=privileges,
|
||||
stream=True
|
||||
)
|
||||
self._raise_for_status(response)
|
||||
return self._stream_helper(response, decode=True)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
def plugins(self):
|
||||
"""
|
||||
Retrieve a list of installed plugins.
|
||||
|
||||
Returns:
|
||||
A list of dicts, one per plugin
|
||||
"""
|
||||
url = self._url('/plugins')
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
def plugin_privileges(self, name):
|
||||
"""
|
||||
Retrieve list of privileges to be granted to a plugin.
|
||||
|
||||
Args:
|
||||
name (string): Name of the remote plugin to examine. The
|
||||
``:latest`` tag is optional, and is the default if omitted.
|
||||
|
||||
Returns:
|
||||
A list of dictionaries representing the plugin's
|
||||
permissions
|
||||
|
||||
"""
|
||||
params = {
|
||||
'remote': name,
|
||||
}
|
||||
|
||||
headers = {}
|
||||
registry, repo_name = auth.resolve_repository_name(name)
|
||||
header = auth.get_config_header(self, registry)
|
||||
if header:
|
||||
headers['X-Registry-Auth'] = header
|
||||
|
||||
url = self._url('/plugins/privileges')
|
||||
return self._result(
|
||||
self._get(url, params=params, headers=headers), True
|
||||
)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource('name')
|
||||
def push_plugin(self, name):
|
||||
"""
|
||||
Push a plugin to the registry.
|
||||
|
||||
Args:
|
||||
name (string): Name of the plugin to upload. The ``:latest``
|
||||
tag is optional, and is the default if omitted.
|
||||
|
||||
Returns:
|
||||
``True`` if successful
|
||||
"""
|
||||
url = self._url('/plugins/{0}/pull', name)
|
||||
|
||||
headers = {}
|
||||
registry, repo_name = auth.resolve_repository_name(name)
|
||||
header = auth.get_config_header(self, registry)
|
||||
if header:
|
||||
headers['X-Registry-Auth'] = header
|
||||
res = self._post(url, headers=headers)
|
||||
self._raise_for_status(res)
|
||||
return self._stream_helper(res, decode=True)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource('name')
|
||||
def remove_plugin(self, name, force=False):
|
||||
"""
|
||||
Remove an installed plugin.
|
||||
|
||||
Args:
|
||||
name (string): Name of the plugin to remove. The ``:latest``
|
||||
tag is optional, and is the default if omitted.
|
||||
force (bool): Disable the plugin before removing. This may
|
||||
result in issues if the plugin is in use by a container.
|
||||
|
||||
Returns:
|
||||
``True`` if successful
|
||||
"""
|
||||
url = self._url('/plugins/{0}', name)
|
||||
res = self._delete(url, params={'force': force})
|
||||
self._raise_for_status(res)
|
||||
return True
|
||||
|
||||
@utils.minimum_version('1.26')
|
||||
@utils.check_resource('name')
|
||||
def upgrade_plugin(self, name, remote, privileges):
|
||||
"""
|
||||
Upgrade an installed plugin.
|
||||
|
||||
Args:
|
||||
name (string): Name of the plugin to upgrade. The ``:latest``
|
||||
tag is optional and is the default if omitted.
|
||||
remote (string): Remote reference to upgrade to. The
|
||||
``:latest`` tag is optional and is the default if omitted.
|
||||
privileges (:py:class:`list`): A list of privileges the user
|
||||
consents to grant to the plugin. Can be retrieved using
|
||||
:py:meth:`~plugin_privileges`.
|
||||
|
||||
Returns:
|
||||
An iterable object streaming the decoded API logs
|
||||
"""
|
||||
|
||||
url = self._url('/plugins/{0}/upgrade', name)
|
||||
params = {
|
||||
'remote': remote,
|
||||
}
|
||||
|
||||
headers = {}
|
||||
registry, repo_name = auth.resolve_repository_name(remote)
|
||||
header = auth.get_config_header(self, registry)
|
||||
if header:
|
||||
headers['X-Registry-Auth'] = header
|
||||
response = self._post_json(
|
||||
url, params=params, headers=headers, data=privileges,
|
||||
stream=True
|
||||
)
|
||||
self._raise_for_status(response)
|
||||
return self._stream_helper(response, decode=True)
|
||||
98
backend/venv/lib/python3.9/site-packages/docker/api/secret.py
Normal file
98
backend/venv/lib/python3.9/site-packages/docker/api/secret.py
Normal file
@@ -0,0 +1,98 @@
import base64

from .. import errors, utils


class SecretApiMixin:
    @utils.minimum_version('1.25')
    def create_secret(self, name, data, labels=None, driver=None):
        """
        Create a secret

        Args:
            name (string): Name of the secret
            data (bytes): Secret data to be stored
            labels (dict): A mapping of labels to assign to the secret
            driver (DriverConfig): A custom driver configuration. If
                unspecified, the default ``internal`` driver will be used

        Returns (dict): ID of the newly created secret
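
        Example:
            A minimal sketch, assuming ``client`` is a connected
            :py:class:`~docker.APIClient` on a swarm manager:

            >>> client.api.create_secret('db_password', b's3cr3t')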
"""
|
||||
if not isinstance(data, bytes):
|
||||
data = data.encode('utf-8')
|
||||
|
||||
data = base64.b64encode(data)
|
||||
data = data.decode('ascii')
|
||||
body = {
|
||||
'Data': data,
|
||||
'Name': name,
|
||||
'Labels': labels
|
||||
}
|
||||
|
||||
if driver is not None:
|
||||
if utils.version_lt(self._version, '1.31'):
|
||||
raise errors.InvalidVersion(
|
||||
'Secret driver is only available for API version > 1.31'
|
||||
)
|
||||
|
||||
body['Driver'] = driver
|
||||
|
||||
url = self._url('/secrets/create')
|
||||
return self._result(
|
||||
self._post_json(url, data=body), True
|
||||
)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource('id')
|
||||
def inspect_secret(self, id):
|
||||
"""
|
||||
Retrieve secret metadata
|
||||
|
||||
Args:
|
||||
id (string): Full ID of the secret to inspect
|
||||
|
||||
Returns (dict): A dictionary of metadata
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.NotFound`
|
||||
if no secret with that ID exists
|
||||
"""
|
||||
url = self._url('/secrets/{0}', id)
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource('id')
|
||||
def remove_secret(self, id):
|
||||
"""
|
||||
Remove a secret
|
||||
|
||||
Args:
|
||||
id (string): Full ID of the secret to remove
|
||||
|
||||
Returns (boolean): True if successful
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.NotFound`
|
||||
if no secret with that ID exists
|
||||
"""
|
||||
url = self._url('/secrets/{0}', id)
|
||||
res = self._delete(url)
|
||||
self._raise_for_status(res)
|
||||
return True
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
def secrets(self, filters=None):
|
||||
"""
|
||||
List secrets
|
||||
|
||||
Args:
|
||||
filters (dict): A map of filters to process on the secrets
|
||||
list. Available filters: ``names``
|
||||
|
||||
Returns (list): A list of secrets
|
||||
"""
|
||||
url = self._url('/secrets')
|
||||
params = {}
|
||||
if filters:
|
||||
params['filters'] = utils.convert_filters(filters)
|
||||
return self._result(self._get(url, params=params), True)
|
||||
486
backend/venv/lib/python3.9/site-packages/docker/api/service.py
Normal file
486
backend/venv/lib/python3.9/site-packages/docker/api/service.py
Normal file
@@ -0,0 +1,486 @@
from .. import auth, errors, utils
from ..types import ServiceMode


def _check_api_features(version, task_template, update_config, endpoint_spec,
                        rollback_config):

    def raise_version_error(param, min_version):
        raise errors.InvalidVersion(
            f'{param} is not supported in API version < {min_version}'
        )

    if update_config is not None:
        if utils.version_lt(version, '1.25'):
            if 'MaxFailureRatio' in update_config:
                raise_version_error('UpdateConfig.max_failure_ratio', '1.25')
            if 'Monitor' in update_config:
                raise_version_error('UpdateConfig.monitor', '1.25')

        if utils.version_lt(version, '1.28'):
            if update_config.get('FailureAction') == 'rollback':
                raise_version_error(
                    'UpdateConfig.failure_action rollback', '1.28'
                )

        if utils.version_lt(version, '1.29'):
            if 'Order' in update_config:
                raise_version_error('UpdateConfig.order', '1.29')

    if rollback_config is not None:
        if utils.version_lt(version, '1.28'):
            raise_version_error('rollback_config', '1.28')

        if utils.version_lt(version, '1.29'):
            if 'Order' in rollback_config:
                raise_version_error('RollbackConfig.order', '1.29')

    if endpoint_spec is not None:
        if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
            if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
                raise_version_error('EndpointSpec.Ports[].mode', '1.32')

    if task_template is not None:
        if 'ForceUpdate' in task_template and utils.version_lt(
                version, '1.25'):
            raise_version_error('force_update', '1.25')

        if task_template.get('Placement'):
            if utils.version_lt(version, '1.30'):
                if task_template['Placement'].get('Platforms'):
                    raise_version_error('Placement.platforms', '1.30')
            if utils.version_lt(version, '1.27'):
                if task_template['Placement'].get('Preferences'):
                    raise_version_error('Placement.preferences', '1.27')

        if task_template.get('ContainerSpec'):
            container_spec = task_template.get('ContainerSpec')

            if utils.version_lt(version, '1.25'):
                if container_spec.get('TTY'):
                    raise_version_error('ContainerSpec.tty', '1.25')
                if container_spec.get('Hostname') is not None:
                    raise_version_error('ContainerSpec.hostname', '1.25')
                if container_spec.get('Hosts') is not None:
                    raise_version_error('ContainerSpec.hosts', '1.25')
                if container_spec.get('Groups') is not None:
                    raise_version_error('ContainerSpec.groups', '1.25')
                if container_spec.get('DNSConfig') is not None:
                    raise_version_error('ContainerSpec.dns_config', '1.25')
                if container_spec.get('Healthcheck') is not None:
                    raise_version_error('ContainerSpec.healthcheck', '1.25')

            if utils.version_lt(version, '1.28'):
                if container_spec.get('ReadOnly') is not None:
                    raise_version_error('ContainerSpec.read_only', '1.28')
                if container_spec.get('StopSignal') is not None:
                    raise_version_error('ContainerSpec.stop_signal', '1.28')

            if utils.version_lt(version, '1.30'):
                if container_spec.get('Configs') is not None:
                    raise_version_error('ContainerSpec.configs', '1.30')
                if container_spec.get('Privileges') is not None:
                    raise_version_error('ContainerSpec.privileges', '1.30')

            if utils.version_lt(version, '1.35'):
                if container_spec.get('Isolation') is not None:
                    raise_version_error('ContainerSpec.isolation', '1.35')

            if utils.version_lt(version, '1.38'):
                if container_spec.get('Init') is not None:
                    raise_version_error('ContainerSpec.init', '1.38')

        if task_template.get('Resources'):
            if utils.version_lt(version, '1.32'):
                if task_template['Resources'].get('GenericResources'):
                    raise_version_error('Resources.generic_resources', '1.32')


def _merge_task_template(current, override):
    merged = current.copy()
    if override is not None:
        for ts_key, ts_value in override.items():
            if ts_key == 'ContainerSpec':
                if 'ContainerSpec' not in merged:
                    merged['ContainerSpec'] = {}
                for cs_key, cs_value in override['ContainerSpec'].items():
                    if cs_value is not None:
                        merged['ContainerSpec'][cs_key] = cs_value
            elif ts_value is not None:
                merged[ts_key] = ts_value
    return merged


class ServiceApiMixin:
    @utils.minimum_version('1.24')
    def create_service(
            self, task_template, name=None, labels=None, mode=None,
            update_config=None, networks=None, endpoint_config=None,
            endpoint_spec=None, rollback_config=None
    ):
        """
        Create a service.

        Args:
            task_template (TaskTemplate): Specification of the task to start as
                part of the new service.
            name (string): User-defined name for the service. Optional.
            labels (dict): A map of labels to associate with the service.
                Optional.
            mode (ServiceMode): Scheduling mode for the service (replicated
                or global). Defaults to replicated.
            update_config (UpdateConfig): Specification for the update strategy
                of the service. Default: ``None``
            rollback_config (RollbackConfig): Specification for the rollback
                strategy of the service. Default: ``None``
            networks (:py:class:`list`): List of network names or IDs or
                :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
                service to. Default: ``None``.
            endpoint_spec (EndpointSpec): Properties that can be configured to
                access and load balance a service. Default: ``None``.

        Returns:
            A dictionary containing an ``ID`` key for the newly created
            service.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
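
        Example:
            A minimal sketch, assuming ``client`` is a connected
            :py:class:`~docker.APIClient` on a swarm manager:

            >>> container_spec = docker.types.ContainerSpec(
            ...     image='nginx:alpine'
            ... )
            >>> task_template = docker.types.TaskTemplate(container_spec)
            >>> client.api.create_service(task_template, name='web')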
"""
|
||||
|
||||
_check_api_features(
|
||||
self._version, task_template, update_config, endpoint_spec,
|
||||
rollback_config
|
||||
)
|
||||
|
||||
url = self._url('/services/create')
|
||||
headers = {}
|
||||
image = task_template.get('ContainerSpec', {}).get('Image', None)
|
||||
if image is None:
|
||||
raise errors.DockerException(
|
||||
'Missing mandatory Image key in ContainerSpec'
|
||||
)
|
||||
if mode and not isinstance(mode, dict):
|
||||
mode = ServiceMode(mode)
|
||||
|
||||
registry, repo_name = auth.resolve_repository_name(image)
|
||||
auth_header = auth.get_config_header(self, registry)
|
||||
if auth_header:
|
||||
headers['X-Registry-Auth'] = auth_header
|
||||
if utils.version_lt(self._version, '1.25'):
|
||||
networks = networks or task_template.pop('Networks', None)
|
||||
data = {
|
||||
'Name': name,
|
||||
'Labels': labels,
|
||||
'TaskTemplate': task_template,
|
||||
'Mode': mode,
|
||||
'Networks': utils.convert_service_networks(networks),
|
||||
'EndpointSpec': endpoint_spec
|
||||
}
|
||||
|
||||
if update_config is not None:
|
||||
data['UpdateConfig'] = update_config
|
||||
|
||||
if rollback_config is not None:
|
||||
data['RollbackConfig'] = rollback_config
|
||||
|
||||
return self._result(
|
||||
self._post_json(url, data=data, headers=headers), True
|
||||
)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource('service')
|
||||
def inspect_service(self, service, insert_defaults=None):
|
||||
"""
|
||||
Return information about a service.
|
||||
|
||||
Args:
|
||||
service (str): Service name or ID.
|
||||
insert_defaults (boolean): If true, default values will be merged
|
||||
into the service inspect output.
|
||||
|
||||
Returns:
|
||||
(dict): A dictionary of the server-side representation of the
|
||||
service, including all relevant properties.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/services/{0}', service)
|
||||
params = {}
|
||||
if insert_defaults is not None:
|
||||
if utils.version_lt(self._version, '1.29'):
|
||||
raise errors.InvalidVersion(
|
||||
'insert_defaults is not supported in API version < 1.29'
|
||||
)
|
||||
params['insertDefaults'] = insert_defaults
|
||||
|
||||
return self._result(self._get(url, params=params), True)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource('task')
|
||||
def inspect_task(self, task):
|
||||
"""
|
||||
Retrieve information about a task.
|
||||
|
||||
Args:
|
||||
task (str): Task ID
|
||||
|
||||
Returns:
|
||||
(dict): Information about the task.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
url = self._url('/tasks/{0}', task)
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource('service')
|
||||
def remove_service(self, service):
|
||||
"""
|
||||
Stop and remove a service.
|
||||
|
||||
Args:
|
||||
service (str): Service name or ID
|
||||
|
||||
Returns:
|
||||
``True`` if successful.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
url = self._url('/services/{0}', service)
|
||||
resp = self._delete(url)
|
||||
self._raise_for_status(resp)
|
||||
return True
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
def services(self, filters=None, status=None):
|
||||
"""
|
||||
List services.
|
||||
|
||||
Args:
|
||||
filters (dict): Filters to process on the nodes list. Valid
|
||||
filters: ``id``, ``name`` , ``label`` and ``mode``.
|
||||
Default: ``None``.
|
||||
status (bool): Include the service task count of running and
|
||||
desired tasks. Default: ``None``.
|
||||
|
||||
Returns:
|
||||
A list of dictionaries containing data about each service.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
params = {
|
||||
'filters': utils.convert_filters(filters) if filters else None
|
||||
}
|
||||
if status is not None:
|
||||
if utils.version_lt(self._version, '1.41'):
|
||||
raise errors.InvalidVersion(
|
||||
'status is not supported in API version < 1.41'
|
||||
)
|
||||
params['status'] = status
|
||||
url = self._url('/services')
|
||||
return self._result(self._get(url, params=params), True)
|
||||
|
||||
@utils.minimum_version('1.25')
|
||||
@utils.check_resource('service')
|
||||
def service_logs(self, service, details=False, follow=False, stdout=False,
|
||||
stderr=False, since=0, timestamps=False, tail='all',
|
||||
is_tty=None):
|
||||
"""
|
||||
Get log stream for a service.
|
||||
Note: This endpoint works only for services with the ``json-file``
|
||||
or ``journald`` logging drivers.
|
||||
|
||||
Args:
|
||||
service (str): ID or name of the service
|
||||
details (bool): Show extra details provided to logs.
|
||||
Default: ``False``
|
||||
follow (bool): Keep connection open to read logs as they are
|
||||
sent by the Engine. Default: ``False``
|
||||
stdout (bool): Return logs from ``stdout``. Default: ``False``
|
||||
stderr (bool): Return logs from ``stderr``. Default: ``False``
|
||||
since (int): UNIX timestamp for the logs staring point.
|
||||
Default: 0
|
||||
timestamps (bool): Add timestamps to every log line.
|
||||
tail (string or int): Number of log lines to be returned,
|
||||
counting from the current end of the logs. Specify an
|
||||
integer or ``'all'`` to output all log lines.
|
||||
Default: ``all``
|
||||
is_tty (bool): Whether the service's :py:class:`ContainerSpec`
|
||||
enables the TTY option. If omitted, the method will query
|
||||
the Engine for the information, causing an additional
|
||||
roundtrip.
|
||||
|
||||
Returns (generator): Logs for the service.
|
||||
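
        Example:
            An illustrative call, assuming ``client`` is a connected
            :py:class:`~docker.APIClient`:

            >>> for line in client.api.service_logs(
            ...     'web', stdout=True, stderr=True, tail=20
            ... ):
            ...     print(line)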
"""
|
||||
params = {
|
||||
'details': details,
|
||||
'follow': follow,
|
||||
'stdout': stdout,
|
||||
'stderr': stderr,
|
||||
'since': since,
|
||||
'timestamps': timestamps,
|
||||
'tail': tail
|
||||
}
|
||||
|
||||
url = self._url('/services/{0}/logs', service)
|
||||
res = self._get(url, params=params, stream=True)
|
||||
if is_tty is None:
|
||||
is_tty = self.inspect_service(
|
||||
service
|
||||
)['Spec']['TaskTemplate']['ContainerSpec'].get('TTY', False)
|
||||
return self._get_result_tty(True, res, is_tty)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
def tasks(self, filters=None):
|
||||
"""
|
||||
Retrieve a list of tasks.
|
||||
|
||||
Args:
|
||||
filters (dict): A map of filters to process on the tasks list.
|
||||
Valid filters: ``id``, ``name``, ``service``, ``node``,
|
||||
``label`` and ``desired-state``.
|
||||
|
||||
Returns:
|
||||
(:py:class:`list`): List of task dictionaries.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
params = {
|
||||
'filters': utils.convert_filters(filters) if filters else None
|
||||
}
|
||||
url = self._url('/tasks')
|
||||
return self._result(self._get(url, params=params), True)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
@utils.check_resource('service')
|
||||
def update_service(self, service, version, task_template=None, name=None,
|
||||
labels=None, mode=None, update_config=None,
|
||||
networks=None, endpoint_config=None,
|
||||
endpoint_spec=None, fetch_current_spec=False,
|
||||
rollback_config=None):
|
||||
"""
|
||||
Update a service.
|
||||
|
||||
Args:
|
||||
service (string): A service identifier (either its name or service
|
||||
ID).
|
||||
version (int): The version number of the service object being
|
||||
updated. This is required to avoid conflicting writes.
|
||||
task_template (TaskTemplate): Specification of the updated task to
|
||||
start as part of the service.
|
||||
name (string): New name for the service. Optional.
|
||||
labels (dict): A map of labels to associate with the service.
|
||||
Optional.
|
||||
mode (ServiceMode): Scheduling mode for the service (replicated
|
||||
or global). Defaults to replicated.
|
||||
update_config (UpdateConfig): Specification for the update strategy
|
||||
of the service. Default: ``None``.
|
||||
rollback_config (RollbackConfig): Specification for the rollback
|
||||
strategy of the service. Default: ``None``
|
||||
networks (:py:class:`list`): List of network names or IDs or
|
||||
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
|
||||
service to. Default: ``None``.
|
||||
endpoint_spec (EndpointSpec): Properties that can be configured to
|
||||
access and load balance a service. Default: ``None``.
|
||||
fetch_current_spec (boolean): Use the undefined settings from the
|
||||
current specification of the service. Default: ``False``
|
||||
|
||||
Returns:
|
||||
A dictionary containing a ``Warnings`` key.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
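
        Example:
            A minimal sketch, assuming ``client`` is a connected
            :py:class:`~docker.APIClient` and ``'web'`` is an existing
            service:

            >>> spec = client.api.inspect_service('web')
            >>> client.api.update_service(
            ...     'web', version=spec['Version']['Index'],
            ...     name='web-renamed', fetch_current_spec=True,
            ... )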
"""
|
||||
|
||||
_check_api_features(
|
||||
self._version, task_template, update_config, endpoint_spec,
|
||||
rollback_config
|
||||
)
|
||||
|
||||
if fetch_current_spec:
|
||||
inspect_defaults = True
|
||||
if utils.version_lt(self._version, '1.29'):
|
||||
inspect_defaults = None
|
||||
current = self.inspect_service(
|
||||
service, insert_defaults=inspect_defaults
|
||||
)['Spec']
|
||||
|
||||
else:
|
||||
current = {}
|
||||
|
||||
url = self._url('/services/{0}/update', service)
|
||||
data = {}
|
||||
headers = {}
|
||||
|
||||
data['Name'] = current.get('Name') if name is None else name
|
||||
|
||||
data['Labels'] = current.get('Labels') if labels is None else labels
|
||||
|
||||
if mode is not None:
|
||||
if not isinstance(mode, dict):
|
||||
mode = ServiceMode(mode)
|
||||
data['Mode'] = mode
|
||||
else:
|
||||
data['Mode'] = current.get('Mode')
|
||||
|
||||
data['TaskTemplate'] = _merge_task_template(
|
||||
current.get('TaskTemplate', {}), task_template
|
||||
)
|
||||
|
||||
container_spec = data['TaskTemplate'].get('ContainerSpec', {})
|
||||
image = container_spec.get('Image', None)
|
||||
if image is not None:
|
||||
registry, repo_name = auth.resolve_repository_name(image)
|
||||
auth_header = auth.get_config_header(self, registry)
|
||||
if auth_header:
|
||||
headers['X-Registry-Auth'] = auth_header
|
||||
|
||||
if update_config is not None:
|
||||
data['UpdateConfig'] = update_config
|
||||
else:
|
||||
data['UpdateConfig'] = current.get('UpdateConfig')
|
||||
|
||||
if rollback_config is not None:
|
||||
data['RollbackConfig'] = rollback_config
|
||||
else:
|
||||
data['RollbackConfig'] = current.get('RollbackConfig')
|
||||
|
||||
if networks is not None:
|
||||
converted_networks = utils.convert_service_networks(networks)
|
||||
if utils.version_lt(self._version, '1.25'):
|
||||
data['Networks'] = converted_networks
|
||||
else:
|
||||
data['TaskTemplate']['Networks'] = converted_networks
|
||||
elif utils.version_lt(self._version, '1.25'):
|
||||
data['Networks'] = current.get('Networks')
|
||||
elif data['TaskTemplate'].get('Networks') is None:
|
||||
current_task_template = current.get('TaskTemplate', {})
|
||||
current_networks = current_task_template.get('Networks')
|
||||
if current_networks is None:
|
||||
current_networks = current.get('Networks')
|
||||
if current_networks is not None:
|
||||
data['TaskTemplate']['Networks'] = current_networks
|
||||
|
||||
if endpoint_spec is not None:
|
||||
data['EndpointSpec'] = endpoint_spec
|
||||
else:
|
||||
data['EndpointSpec'] = current.get('EndpointSpec')
|
||||
|
||||
resp = self._post_json(
|
||||
url, data=data, params={'version': version}, headers=headers
|
||||
)
|
||||
return self._result(resp, json=True)
|
||||
462
backend/venv/lib/python3.9/site-packages/docker/api/swarm.py
Normal file
462
backend/venv/lib/python3.9/site-packages/docker/api/swarm.py
Normal file
@@ -0,0 +1,462 @@
|
||||
import http.client as http_client
|
||||
import logging
|
||||
|
||||
from .. import errors, types, utils
|
||||
from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SwarmApiMixin:
|
||||
|
||||
def create_swarm_spec(self, *args, **kwargs):
|
||||
"""
|
||||
Create a :py:class:`docker.types.SwarmSpec` instance that can be used
|
||||
as the ``swarm_spec`` argument in
|
||||
:py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
|
||||
|
||||
Args:
|
||||
task_history_retention_limit (int): Maximum number of tasks
|
||||
history stored.
|
||||
snapshot_interval (int): Number of logs entries between snapshot.
|
||||
keep_old_snapshots (int): Number of snapshots to keep beyond the
|
||||
current snapshot.
|
||||
log_entries_for_slow_followers (int): Number of log entries to
|
||||
keep around to sync up slow followers after a snapshot is
|
||||
created.
|
||||
heartbeat_tick (int): Amount of ticks (in seconds) between each
|
||||
heartbeat.
|
||||
election_tick (int): Amount of ticks (in seconds) needed without a
|
||||
leader to trigger a new election.
|
||||
dispatcher_heartbeat_period (int): The delay for an agent to send
|
||||
a heartbeat to the dispatcher.
|
||||
node_cert_expiry (int): Automatic expiry for nodes certificates.
|
||||
external_cas (:py:class:`list`): Configuration for forwarding
|
||||
signing requests to an external certificate authority. Use
|
||||
a list of :py:class:`docker.types.SwarmExternalCA`.
|
||||
name (string): Swarm's name
|
||||
labels (dict): User-defined key/value metadata.
|
||||
signing_ca_cert (str): The desired signing CA certificate for all
|
||||
swarm node TLS leaf certificates, in PEM format.
|
||||
signing_ca_key (str): The desired signing CA key for all swarm
|
||||
node TLS leaf certificates, in PEM format.
|
||||
ca_force_rotate (int): An integer whose purpose is to force swarm
|
||||
to generate a new signing CA certificate and key, if none have
|
||||
been specified.
|
||||
autolock_managers (boolean): If set, generate a key and use it to
|
||||
lock data stored on the managers.
|
||||
log_driver (DriverConfig): The default log driver to use for tasks
|
||||
created in the orchestrator.
|
||||
|
||||
Returns:
|
||||
:py:class:`docker.types.SwarmSpec`
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
|
||||
Example:
|
||||
|
||||
>>> spec = client.api.create_swarm_spec(
|
||||
snapshot_interval=5000, log_entries_for_slow_followers=1200
|
||||
)
|
||||
>>> client.api.init_swarm(
|
||||
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
|
||||
force_new_cluster=False, swarm_spec=spec
|
||||
)
|
||||
"""
|
||||
ext_ca = kwargs.pop('external_ca', None)
|
||||
if ext_ca:
|
||||
kwargs['external_cas'] = [ext_ca]
|
||||
return types.SwarmSpec(self._version, *args, **kwargs)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
def get_unlock_key(self):
|
||||
"""
|
||||
Get the unlock key for this Swarm manager.
|
||||
|
||||
Returns:
|
||||
A ``dict`` containing an ``UnlockKey`` member
|
||||
"""
|
||||
return self._result(self._get(self._url('/swarm/unlockkey')), True)
|
||||
|
||||
@utils.minimum_version('1.24')
|
||||
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
|
||||
force_new_cluster=False, swarm_spec=None,
|
||||
default_addr_pool=None, subnet_size=None,
|
||||
data_path_addr=None, data_path_port=None):
|
||||
"""
|
||||
Initialize a new Swarm using the current connected engine as the first
|
||||
node.
|
||||
|
||||
Args:
|
||||
advertise_addr (string): Externally reachable address advertised
|
||||
to other nodes. This can either be an address/port combination
|
||||
in the form ``192.168.1.1:4567``, or an interface followed by a
|
||||
port number, like ``eth0:4567``. If the port number is omitted,
|
||||
the port number from the listen address is used. If
|
||||
``advertise_addr`` is not specified, it will be automatically
|
||||
detected when possible. Default: None
|
||||
listen_addr (string): Listen address used for inter-manager
|
||||
communication, as well as determining the networking interface
|
||||
used for the VXLAN Tunnel Endpoint (VTEP). This can either be
|
||||
an address/port combination in the form ``192.168.1.1:4567``,
|
||||
or an interface followed by a port number, like ``eth0:4567``.
|
||||
If the port number is omitted, the default swarm listening port
|
||||
is used. Default: '0.0.0.0:2377'
|
||||
force_new_cluster (bool): Force creating a new Swarm, even if
|
||||
already part of one. Default: False
|
||||
swarm_spec (dict): Configuration settings of the new Swarm. Use
|
||||
``APIClient.create_swarm_spec`` to generate a valid
|
||||
configuration. Default: None
|
||||
default_addr_pool (list of strings): Default Address Pool specifies
|
||||
default subnet pools for global scope networks. Each pool
|
||||
should be specified as a CIDR block, like '10.0.0.0/8'.
|
||||
Default: None
|
||||
subnet_size (int): SubnetSize specifies the subnet size of the
|
||||
networks created from the default subnet pool. Default: None
|
||||
data_path_addr (string): Address or interface to use for data path
|
||||
traffic. For example, 192.168.1.1, or an interface, like eth0.
|
||||
data_path_port (int): Port number to use for data path traffic.
|
||||
Acceptable port range is 1024 to 49151. If set to ``None`` or
|
||||
0, the default port 4789 will be used. Default: None
|
||||
|
||||
Returns:
|
||||
(str): The ID of the created node.
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If the server returns an error.
|
||||
"""
|
||||
|
||||
url = self._url('/swarm/init')
|
||||
if swarm_spec is not None and not isinstance(swarm_spec, dict):
|
||||
raise TypeError('swarm_spec must be a dictionary')
|
||||
|
||||
if default_addr_pool is not None:
|
||||
if utils.version_lt(self._version, '1.39'):
|
||||
raise errors.InvalidVersion(
|
||||
'Address pool is only available for API version >= 1.39'
|
||||
)
|
||||
# subnet_size becomes 0 if not set with default_addr_pool
|
||||
if subnet_size is None:
|
||||
subnet_size = DEFAULT_SWARM_SUBNET_SIZE
|
||||
|
||||
if subnet_size is not None:
|
||||
if utils.version_lt(self._version, '1.39'):
|
||||
raise errors.InvalidVersion(
|
||||
'Subnet size is only available for API version >= 1.39'
|
||||
)
|
||||
# subnet_size is ignored if set without default_addr_pool
|
||||
if default_addr_pool is None:
|
||||
default_addr_pool = DEFAULT_SWARM_ADDR_POOL
|
||||
|
||||
data = {
|
||||
'AdvertiseAddr': advertise_addr,
|
||||
'ListenAddr': listen_addr,
|
||||
'DefaultAddrPool': default_addr_pool,
|
||||
'SubnetSize': subnet_size,
|
||||
'ForceNewCluster': force_new_cluster,
|
||||
'Spec': swarm_spec,
|
||||
}
|
||||
|
||||
if data_path_addr is not None:
|
||||
if utils.version_lt(self._version, '1.30'):
|
||||
raise errors.InvalidVersion(
|
||||
'Data address path is only available for '
|
||||
'API version >= 1.30'
|
||||
)
|
||||
data['DataPathAddr'] = data_path_addr
|
||||
|
||||
if data_path_port is not None:
|
||||
if utils.version_lt(self._version, '1.40'):
|
||||
raise errors.InvalidVersion(
|
||||
'Data path port is only available for '
|
||||
'API version >= 1.40'
|
||||
)
|
||||
data['DataPathPort'] = data_path_port
|
||||
|
||||
response = self._post_json(url, data=data)
|
||||
return self._result(response, json=True)
|
||||
|
||||
    @utils.minimum_version('1.24')
    def inspect_swarm(self):
        """
        Retrieve low-level information about the current swarm.

        Returns:
            A dictionary containing data about the swarm.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
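
        Example:
            A minimal sketch; assumes ``client.api`` is an ``APIClient``
            connected to a swarm manager (``Version`` and ``JoinTokens``
            are standard keys of the engine's swarm object):

            >>> swarm = client.api.inspect_swarm()
            >>> swarm['Version']['Index']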
"""
|
||||
url = self._url('/swarm')
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
    @utils.check_resource('node_id')
    @utils.minimum_version('1.24')
    def inspect_node(self, node_id):
        """
        Retrieve low-level information about a swarm node.

        Args:
            node_id (string): ID of the node to be inspected.

        Returns:
            A dictionary containing data about this node.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
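
        Example:
            A minimal sketch; assumes ``client.api`` is an ``APIClient``
            connected to a swarm manager, and ``24ifsmvkjbyhk`` is a node
            ID as listed by :py:meth:`nodes`:

            >>> node = client.api.inspect_node('24ifsmvkjbyhk')
            >>> node['Status']['State']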
"""
|
||||
url = self._url('/nodes/{0}', node_id)
|
||||
return self._result(self._get(url), True)
|
||||
|
||||
    @utils.minimum_version('1.24')
    def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
                   advertise_addr=None, data_path_addr=None):
        """
        Make this Engine join a swarm that has already been created.

        Args:
            remote_addrs (:py:class:`list`): Addresses of one or more manager
                nodes already participating in the Swarm to join.
            join_token (string): Secret token for joining this Swarm.
            listen_addr (string): Listen address used for inter-manager
                communication if the node gets promoted to manager, as well as
                determining the networking interface used for the VXLAN Tunnel
                Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
            advertise_addr (string): Externally reachable address advertised
                to other nodes. This can either be an address/port combination
                in the form ``192.168.1.1:4567``, or an interface followed by a
                port number, like ``eth0:4567``. If the port number is omitted,
                the port number from the listen address is used. If
                AdvertiseAddr is not specified, it will be automatically
                detected when possible. Default: ``None``
            data_path_addr (string): Address or interface to use for data path
                traffic. For example, 192.168.1.1, or an interface, like eth0.

        Returns:
            ``True`` if the request went through.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
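
        Example:
            A minimal sketch; assumes ``client.api`` is an ``APIClient`` for
            the joining engine, and that the manager address and the
            ``SWMTKN-1-<token>`` value are placeholders copied from the
            manager's ``docker swarm join-token`` output:

            >>> client.api.join_swarm(
            ...     remote_addrs=['192.168.1.1:2377'],
            ...     join_token='SWMTKN-1-<token>',
            ... )
            True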
"""
|
||||
data = {
|
||||
'RemoteAddrs': remote_addrs,
|
||||
'ListenAddr': listen_addr,
|
||||
'JoinToken': join_token,
|
||||
'AdvertiseAddr': advertise_addr,
|
||||
}
|
||||
|
||||
if data_path_addr is not None:
|
||||
if utils.version_lt(self._version, '1.30'):
|
||||
raise errors.InvalidVersion(
|
||||
'Data address path is only available for '
|
||||
'API version >= 1.30'
|
||||
)
|
||||
data['DataPathAddr'] = data_path_addr
|
||||
|
||||
url = self._url('/swarm/join')
|
||||
response = self._post_json(url, data=data)
|
||||
self._raise_for_status(response)
|
||||
return True
|
||||
|
||||
    @utils.minimum_version('1.24')
    def leave_swarm(self, force=False):
        """
        Leave a swarm.

        Args:
            force (bool): Leave the swarm even if this node is a manager.
                Default: ``False``

        Returns:
            ``True`` if the request went through.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
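
        Example:
            A minimal sketch; assumes ``client.api`` is an ``APIClient`` for
            a node that is currently part of a swarm:

            >>> client.api.leave_swarm(force=False)
            True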
"""
|
||||
url = self._url('/swarm/leave')
|
||||
response = self._post(url, params={'force': force})
|
||||
# Ignore "this node is not part of a swarm" error
|
||||
if force and response.status_code == http_client.NOT_ACCEPTABLE:
|
||||
return True
|
||||
# FIXME: Temporary workaround for 1.13.0-rc bug
|
||||
# https://github.com/docker/docker/issues/29192
|
||||
if force and response.status_code == http_client.SERVICE_UNAVAILABLE:
|
||||
return True
|
||||
self._raise_for_status(response)
|
||||
return True
|
||||
|
||||
    @utils.minimum_version('1.24')
    def nodes(self, filters=None):
        """
        List swarm nodes.

        Args:
            filters (dict): Filters to process on the nodes list. Valid
                filters: ``id``, ``name``, ``membership`` and ``role``.
                Default: ``None``

        Returns:
            A list of dictionaries containing data about each swarm node.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
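
        Example:
            A minimal sketch; assumes ``client.api`` is an ``APIClient``
            connected to a swarm manager. ``role`` is one of the valid
            filters listed above:

            >>> managers = client.api.nodes(filters={'role': 'manager'})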
"""
|
||||
url = self._url('/nodes')
|
||||
params = {}
|
||||
if filters:
|
||||
params['filters'] = utils.convert_filters(filters)
|
||||
|
||||
return self._result(self._get(url, params=params), True)
|
||||
|
||||
    @utils.check_resource('node_id')
    @utils.minimum_version('1.24')
    def remove_node(self, node_id, force=False):
        """
        Remove a node from the swarm.

        Args:
            node_id (string): ID of the node to be removed.
            force (bool): Force remove an active node. Default: `False`

        Returns:
            `True` if the request was successful.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the node referenced doesn't exist in the swarm.

            :py:class:`docker.errors.APIError`
                If the server returns an error.
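
        Example:
            A minimal sketch; assumes ``client.api`` is an ``APIClient``
            connected to a swarm manager and ``24ifsmvkjbyhk`` is the ID of
            a node to drop from the cluster:

            >>> client.api.remove_node('24ifsmvkjbyhk', force=True)
            True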
"""
|
||||
url = self._url('/nodes/{0}', node_id)
|
||||
params = {
|
||||
'force': force
|
||||
}
|
||||
res = self._delete(url, params=params)
|
||||
self._raise_for_status(res)
|
||||
return True
|
||||
|
||||
    @utils.minimum_version('1.24')
    def unlock_swarm(self, key):
        """
        Unlock a locked swarm.

        Args:
            key (string): The unlock key as provided by
                :py:meth:`get_unlock_key`

        Raises:
            :py:class:`docker.errors.InvalidArgument`
                If the key argument is in an incompatible format

            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Returns:
            `True` if the request was successful.

        Example:

            >>> key = client.api.get_unlock_key()
            >>> client.api.unlock_swarm(key)

        """
        if isinstance(key, dict):
            if 'UnlockKey' not in key:
                raise errors.InvalidArgument('Invalid unlock key format')
        else:
            key = {'UnlockKey': key}

        url = self._url('/swarm/unlock')
        res = self._post_json(url, data=key)
        self._raise_for_status(res)
        return True

    @utils.minimum_version('1.24')
    def update_node(self, node_id, version, node_spec=None):
        """
        Update the node's configuration.

        Args:
            node_id (string): ID of the node to be updated.
            version (int): The version number of the node object being
                updated. This is required to avoid conflicting writes.
            node_spec (dict): Configuration settings to update. Any values
                not provided will be removed. Default: ``None``

        Returns:
            `True` if the request went through.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> node_spec = {'Availability': 'active',
            ...              'Name': 'node-name',
            ...              'Role': 'manager',
            ...              'Labels': {'foo': 'bar'}}
            >>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
            ...                        node_spec=node_spec)

        """
        url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
        res = self._post_json(url, data=node_spec)
        self._raise_for_status(res)
        return True

    @utils.minimum_version('1.24')
    def update_swarm(self, version, swarm_spec=None,
                     rotate_worker_token=False,
                     rotate_manager_token=False,
                     rotate_manager_unlock_key=False):
        """
        Update the Swarm's configuration.

        Args:
            version (int): The version number of the swarm object being
                updated. This is required to avoid conflicting writes.
            swarm_spec (dict): Configuration settings to update. Use
                :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
                generate a valid configuration. Default: ``None``.
            rotate_worker_token (bool): Rotate the worker join token. Default:
                ``False``.
            rotate_manager_token (bool): Rotate the manager join token.
                Default: ``False``.
            rotate_manager_unlock_key (bool): Rotate the manager unlock key.
                Default: ``False``.

        Returns:
            ``True`` if the request went through.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
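
        Example:
            A minimal sketch; assumes ``client.api`` is an ``APIClient``
            connected to a swarm manager. The current version index is read
            back from :py:meth:`inspect_swarm` to avoid conflicting writes:

            >>> version = client.api.inspect_swarm()['Version']['Index']
            >>> client.api.update_swarm(version=version,
            ...                         rotate_worker_token=True)
            True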
"""
|
||||
url = self._url('/swarm/update')
|
||||
params = {
|
||||
'rotateWorkerToken': rotate_worker_token,
|
||||
'rotateManagerToken': rotate_manager_token,
|
||||
'version': version
|
||||
}
|
||||
if rotate_manager_unlock_key:
|
||||
if utils.version_lt(self._version, '1.25'):
|
||||
raise errors.InvalidVersion(
|
||||
'Rotate manager unlock key '
|
||||
'is only available for API version >= 1.25'
|
||||
)
|
||||
params['rotateManagerUnlockKey'] = rotate_manager_unlock_key
|
||||
|
||||
response = self._post_json(url, data=swarm_spec, params=params)
|
||||
self._raise_for_status(response)
|
||||
return True
|
||||
163
backend/venv/lib/python3.9/site-packages/docker/api/volume.py
Normal file
@@ -0,0 +1,163 @@
from .. import errors, utils


class VolumeApiMixin:
    def volumes(self, filters=None):
        """
        List volumes currently registered by the docker daemon. Similar to the
        ``docker volume ls`` command.

        Args:
            filters (dict): Server-side list filtering options.

        Returns:
            (dict): Dictionary with list of volume objects as value of the
            ``Volumes`` key.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> client.api.volumes()
            {u'Volumes': [{u'Driver': u'local',
                           u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
                           u'Name': u'foobar'},
                          {u'Driver': u'local',
                           u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
                           u'Name': u'baz'}]}
        """

        params = {
            'filters': utils.convert_filters(filters) if filters else None
        }
        url = self._url('/volumes')
        return self._result(self._get(url, params=params), True)

    def create_volume(self, name=None, driver=None, driver_opts=None,
                      labels=None):
        """
        Create and register a named volume.

        Args:
            name (str): Name of the volume
            driver (str): Name of the driver used to create the volume
            driver_opts (dict): Driver options as a key-value dictionary
            labels (dict): Labels to set on the volume

        Returns:
            (dict): The created volume reference object

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> volume = client.api.create_volume(
            ...     name='foobar',
            ...     driver='local',
            ...     driver_opts={'foo': 'bar', 'baz': 'false'},
            ...     labels={"key": "value"},
            ... )
            >>> print(volume)
            {u'Driver': u'local',
             u'Labels': {u'key': u'value'},
             u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
             u'Name': u'foobar',
             u'Scope': u'local'}

        """
        url = self._url('/volumes/create')
        if driver_opts is not None and not isinstance(driver_opts, dict):
            raise TypeError('driver_opts must be a dictionary')

        data = {
            'Name': name,
            'Driver': driver,
            'DriverOpts': driver_opts,
        }

        if labels is not None:
            if utils.compare_version('1.23', self._version) < 0:
                raise errors.InvalidVersion(
                    'volume labels were introduced in API 1.23'
                )
            if not isinstance(labels, dict):
                raise TypeError('labels must be a dictionary')
            data["Labels"] = labels

        return self._result(self._post_json(url, data=data), True)

    def inspect_volume(self, name):
        """
        Retrieve volume info by name.

        Args:
            name (str): volume name

        Returns:
            (dict): Volume information dictionary

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> client.api.inspect_volume('foobar')
            {u'Driver': u'local',
             u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
             u'Name': u'foobar'}

        """
        url = self._url('/volumes/{0}', name)
        return self._result(self._get(url), True)

    @utils.minimum_version('1.25')
    def prune_volumes(self, filters=None):
        """
        Delete unused volumes.

        Args:
            filters (dict): Filters to process on the prune list.

        Returns:
            (dict): A dict containing a list of deleted volume names and
                the amount of disk space reclaimed in bytes.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
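
        Example:
            A minimal sketch; assumes ``client.api`` is a connected
            ``APIClient``. ``VolumesDeleted`` and ``SpaceReclaimed`` are the
            keys of the engine's prune response; the values shown are
            illustrative only:

            >>> client.api.prune_volumes()
            {u'VolumesDeleted': [u'baz'], u'SpaceReclaimed': 0}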
"""
|
||||
params = {}
|
||||
if filters:
|
||||
params['filters'] = utils.convert_filters(filters)
|
||||
url = self._url('/volumes/prune')
|
||||
return self._result(self._post(url, params=params), True)
|
||||
|
||||
    def remove_volume(self, name, force=False):
        """
        Remove a volume. Similar to the ``docker volume rm`` command.

        Args:
            name (str): The volume's name
            force (bool): Force removal of volumes that were already removed
                out of band by the volume driver plugin.

        Raises:
            :py:class:`docker.errors.APIError`
                If the volume could not be removed.
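
        Example:
            A minimal sketch; assumes ``client.api`` is a connected
            ``APIClient`` and a volume named ``foobar`` exists (``force``
            requires API version 1.25 or later, as enforced below):

            >>> client.api.remove_volume('foobar', force=True)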
"""
|
||||
params = {}
|
||||
if force:
|
||||
if utils.version_lt(self._version, '1.25'):
|
||||
raise errors.InvalidVersion(
|
||||
'force removal was introduced in API 1.25'
|
||||
)
|
||||
params = {'force': force}
|
||||
|
||||
url = self._url('/volumes/{0}', name, params=params)
|
||||
resp = self._delete(url)
|
||||
self._raise_for_status(resp)
|
||||