Skip to content

Commit

Permalink
Merge pull request #1921 from docker/3.1.0-release
Browse files Browse the repository at this point in the history
3.1.0 release
  • Loading branch information
shin- authored Feb 22, 2018
2 parents c9ee022 + 1d85818 commit 79f27c6
Show file tree
Hide file tree
Showing 26 changed files with 415 additions and 214 deletions.
6 changes: 3 additions & 3 deletions docker/api/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -350,10 +350,10 @@ def _multiplexed_response_stream_helper(self, response):
break
yield data

def _stream_raw_result(self, response):
''' Stream result for TTY-enabled container '''
def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
for out in response.iter_content(chunk_size=1, decode_unicode=True):
for out in response.iter_content(chunk_size, decode):
yield out

def _read_from_socket(self, response, stream, tty=False):
Expand Down
17 changes: 13 additions & 4 deletions docker/api/container.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

from .. import errors
from .. import utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..types import (
ContainerConfig, EndpointConfig, HostConfig, NetworkingConfig
)
Expand Down Expand Up @@ -438,6 +439,8 @@ def create_host_config(self, *args, **kwargs):
``0,1``).
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
(``0-3``, ``0,1``). Only effective on NUMA systems.
device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device
in the form of: `[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
Expand Down Expand Up @@ -643,12 +646,15 @@ def diff(self, container):
)

@utils.check_resource('container')
def export(self, container, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
    """
    Export the contents of a filesystem as a tar archive.

    Args:
        container (str): The container to export
        chunk_size (int): The number of bytes returned by each iteration
            of the generator. If ``None``, data will be streamed as it is
            received. Default: 2 MB

    Returns:
        (generator): The archived filesystem data stream

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    res = self._get(
        self._url("/containers/{0}/export", container), stream=True
    )
    # decode=False: the tar archive is raw bytes and must not be
    # unicode-decoded.
    return self._stream_raw_result(res, chunk_size, False)

@utils.check_resource('container')
def get_archive(self, container, path):
def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Retrieve a file or folder from a container in the form of a tar
archive.
Args:
container (str): The container where the file is located
path (str): Path to the file or folder to retrieve
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(tuple): First element is a raw tar data stream. Second element is
Expand All @@ -688,7 +697,7 @@ def get_archive(self, container, path):
self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat')
return (
self._stream_raw_result(res),
self._stream_raw_result(res, chunk_size, False),
utils.decode_json_header(encoded_stat) if encoded_stat else None
)

Expand Down
8 changes: 6 additions & 2 deletions docker/api/image.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,19 +4,23 @@
import six

from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE

log = logging.getLogger(__name__)


class ImageApiMixin(object):

@utils.check_resource('image')
def get_image(self, image):
def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
image (str): Image name to get
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(generator): A stream of raw archive data.
Expand All @@ -34,7 +38,7 @@ def get_image(self, image):
>>> f.close()
"""
res = self._get(self._url("/images/{0}/get", image), stream=True)
return self._stream_raw_result(res)
return self._stream_raw_result(res, chunk_size, False)

@utils.check_resource('image')
def history(self, image):
Expand Down
5 changes: 5 additions & 0 deletions docker/api/service.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,11 @@ def raise_version_error(param, min_version):
if container_spec.get('Isolation') is not None:
raise_version_error('ContainerSpec.isolation', '1.35')

if task_template.get('Resources'):
if utils.version_lt(version, '1.32'):
if task_template['Resources'].get('GenericResources'):
raise_version_error('Resources.generic_resources', '1.32')


def _merge_task_template(current, override):
merged = current.copy()
Expand Down
5 changes: 4 additions & 1 deletion docker/auth.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,9 +90,12 @@ def resolve_authconfig(authconfig, registry=None):
log.debug(
'Using credentials store "{0}"'.format(store_name)
)
return _resolve_authconfig_credstore(
cfg = _resolve_authconfig_credstore(
authconfig, registry, store_name
)
if cfg is not None:
return cfg
log.debug('No entry in credstore - fetching from auth dict')

# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
Expand Down
1 change: 1 addition & 0 deletions docker/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,3 +17,4 @@

DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
DEFAULT_NUM_POOLS = 25
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
20 changes: 16 additions & 4 deletions docker/models/containers.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from collections import namedtuple

from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import (ContainerError, ImageNotFound,
create_unexpected_kwargs_error)
from ..types import HostConfig
Expand Down Expand Up @@ -181,26 +182,34 @@ def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
exec_output
)

def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
    """
    Export the contents of the container's filesystem as a tar archive.

    Args:
        chunk_size (int): The number of bytes returned by each iteration
            of the generator. If ``None``, data will be streamed as it is
            received. Default: 2 MB

    Returns:
        (str): The filesystem tar archive

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    # Delegate to the low-level API client, forwarding the caller's
    # chunk size.
    return self.client.api.export(self.id, chunk_size)

def get_archive(self, path):
def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Retrieve a file or folder from the container in the form of a tar
archive.
Args:
path (str): Path to the file or folder to retrieve
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(tuple): First element is a raw tar data stream. Second element is
Expand All @@ -210,7 +219,7 @@ def get_archive(self, path):
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.get_archive(self.id, path)
return self.client.api.get_archive(self.id, path, chunk_size)

def kill(self, signal=None):
"""
Expand Down Expand Up @@ -515,6 +524,8 @@ def run(self, image, command=None, stdout=True, stderr=False,
(``0-3``, ``0,1``). Only effective on NUMA systems.
detach (bool): Run container in the background and return a
:py:class:`Container` object.
device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device
in the form of: `[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
Expand Down Expand Up @@ -912,6 +923,7 @@ def prune(self, filters=None):
'cpuset_mems',
'cpu_rt_period',
'cpu_rt_runtime',
'device_cgroup_rules',
'device_read_bps',
'device_read_iops',
'device_write_bps',
Expand Down
14 changes: 11 additions & 3 deletions docker/models/images.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import six

from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import BuildError, ImageLoadError
from ..utils import parse_repository_tag
from ..utils.json_stream import json_stream
Expand Down Expand Up @@ -58,10 +59,15 @@ def history(self):
"""
return self.client.api.history(self.id)

def save(self):
def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(generator): A stream of raw archive data.
Expand All @@ -77,7 +83,7 @@ def save(self):
>>> f.write(chunk)
>>> f.close()
"""
return self.client.api.get_image(self.id)
return self.client.api.get_image(self.id, chunk_size)

def tag(self, repository, tag=None, **kwargs):
"""
Expand Down Expand Up @@ -308,7 +314,9 @@ def pull(self, repository, tag=None, **kwargs):

self.client.api.pull(repository, tag=tag, **kwargs)
if tag:
return self.get('{0}:{1}'.format(repository, tag))
return self.get('{0}{2}{1}'.format(
repository, tag, '@' if tag.startswith('sha256:') else ':'
))
return self.list(repository)

def push(self, repository, tag=None, **kwargs):
Expand Down
15 changes: 15 additions & 0 deletions docker/models/services.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,11 @@ def update(self, **kwargs):
spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
kwargs['image'] = spec['Image']

if kwargs.get('force_update') is True:
task_template = self.attrs['Spec']['TaskTemplate']
current_value = int(task_template.get('ForceUpdate', 0))
kwargs['force_update'] = current_value + 1

create_kwargs = _get_create_service_kwargs('update', kwargs)

return self.client.api.update_service(
Expand Down Expand Up @@ -124,6 +129,16 @@ def scale(self, replicas):
service_mode,
fetch_current_spec=True)

def force_update(self):
    """
    Force update the service even if no changes require it.

    Delegates to ``update`` with ``force_update=True``;
    ``fetch_current_spec=True`` re-reads the current service spec first so
    no existing settings are lost.

    Returns:
        ``True`` if successful.
    """

    return self.update(force_update=True, fetch_current_spec=True)


class ServiceCollection(Collection):
"""Services on the Docker server."""
Expand Down
12 changes: 11 additions & 1 deletion docker/types/containers.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,8 @@ def __init__(self, version, binds=None, port_bindings=None,
init=None, init_path=None, volume_driver=None,
cpu_count=None, cpu_percent=None, nano_cpus=None,
cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None):
cpu_rt_period=None, cpu_rt_runtime=None,
device_cgroup_rules=None):

if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
Expand Down Expand Up @@ -466,6 +467,15 @@ def __init__(self, version, binds=None, port_bindings=None,
raise host_config_version_error('mounts', '1.30')
self['Mounts'] = mounts

if device_cgroup_rules is not None:
if version_lt(version, '1.28'):
raise host_config_version_error('device_cgroup_rules', '1.28')
if not isinstance(device_cgroup_rules, list):
raise host_config_type_error(
'device_cgroup_rules', device_cgroup_rules, 'list'
)
self['DeviceCgroupRules'] = device_cgroup_rules


def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
Expand Down
37 changes: 35 additions & 2 deletions docker/types/services.py
Original file line number Diff line number Diff line change
Expand Up @@ -306,9 +306,13 @@ class Resources(dict):
mem_limit (int): Memory limit in Bytes.
cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
mem_reservation (int): Memory reservation in Bytes.
generic_resources (dict or :py:class:`list`): Node level generic
resources, for example a GPU, using the following format:
``{ resource_name: resource_value }``. Alternatively, a list
of resource specifications as defined by the Engine API.
"""
def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
mem_reservation=None):
mem_reservation=None, generic_resources=None):
limits = {}
reservation = {}
if cpu_limit is not None:
Expand All @@ -319,13 +323,42 @@ def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
reservation['NanoCPUs'] = cpu_reservation
if mem_reservation is not None:
reservation['MemoryBytes'] = mem_reservation

if generic_resources is not None:
reservation['GenericResources'] = (
_convert_generic_resources_dict(generic_resources)
)
if limits:
self['Limits'] = limits
if reservation:
self['Reservations'] = reservation


def _convert_generic_resources_dict(generic_resources):
if isinstance(generic_resources, list):
return generic_resources
if not isinstance(generic_resources, dict):
raise errors.InvalidArgument(
'generic_resources must be a dict or a list'
' (found {})'.format(type(generic_resources))
)
resources = []
for kind, value in six.iteritems(generic_resources):
resource_type = None
if isinstance(value, int):
resource_type = 'DiscreteResourceSpec'
elif isinstance(value, str):
resource_type = 'NamedResourceSpec'
else:
raise errors.InvalidArgument(
'Unsupported generic resource reservation '
'type: {}'.format({kind: value})
)
resources.append({
resource_type: {'Kind': kind, 'Value': value}
})
return resources


class UpdateConfig(dict):
"""
Expand Down
Loading

0 comments on commit 79f27c6

Please sign in to comment.