From 4f4e2aad5a1106f66f62e010a90265dc3254905d Mon Sep 17 00:00:00 2001 From: James Weaver Date: Fri, 5 Apr 2019 11:46:17 +0100 Subject: [PATCH 01/76] Make listing of modules in setup.py explicit --- setup.py | 27 +++------------------------ 1 file changed, 3 insertions(+), 24 deletions(-) diff --git a/setup.py b/setup.py index 0e704cb..2cb4e62 100644 --- a/setup.py +++ b/setup.py @@ -19,30 +19,9 @@ from setuptools import setup import os - -def is_package(path): - return ( - os.path.isdir(path) and - os.path.isfile(os.path.join(path, '__init__.py')) - ) - - -def find_packages(path, base=""): - """ Find all packages in path """ - packages = {} - for item in os.listdir(path): - dir = os.path.join(path, item) - if is_package(dir): - if base: - module_name = "%(base)s.%(item)s" % vars() - else: - module_name = item - packages[module_name] = dir - packages.update(find_packages(dir, module_name)) - return packages - - -packages = find_packages(".") +packages = { + 'mediagrains' : 'mediagrains' +} package_names = packages.keys() packages_required = [ From f61304534303aa0e7b14225f01c20b58384d9e30 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 18 Apr 2019 17:35:28 +0100 Subject: [PATCH 02/76] Added async methods to gsf decoder in seperate class --- mediagrains/async.py | 29 ++ mediagrains_async/__init__.py | 529 ++++++++++++++++++++++++++++++++++ mediagrains_async/aiobytes.py | 124 ++++++++ setup.py | 11 +- tests/atest_gsf.py | 137 +++++++++ tox.ini | 6 +- 6 files changed, 831 insertions(+), 5 deletions(-) create mode 100644 mediagrains/async.py create mode 100644 mediagrains_async/__init__.py create mode 100644 mediagrains_async/aiobytes.py create mode 100644 tests/atest_gsf.py diff --git a/mediagrains/async.py b/mediagrains/async.py new file mode 100644 index 0000000..72f5f21 --- /dev/null +++ b/mediagrains/async.py @@ -0,0 +1,29 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""\ +Asyncio compatible layer for mediagrains, but only available in python 3.6+ +""" + +from sys import version_info + +if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + from mediagrains_async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError + + __all__ = ["AsyncGSFDecoder", "AsyncLazyLoaderUnloadedError"] +else: + __all__ = [] diff --git a/mediagrains_async/__init__.py b/mediagrains_async/__init__.py new file mode 100644 index 0000000..38cfd9e --- /dev/null +++ b/mediagrains_async/__init__.py @@ -0,0 +1,529 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""\ +Library for handling mediagrains in pure python asyncio compatibility layer. 
+""" + +from uuid import UUID +from os import SEEK_SET +from datetime import datetime +from fractions import Fraction + +from mediatimestamp import Timestamp + +from .aiobytes import AsyncIOBytes, AsyncLazyLoaderUnloadedError + +from mediagrains import Grain +from mediagrains.gsf import GSFDecodeBadVersionError, GSFDecodeBadFileTypeError, GSFDecodeError + +__all__ = ["AsyncGSFDecoder", "AsyncLazyLoaderUnloadedError"] + + +class AsyncGSFBlock(): + """A single block in a GSF file, accessed asynchronously + + Has coroutines to read various types from the block. + Must be used as an asynchronous context manager, which will automatically decode the block tag and size, + exposed by the `tag` and `size` attributes. + """ + def __init__(self, file_data, want_tag=None, raise_on_wrong_tag=False): + """Constructor. Unlike the synchronous version does not record the start byte of the block in `block_start` + + :param file_data: An asynchronous readable file-like object positioned at the start of the block + :param want_tag: If set to a tag string, and in a context manager, skip any block without that tag + :param raise_on_wrong_tag: Set to True to raise a GSFDecodeError if the next block isn't `want_tag` + """ + self.file_data = file_data + self.want_tag = want_tag + self.raise_on_wrong_tag = raise_on_wrong_tag + + self.size = None + self.block_start = None + + async def __aenter__(self): + """When used as a context manager record file position and read block size and tag on entry + + - When entering a block, tag and size should be read + - If tag doesn't decode, a GSFDecodeError should be raised + - If want_tag was supplied to the constructor, skip blocks that don't have that tag + - Unless raise_on_wrong_tag was also supplied, in which case raise + + :returns: Instance of AsyncGSFBlock + :raises GSFDecodeError: If the block tag failed to decode as UTF-8, or an unwanted tag was found""" + + self.block_start = await self.file_data.tell() # In binary mode, this should always be 
in bytes + + while True: + tag_bytes = await self.file_data.read(4) + + try: + self.tag = tag_bytes.decode(encoding="utf-8") + except UnicodeDecodeError: + raise GSFDecodeError( + "Bytes {!r} at location {} do not make a valid tag for a block".format(tag_bytes, self.block_start), + self.block_start + ) + + self.size = await self.read_uint(4) + + if self.want_tag is None or self.tag == self.want_tag: + return self + elif self.tag != self.want_tag and self.raise_on_wrong_tag: + raise GSFDecodeError("Wanted tag {} but got {} at {}".format(self.want_tag, self.tag, self.block_start), + self.block_start) + else: + await self.file_data.seek(self.block_start + self.size, SEEK_SET) + self.block_start = await self.file_data.tell() + + async def __aexit__(self, *args): + """When used as a context manager, exiting context should seek to the block end""" + await self.file_data.seek(self.block_start + self.size, SEEK_SET) + + async def has_child_block(self, strict_blocks=True): + """Checks if there is space for another child block in this block + + Returns true if there is space for another child block (i.e. >= 8 bytes) in this block. + If strict_blocks=True, this block only contains other blocks rather than any other data. As a result, if there + are bytes left, but not enough for another block, raise a GSFDecodeError. + Must be used in a context manager. 
+ + :param strict_blocks: Set to True to raise if a partial block is found + :returns: True if there is spaces for another block + :raises GSFDecodeError: If there is a partial block and strict=True + """ + assert self.size is not None, "has_child_block() only works in a context manager" + + bytes_remaining = await self.get_remaining() + if bytes_remaining >= 8: + return True + elif bytes_remaining != 0 and strict_blocks: + position = await self.file_data.tell() + raise GSFDecodeError("Found a partial block (or parent too small) in '{}' at {}".format(self.tag, position), + position) + else: + return False + + async def child_blocks(self, strict_blocks=True): + """Asynchronous generator for each child block - each yielded block sits within the context manager + + Must be used in a context manager. + + :param strict_blocks: Set to True to raise if a partial block is found + :yields: GSFBlock for child (already acting as a context manager) + :raises GSFDecodeError: If there is a partial block and strict=True + """ + while await self.has_child_block(strict_blocks=strict_blocks): + async with AsyncGSFBlock(self.file_data) as child_block: + yield child_block + + async def get_remaining(self): + """Get the number of bytes left in this block + + Only works in a context manager, will raise an AssertionError if not + + :returns: Number of bytes left in the block + """ + assert self.size is not None, "get_remaining() only works in a context manager" + return (self.block_start + self.size) - await self.file_data.tell() + + async def read_uint(self, length): + """Read an unsigned integer of length `length` + + :param length: Number of bytes used to store the integer + :returns: Unsigned integer + :raises EOFError: If there are fewer than `length` bytes left in the source + """ + r = 0 + uint_bytes = bytes(await self.file_data.read(length)) + + if len(uint_bytes) != length: + raise EOFError("Unable to read enough bytes from source") + + for n in range(0, length): + r += 
(uint_bytes[n] << (n*8)) + return r + + async def read_bool(self): + """Read a boolean value + + :returns: Boolean value + :raises EOFError: If there are no more bytes left in the source""" + n = await self.read_uint(1) + return (n != 0) + + async def read_sint(self, length): + """Read a 2's complement signed integer + + :param length: Number of bytes used to store the integer + :returns: Signed integer + :raises EOFError: If there are fewer than `length` bytes left in the source + """ + r = await self.read_uint(length) + if (r >> ((8*length) - 1)) == 1: + r -= (1 << (8*length)) + return r + + async def read_string(self, length): + """Read a fixed-length string, treating it as UTF-8 + + :param length: Number of bytes in the string + :returns: String + :raises EOFError: If there are fewer than `length` bytes left in the source + """ + string_data = await self.file_data.read(length) + if (len(string_data) != length): + raise EOFError("Unable to read enough bytes from source") + + return string_data.decode(encoding='utf-8') + + async def read_varstring(self): + """Read a variable length string + + Reads a 2 byte uint to get the string length, then reads a string of that length + + :returns: String + :raises EOFError: If there are too few bytes left in the source + """ + length = await self.read_uint(2) + return await self.read_string(length) + + async def read_uuid(self): + """Read a UUID + + :returns: UUID + :raises EOFError: If there are fewer than l bytes left in the source + """ + uuid_data = await self.file_data.read(16) + + if (len(uuid_data) != 16): + raise EOFError("Unable to read enough bytes from source") + + return UUID(bytes=uuid_data) + + async def read_timestamp(self): + """Read a date-time (with seconds resolution) stored in 7 bytes + + :returns: Datetime + :raises EOFError: If there are fewer than 7 bytes left in the source + """ + year = await self.read_sint(2) + month = await self.read_uint(1) + day = await self.read_uint(1) + hour = await 
self.read_uint(1) + minute = await self.read_uint(1) + second = await self.read_uint(1) + return datetime(year, month, day, hour, minute, second) + + async def read_ippts(self): + """Read a mediatimestamp.Timestamp + + :returns: Timestamp + :raises EOFError: If there are fewer than 10 bytes left in the source + """ + secs = await self.read_uint(6) + nano = await self.read_uint(4) + return Timestamp(secs, nano) + + async def read_rational(self): + """Read a rational (fraction) + + If numerator or denominator is 0, returns Fraction(0) + + :returns: fraction.Fraction + :raises EOFError: If there are fewer than 8 bytes left in the source + """ + numerator = await self.read_uint(4) + denominator = await self.read_uint(4) + if numerator == 0 or denominator == 0: + return Fraction(0) + else: + return Fraction(numerator, denominator) + + +class AsyncGSFDecoder(object): + """A decoder for GSF format that operates asynchronously. + + Provides coroutines to decode the header of a GSF file, followed by an asynchronous generator to get each grain, + wrapped in some grain method (mediagrains.Grain by default.) 
+ """ + def __init__(self, file_data, parse_grain=Grain, **kwargs): + """Constructor + + :param parse_grain: Function that takes a (metadata dict, buffer) and returns a grain representation + :param file_data: A readable asynchronous file io-like object similar to those provided by aiofiles + """ + self.Grain = parse_grain + self.file_data = file_data + + async def _decode_ssb_header(self): + """Find and read the SSB header in the GSF file + + :returns: (major, minor) version tuple + :raises GSFDecodeBadFileTypeError: If the SSB tag shows this isn't a GSF file + """ + ssb_block = AsyncGSFBlock(self.file_data) + + tag = await ssb_block.read_string(8) + + if tag != "SSBBgrsg": + raise GSFDecodeBadFileTypeError("File lacks correct header", ssb_block.block_start, tag) + + major = await ssb_block.read_uint(2) + minor = await ssb_block.read_uint(2) + + return (major, minor) + + async def _decode_head(self, head_block): + """Decode the "head" block and extract ID, created date, segments and tags + + :param head_block: AsyncGSFBlock representing the "head" block + :returns: Head block as a dict + """ + head = {} + head['id'] = await head_block.read_uuid() + head['created'] = await head_block.read_timestamp() + + head['segments'] = [] + head['tags'] = [] + + # Read head block children + async for head_child in head_block.child_blocks(): + # Parse a segment block + if head_child.tag == "segm": + segm = {} + segm['local_id'] = await head_child.read_uint(2) + segm['id'] = await head_child.read_uuid() + segm['count'] = await head_child.read_sint(8) + segm['tags'] = [] + + # Segment blocks can have child tags as well + while await head_child.has_child_block(): + async with AsyncGSFBlock(self.file_data) as segm_tag: + if segm_tag.tag == "tag ": + key = await segm_tag.read_varstring() + value = await segm_tag.read_varstring() + segm['tags'].append((key, value)) + + head['segments'].append(segm) + + # Parse a tag block + elif head_child.tag == "tag ": + key = await 
head_child.read_varstring() + value = await head_child.read_varstring() + head['tags'].append((key, value)) + + return head + + async def _decode_tils(self, tils_block): + """Decode timelabels (tils) block + + :param tils_block: Instance of AsyncGSFBlock() representing a "gbhd" block + :returns: tils block as a dict + """ + tils = [] + timelabel_count = await tils_block.read_uint(2) + for i in range(0, timelabel_count): + tag = await tils_block.read_string(16) + tag = tag.strip("\x00") + count = await tils_block.read_uint(4) + rate = await tils_block.read_rational() + drop = await tils_block.read_bool() + + tils.append({'tag': tag, + 'timelabel': {'frames_since_midnight': count, + 'frame_rate_numerator': rate.numerator, + 'frame_rate_denominator': rate.denominator, + 'drop_frame': drop}}) + + return tils + + async def _decode_gbhd(self, gbhd_block): + """Decode grain block header ("gbhd") to get grain metadata + + :param gbhd_block: Instance of AsyncGSFBlock() representing a "gbhd" block + :returns: Grain data dict + :raises GSFDecodeError: If "gbhd" block contains an unkown child block + """ + meta = { + "grain": { + } + } + + meta['grain']['source_id'] = await gbhd_block.read_uuid() + meta['grain']['flow_id'] = await gbhd_block.read_uuid() + await self.file_data.seek(16, 1) # Skip over deprecated byte array + meta['grain']['origin_timestamp'] = await gbhd_block.read_ippts() + meta['grain']['sync_timestamp'] = await gbhd_block.read_ippts() + meta['grain']['rate'] = await gbhd_block.read_rational() + meta['grain']['duration'] = await gbhd_block.read_rational() + + async for gbhd_child in gbhd_block.child_blocks(): + if gbhd_child.tag == "tils": + meta['grain']['timelabels'] = await self._decode_tils(gbhd_child) + elif gbhd_child.tag == "vghd": + meta['grain']['grain_type'] = 'video' + meta['grain']['cog_frame'] = {} + meta['grain']['cog_frame']['format'] = await gbhd_child.read_uint(4) + meta['grain']['cog_frame']['layout'] = await gbhd_child.read_uint(4) + 
meta['grain']['cog_frame']['width'] = await gbhd_child.read_uint(4) + meta['grain']['cog_frame']['height'] = await gbhd_child.read_uint(4) + meta['grain']['cog_frame']['extension'] = await gbhd_child.read_uint(4) + + src_aspect_ratio = await gbhd_child.read_rational() + if src_aspect_ratio != 0: + meta['grain']['cog_frame']['source_aspect_ratio'] = { + 'numerator': src_aspect_ratio.numerator, + 'denominator': src_aspect_ratio.denominator + } + + pixel_aspect_ratio = await gbhd_child.read_rational() + if pixel_aspect_ratio != 0: + meta['grain']['cog_frame']['pixel_aspect_ratio'] = { + 'numerator': pixel_aspect_ratio.numerator, + 'denominator': pixel_aspect_ratio.denominator + } + + meta['grain']['cog_frame']['components'] = [] + if await gbhd_child.has_child_block(): + async with AsyncGSFBlock(self.file_data) as comp_block: + if comp_block.tag != "comp": + continue # Skip unknown/unexpected block + + comp_count = await comp_block.read_uint(2) + offset = 0 + for i in range(0, comp_count): + comp = {} + comp['width'] = await comp_block.read_uint(4) + comp['height'] = await comp_block.read_uint(4) + comp['stride'] = await comp_block.read_uint(4) + comp['length'] = await comp_block.read_uint(4) + comp['offset'] = offset + offset += comp['length'] + meta['grain']['cog_frame']['components'].append(comp) + + elif gbhd_child.tag == 'cghd': + meta['grain']['grain_type'] = "coded_video" + meta['grain']['cog_coded_frame'] = {} + meta['grain']['cog_coded_frame']['format'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['layout'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['origin_width'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['origin_height'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['coded_width'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['coded_height'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['is_key_frame'] = await 
gbhd_child.read_bool() + meta['grain']['cog_coded_frame']['temporal_offset'] = await gbhd_child.read_sint(4) + + if await gbhd_child.has_child_block(): + async with AsyncGSFBlock(self.file_data) as unof_block: + meta['grain']['cog_coded_frame']['unit_offsets'] = [] + + unit_offsets = await unof_block.read_uint(2) + for i in range(0, unit_offsets): + meta['grain']['cog_coded_frame']['unit_offsets'].append(await unof_block.read_uint(4)) + + elif gbhd_child.tag == "aghd": + meta['grain']['grain_type'] = "audio" + meta['grain']['cog_audio'] = {} + meta['grain']['cog_audio']['format'] = await gbhd_child.read_uint(4) + meta['grain']['cog_audio']['channels'] = await gbhd_child.read_uint(2) + meta['grain']['cog_audio']['samples'] = await gbhd_child.read_uint(4) + meta['grain']['cog_audio']['sample_rate'] = await gbhd_child.read_uint(4) + + elif gbhd_child.tag == "cahd": + meta['grain']['grain_type'] = "coded_audio" + meta['grain']['cog_coded_audio'] = {} + meta['grain']['cog_coded_audio']['format'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_audio']['channels'] = await gbhd_child.read_uint(2) + meta['grain']['cog_coded_audio']['samples'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_audio']['priming'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_audio']['remainder'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_audio']['sample_rate'] = await gbhd_child.read_uint(4) + + elif gbhd_child.tag == "eghd": + meta['grain']['grain_type'] = "event" + else: + raise GSFDecodeError( + "Unknown type {} at offset {}".format(gbhd_child.tag, gbhd_child.block_start), + gbhd_child.block_start, + length=gbhd_child.size + ) + + return meta + + async def decode_file_headers(self): + """Verify the file is a supported version, and get the file header + + :returns: File header data (segments and tags) as a dict + :raises GSFDecodeBadVersionError: If the file version is not supported + :raises GSFDecodeBadFileTypeError: If this isn't a GSF file 
+ :raises GSFDecodeError: If the file doesn't have a "head" block + """ + (major, minor) = await self._decode_ssb_header() + if (major, minor) != (7, 0): + raise GSFDecodeBadVersionError("Unknown Version {}.{}".format(major, minor), 0, major, minor) + + try: + async with AsyncGSFBlock(self.file_data, want_tag="head") as head_block: + return await self._decode_head(head_block) + except EOFError: + raise GSFDecodeError("No head block found in file", self.file_data.tell()) + + async def grains(self, local_ids=None, load_lazily=True): + """Asynchronous generator to get grains from the GSF file. Skips blocks which aren't "grai". + + The file_data will be positioned after the `grai` block. + + :param local_ids: A list of local-ids to include in the output. If None (the default) then all local-ids will be + included + :param skip_data: If True, grain data blocks will be seeked over and only grain headers will be read + :param load_lazily: If True, the grains returned will be designed to lazily load data from the underlying stream + only when it is needed. These grain data elements will have an extra 'load' coroutine for + triggering this load, and accessing data in their data element without first awaiting this + coroutine will raise an exception. + :yields: (Grain, local_id) tuple for each grain + :raises GSFDecodeError: If grain is invalid (e.g. 
no "gbhd" child) + """ + while True: + try: + async with AsyncGSFBlock(self.file_data, want_tag="grai") as grai_block: + if grai_block.size == 0: + return # Terminator block reached + + local_id = await grai_block.read_uint(2) + + if local_ids is not None and local_id not in local_ids: + continue + + async with AsyncGSFBlock(self.file_data, want_tag="gbhd", raise_on_wrong_tag=True) as gbhd_block: + meta = await self._decode_gbhd(gbhd_block) + + data = None + + if await grai_block.has_child_block(): + async with AsyncGSFBlock(self.file_data, want_tag="grdt") as grdt_block: + if await grdt_block.get_remaining() > 0: + if load_lazily: + data = AsyncIOBytes(self.file_data, + await self.file_data.tell(), + await grdt_block.get_remaining()) + else: + data = await self.file_data.read(await grdt_block.get_remaining()) + + yield (self.Grain(meta, data), local_id) + except EOFError: + return # We ran out of grains to read and hit EOF diff --git a/mediagrains_async/aiobytes.py b/mediagrains_async/aiobytes.py new file mode 100644 index 0000000..8d1c237 --- /dev/null +++ b/mediagrains_async/aiobytes.py @@ -0,0 +1,124 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +"""\ +A simple wrapper class AsyncIOBytes which is an asynchronous version of IOBytes +""" + +from collections.abc import Sequence + + +__all__ = ["AsyncIOBytes"] + + +class AsyncLazyLoaderUnloadedError (Exception): + pass + + +class AsyncLazyLoader (object): + """An object that can be loaded asynchronously as needed. + + In most cases this class should be subclassed to make actually useful classes, but technically it's not + an abstract base class because it *can* be used directly if needed. + + The constructor takes a coroutine taking no parameters which returns an object as its only parameter. + + Unlike the synchronous version loading is not automatic, but can be triggered by awaiting the load coroutine. + """ + + _attributes = [] + + def __init__(self, loader): + """ + :param loader: a coroutine taking no parameters which returns an object + """ + self._object = None + self._loader = loader + + def __getattribute__(self, attr): + if attr in (['_object', '_loader', '__repr__', 'load'] + type(self)._attributes): + return object.__getattribute__(self, attr) + else: + if object.__getattribute__(self, '_object') is None: + raise AsyncLazyLoaderUnloadedError( + "A call to {} was made on an object that hasn't been loaded".format(attr) + ) + return getattr(object.__getattribute__(self, '_object'), attr) + + def __repr__(self): + if object.__getattribute__(self, '_object') is None: + return object.__repr__(self) + else: + return repr(object.__getattribute__(self, '_object')) + + def __setattr__(self, attr, value): + if attr in ['_object', '_loader'] + type(self)._attributes: + return object.__setattr__(self, attr, value) + else: + if object.__getattribute__(self, '_object') is None: + raise AsyncLazyLoaderUnloadedError( + "A call to set {} was made on an object that hasn't been loaded".format(attr) + ) + return setattr(object.__getattribute__(self, '_object'), attr, value) + + async def load(self): + """Await this coroutine to load the actual object""" + _loader 
= object.__getattribute__(self, "_loader") + object.__setattr__(self, "_object", await _loader()) + + +class AsyncIOBytes (AsyncLazyLoader, Sequence): + """A Bytes-like object that is backed by a seekable Asynchronous IO stream and can be loaded asynchronously by + awaiting its load coroutine. + """ + + _attributes = ['_istream', '_start', '_length', '__len__'] + + def __init__(self, istream, start, length): + """ + :param istream: An instance of an asynchronous seekable readable + :param start: The value to pass to istream.seek to get to the start of this data + :param start: The length of the data + """ + async def __loadbytes(): + loc = await self._istream.tell() + try: + await self._istream.seek(self._start) + _bytes = await self._istream.read(self._length) + finally: + await self._istream.seek(loc) + return _bytes + + AsyncLazyLoader.__init__(self, __loadbytes) + self._istream = istream + self._start = start + self._length = length + + def __len__(self): + if self._object is None: + return self._length + else: + return len(self._object) + + def __repr__(self): + if self._object is None: + return "AsyncIOBytes({!r}, {!r}, {!r})".format(self._istream, self._start, self._length) + else: + return repr(self._object) + + def __getitem__(self, *args, **kwargs): + return self.__getattribute__('__getitem__')(*args, **kwargs) diff --git a/setup.py b/setup.py index 2cb4e62..0f6b27e 100644 --- a/setup.py +++ b/setup.py @@ -16,13 +16,13 @@ # from __future__ import print_function + from setuptools import setup -import os +from sys import version_info packages = { 'mediagrains' : 'mediagrains' } -package_names = packages.keys() packages_required = [ "mediatimestamp >= 1.2.0", @@ -33,6 +33,13 @@ deps_required = [] + +if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + packages['mediagrains_async'] = 'mediagrains_async' + + +package_names = list(packages.keys()) + setup(name="mediagrains", version="2.5.3", description="Simple utility for grain-based 
media", diff --git a/tests/atest_gsf.py b/tests/atest_gsf.py new file mode 100644 index 0000000..9a79d89 --- /dev/null +++ b/tests/atest_gsf.py @@ -0,0 +1,137 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from unittest import TestCase +import mock +import asyncio +import warnings +import aiofiles +from datetime import datetime +from uuid import UUID + +from mediagrains.async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError +from mediagrains.grain import VIDEOGRAIN, AUDIOGRAIN, CODEDVIDEOGRAIN, CODEDAUDIOGRAIN, EVENTGRAIN + + +def async_test(f): + def __inner(*args, **kwargs): + loop = asyncio.get_event_loop() + loop.set_debug(True) + E = None + warns = [] + + try: + with warnings.catch_warnings(record=True) as warns: + loop.run_until_complete(f(*args, **kwargs)) + + except AssertionError as e: + E = e + except Exception as e: + E = e + + for w in warns: + warnings.showwarning(w.message, + w.category, + w.filename, + w.lineno) + if E is None: + args[0].assertEqual(len(warns), 0, + msg="asyncio subsystem generated warnings due to unawaited coroutines") + else: + raise E + + return __inner + + +class TestAsyncGSFBlock (TestCase): + @async_test + async def test_decode_headers(self): + async with aiofiles.open('examples/video.gsf', 'rb') as video_data_stream: + UUT = AsyncGSFDecoder(file_data=video_data_stream) + head = await UUT.decode_file_headers() + + 
self.assertEqual(head['created'], datetime(2018, 2, 7, 10, 38, 22)) + self.assertEqual(head['id'], UUID('163fd9b7-bef4-4d92-8488-31f3819be008')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('c6a3d3ff-74c0-446d-b59e-de1041f27e8a')) + + @async_test + async def test_generate_grains(self): + """Test that the generator yields each grain""" + async with aiofiles.open('examples/video.gsf', 'rb') as video_data_stream: + UUT = AsyncGSFDecoder(file_data=video_data_stream) + await UUT.decode_file_headers() + + grain_count = 0 + async for (grain, local_id) in UUT.grains(): + self.assertIsInstance(grain, VIDEOGRAIN) + self.assertEqual(grain.source_id, UUID('49578552-fb9e-4d3e-a197-3e3c437a895d')) + self.assertEqual(grain.flow_id, UUID('6e55f251-f75a-4d56-b3af-edb8b7993c3c')) + + grain_count += 1 + + self.assertEqual(10, grain_count) # There are 10 grains in the file + + @async_test + async def test_local_id_filtering(self): + async with aiofiles.open('examples/interleaved.gsf', 'rb') as interleaved_data_stream: + UUT = AsyncGSFDecoder(file_data=interleaved_data_stream) + await UUT.decode_file_headers() + + local_ids = set() + flow_ids = set() + async for (grain, local_id) in UUT.grains(): + local_ids.add(local_id) + flow_ids.add(grain.flow_id) + + self.assertEqual(local_ids, set([1, 2])) + self.assertEqual(flow_ids, set([UUID('28e4e09e-3517-11e9-8da2-5065f34ed007'), + UUID('2472f38e-3517-11e9-8da2-5065f34ed007')])) + + await interleaved_data_stream.seek(0) + await UUT.decode_file_headers() + + async for (grain, local_id) in UUT.grains(local_ids=[1]): + self.assertIsInstance(grain, AUDIOGRAIN) + self.assertEqual(grain.source_id, UUID('1f8fd27e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(grain.flow_id, UUID('28e4e09e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(local_id, 1) + + await interleaved_data_stream.seek(0) + await UUT.decode_file_headers() + + async for (grain, local_id) in UUT.grains(local_ids=[2]): + 
self.assertIsInstance(grain, VIDEOGRAIN) + self.assertEqual(grain.source_id, UUID('1f8fd27e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(grain.flow_id, UUID('2472f38e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(local_id, 2) + + @async_test + async def test_lazy_loading(self): + async with aiofiles.open('examples/video.gsf', 'rb') as video_data_stream: + UUT = AsyncGSFDecoder(file_data=video_data_stream) + await UUT.decode_file_headers() + + grains = [grain async for (grain, local_id) in UUT.grains()] + + with self.assertRaises(AsyncLazyLoaderUnloadedError): + grains[0].data[0] + + await grains[0].data.load() + + self.assertEqual(grains[0].data[0:1024], b"\x10" * 1024) diff --git a/tox.ini b/tox.ini index 10caf61..973c290 100644 --- a/tox.ini +++ b/tox.ini @@ -8,10 +8,10 @@ envlist = py27, py3 [testenv] commands = - coverage run --source=./mediagrains -m unittest discover -s tests - coverage annotate - coverage report + py27: python -m unittest discover -s tests -p test_*.py + py3: python -m unittest discover -s tests -p *test_*.py deps = hypothesis >= 4.0.0 mock coverage + py3: aiofiles From cd1bde8476bb286097e3f2e7d4c0c569851e8633 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 18 Apr 2019 17:57:00 +0100 Subject: [PATCH 03/76] Support for asynchronous iteration and context management in AsyncGSFDecoder --- mediagrains_async/__init__.py | 21 +++++++++++- tests/atest_gsf.py | 64 ++++++++++++++--------------------- 2 files changed, 46 insertions(+), 39 deletions(-) diff --git a/mediagrains_async/__init__.py b/mediagrains_async/__init__.py index 38cfd9e..1c028e5 100644 --- a/mediagrains_async/__init__.py +++ b/mediagrains_async/__init__.py @@ -269,6 +269,8 @@ def __init__(self, file_data, parse_grain=Grain, **kwargs): """ self.Grain = parse_grain self.file_data = file_data + self.head = None + self.start_loc = None async def _decode_ssb_header(self): """Find and read the SSB header in the GSF file @@ -479,10 +481,24 @@ async def 
decode_file_headers(self): try: async with AsyncGSFBlock(self.file_data, want_tag="head") as head_block: - return await self._decode_head(head_block) + self.head = await self._decode_head(head_block) + return self.head except EOFError: raise GSFDecodeError("No head block found in file", self.file_data.tell()) + async def __aenter__(self): + self.start_loc = await self.file_data.tell() + await self.decode_file_headers() + return self + + async def __aexit__(self, *args, **kwargs): + self.head = None + await self.file_data.seek(self.start_loc) + self.start_loc = None + + def __aiter__(self): + return self.grains() + async def grains(self, local_ids=None, load_lazily=True): """Asynchronous generator to get grains from the GSF file. Skips blocks which aren't "grai". @@ -498,6 +514,9 @@ async def grains(self, local_ids=None, load_lazily=True): :yields: (Grain, local_id) tuple for each grain :raises GSFDecodeError: If grain is invalid (e.g. no "gbhd" child) """ + if self.head is None: + await self.decode_file_headers() + while True: try: async with AsyncGSFBlock(self.file_data, want_tag="grai") as grai_block: diff --git a/tests/atest_gsf.py b/tests/atest_gsf.py index 9a79d89..328278d 100644 --- a/tests/atest_gsf.py +++ b/tests/atest_gsf.py @@ -74,11 +74,8 @@ async def test_decode_headers(self): async def test_generate_grains(self): """Test that the generator yields each grain""" async with aiofiles.open('examples/video.gsf', 'rb') as video_data_stream: - UUT = AsyncGSFDecoder(file_data=video_data_stream) - await UUT.decode_file_headers() - grain_count = 0 - async for (grain, local_id) in UUT.grains(): + async for (grain, local_id) in AsyncGSFDecoder(file_data=video_data_stream).grains(): self.assertIsInstance(grain, VIDEOGRAIN) self.assertEqual(grain.source_id, UUID('49578552-fb9e-4d3e-a197-3e3c437a895d')) self.assertEqual(grain.flow_id, UUID('6e55f251-f75a-4d56-b3af-edb8b7993c3c')) @@ -90,44 +87,35 @@ async def test_generate_grains(self): @async_test async def 
test_local_id_filtering(self): async with aiofiles.open('examples/interleaved.gsf', 'rb') as interleaved_data_stream: - UUT = AsyncGSFDecoder(file_data=interleaved_data_stream) - await UUT.decode_file_headers() - - local_ids = set() - flow_ids = set() - async for (grain, local_id) in UUT.grains(): - local_ids.add(local_id) - flow_ids.add(grain.flow_id) - - self.assertEqual(local_ids, set([1, 2])) - self.assertEqual(flow_ids, set([UUID('28e4e09e-3517-11e9-8da2-5065f34ed007'), - UUID('2472f38e-3517-11e9-8da2-5065f34ed007')])) - - await interleaved_data_stream.seek(0) - await UUT.decode_file_headers() - - async for (grain, local_id) in UUT.grains(local_ids=[1]): - self.assertIsInstance(grain, AUDIOGRAIN) - self.assertEqual(grain.source_id, UUID('1f8fd27e-3517-11e9-8da2-5065f34ed007')) - self.assertEqual(grain.flow_id, UUID('28e4e09e-3517-11e9-8da2-5065f34ed007')) - self.assertEqual(local_id, 1) - - await interleaved_data_stream.seek(0) - await UUT.decode_file_headers() - - async for (grain, local_id) in UUT.grains(local_ids=[2]): - self.assertIsInstance(grain, VIDEOGRAIN) - self.assertEqual(grain.source_id, UUID('1f8fd27e-3517-11e9-8da2-5065f34ed007')) - self.assertEqual(grain.flow_id, UUID('2472f38e-3517-11e9-8da2-5065f34ed007')) - self.assertEqual(local_id, 2) + async with AsyncGSFDecoder(file_data=interleaved_data_stream) as UUT: + local_ids = set() + flow_ids = set() + async for (grain, local_id) in UUT.grains(): + local_ids.add(local_id) + flow_ids.add(grain.flow_id) + + self.assertEqual(local_ids, set([1, 2])) + self.assertEqual(flow_ids, set([UUID('28e4e09e-3517-11e9-8da2-5065f34ed007'), + UUID('2472f38e-3517-11e9-8da2-5065f34ed007')])) + + async with AsyncGSFDecoder(file_data=interleaved_data_stream) as UUT: + async for (grain, local_id) in UUT.grains(local_ids=[1]): + self.assertIsInstance(grain, AUDIOGRAIN) + self.assertEqual(grain.source_id, UUID('1f8fd27e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(grain.flow_id, 
UUID('28e4e09e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(local_id, 1) + + async with AsyncGSFDecoder(file_data=interleaved_data_stream) as UUT: + async for (grain, local_id) in UUT.grains(local_ids=[2]): + self.assertIsInstance(grain, VIDEOGRAIN) + self.assertEqual(grain.source_id, UUID('1f8fd27e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(grain.flow_id, UUID('2472f38e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(local_id, 2) @async_test async def test_lazy_loading(self): async with aiofiles.open('examples/video.gsf', 'rb') as video_data_stream: - UUT = AsyncGSFDecoder(file_data=video_data_stream) - await UUT.decode_file_headers() - - grains = [grain async for (grain, local_id) in UUT.grains()] + grains = [grain async for (grain, local_id) in AsyncGSFDecoder(file_data=video_data_stream).grains()] with self.assertRaises(AsyncLazyLoaderUnloadedError): grains[0].data[0] From 81e921aa120bb16e616a62afeb8d650a5ca05039 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 23 Apr 2019 16:14:40 +0100 Subject: [PATCH 04/76] v2.6.0.dev1: version bump --- CHANGELOG.md | 3 +++ setup.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 949bef3..7ed5f0e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ # Mediagrains Library Changelog +## 2.6.0 (Under development) +- Added support for async methods to gsf decoder in python 3.6+ + ## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does diff --git a/setup.py b/setup.py index 0f6b27e..c73d7a8 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.5.3", + version="2.6.0.dev1", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', From 3f66b1a0a59ccc813aa87337f4b85a3786918b27 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 23 Apr 2019 
17:00:51 +0100 Subject: [PATCH 05/76] Added artifactory upload to Jenkinsfile for dev branch --- Jenkinsfile | 40 +++++++++++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index cd1b440..1317c9c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -23,10 +23,11 @@ pipeline { buildDiscarder(logRotator(numToKeepStr: '10')) // Discard old builds } triggers { - cron(env.BRANCH_NAME == 'master' ? 'H H(0-8) * * *' : '') // Build master some time every morning + cron((env.BRANCH_NAME == 'master' || env.BRANCH_NAME == 'dev')? 'H H(0-8) * * *' : '') // Build master some time every morning } parameters { - booleanParam(name: "FORCE_PYUPLOAD", defaultValue: false, description: "Force Python artifact upload") + booleanParam(name: "FORCE_PYPIUPLOAD", defaultValue: false, description: "Force Python artifact upload to PyPi") + booleanParam(name: "FORCE_PYUPLOAD", defaultValue: false, description: "Force Python artifact upload to internal BBC repo") booleanParam(name: "FORCE_DEBUPLOAD", defaultValue: false, description: "Force Debian package upload") booleanParam(name: "FORCE_DOCSUPLOAD", defaultValue: false, description: "Force docs upload") } @@ -162,10 +163,11 @@ pipeline { when { anyOf { expression { return params.FORCE_PYUPLOAD } + expression { return params.FORCE_PYPIUPLOAD } expression { return params.FORCE_DEBUPLOAD } expression { return params.FORCE_DOCSUPLOAD } expression { - bbcShouldUploadArtifacts(branches: ["master"]) + bbcShouldUploadArtifacts(branches: ["master", "dev"]) } } } @@ -175,7 +177,7 @@ pipeline { anyOf { expression { return params.FORCE_DOCSUPLOAD } expression { - bbcShouldUploadArtifacts(branches: ["master"]) + bbcShouldUploadArtifacts(branches: ["master", "dev"]) } } } @@ -186,7 +188,7 @@ pipeline { stage ("Upload to PyPi") { when { anyOf { - expression { return params.FORCE_PYUPLOAD } + expression { return params.FORCE_PYPIUPLOAD } expression { bbcShouldUploadArtifacts(branches: 
["master"]) } @@ -211,6 +213,34 @@ pipeline { } } } + stage ("Upload to Artifactory") { + when { + anyOf { + expression { return params.FORCE_PYUPLOAD } + expression { + bbcShouldUploadArtifacts(branches: ["dev"]) + } + } + } + steps { + script { + env.artifactoryUpload_result = "FAILURE" + } + bbcGithubNotify(context: "artifactory/upload", status: "PENDING") + sh 'rm -rf dist/*' + bbcMakeGlobalWheel("py27") + bbcMakeGlobalWheel("py3") + bbcTwineUpload(toxenv: "py3", pypi: false) + script { + env.artifactoryUpload_result = "SUCCESS" // This will only run if the steps above succeeded + } + } + post { + always { + bbcGithubNotify(context: "artifactory/upload", status: env.artifactoryUpload_result) + } + } + } stage ("upload deb") { when { anyOf { From 10302c5c8c249a2db8d9631f1a3a946ff0c488a9 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 23 Apr 2019 17:17:11 +0100 Subject: [PATCH 06/76] Added exclusions to flake8 (which is running in py27 at the moment) --- Jenkinsfile | 2 +- mediagrains/async.py | 2 +- setup.py | 2 +- tests/atest_gsf.py | 3 +-- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 1317c9c..dce9f7e 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -51,7 +51,7 @@ pipeline { } bbcGithubNotify(context: "lint/flake8", status: "PENDING") // Run the linter - sh 'flake8' + sh 'flake8 --exclude=.svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg,._*,./tests/atest_*,./mediagrains_async/*' script { env.lint_result = "SUCCESS" // This will only run if the sh above succeeded } diff --git a/mediagrains/async.py b/mediagrains/async.py index 72f5f21..1de2c01 100644 --- a/mediagrains/async.py +++ b/mediagrains/async.py @@ -22,7 +22,7 @@ from sys import version_info if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): - from mediagrains_async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError + from mediagrains_async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError # noqa: F401 __all__ = 
["AsyncGSFDecoder", "AsyncLazyLoaderUnloadedError"] else: diff --git a/setup.py b/setup.py index c73d7a8..915691e 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ from sys import version_info packages = { - 'mediagrains' : 'mediagrains' + 'mediagrains': 'mediagrains' } packages_required = [ diff --git a/tests/atest_gsf.py b/tests/atest_gsf.py index 328278d..3cca566 100644 --- a/tests/atest_gsf.py +++ b/tests/atest_gsf.py @@ -17,7 +17,6 @@ # from unittest import TestCase -import mock import asyncio import warnings import aiofiles @@ -25,7 +24,7 @@ from uuid import UUID from mediagrains.async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError -from mediagrains.grain import VIDEOGRAIN, AUDIOGRAIN, CODEDVIDEOGRAIN, CODEDAUDIOGRAIN, EVENTGRAIN +from mediagrains.grain import VIDEOGRAIN, AUDIOGRAIN def async_test(f): From b0e4eb2e4d5c373e8867ce47c0b589706e9958da Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 23 Apr 2019 17:30:42 +0100 Subject: [PATCH 07/76] Added mediagrains_async to Manifest.in --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index 55cbb0c..a3c4119 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,5 +3,6 @@ include tox.ini include COPYING recursive-include examples *.gsf recursive-include tests *.py +recursive-include mediagrains_async *.py include ICLA.md include LICENSE.md From 2ef1debfd0461693ffeafc0221b63038fc4ebf7d Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 24 Apr 2019 10:56:41 +0100 Subject: [PATCH 08/76] Restrict Jenkinsfile to run builds on bionic hosts only --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index dce9f7e..2551dff 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -16,7 +16,7 @@ pipeline { agent { - label "ubuntu&&apmm-slave" + label "ubuntu&&apmm-slave&&18.04" } options { ansiColor('xterm') // Add support for coloured output From fc91f627b72ce20dbf5b5b15cf125e3af6fa0beb Mon Sep 17 00:00:00 2001 From: James 
Weaver Date: Wed, 24 Apr 2019 11:03:58 +0100 Subject: [PATCH 09/76] Split linting for python2.7 and python3 --- .flake8 | 3 ++- Jenkinsfile | 30 ++++++++++++++++++++++++------ 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/.flake8 b/.flake8 index 167f324..7860623 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,4 @@ [flake8] max-line-length = 160 -exclude = .git,.tox,dist,deb_dist,__pycache__ \ No newline at end of file +exclude = .git,.tox,dist,deb_dist,__pycache__,._* +ignore = E121,E123,E126,E226,E24,E704,W503,W504,W606 \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile index 2551dff..161be2d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,24 +44,42 @@ pipeline { } stage ("Tests") { parallel { - stage ("Linting Check") { + stage ("Py2.7 Linting Check") { steps { script { - env.lint_result = "FAILURE" + env.lint27_result = "FAILURE" } - bbcGithubNotify(context: "lint/flake8", status: "PENDING") + bbcGithubNotify(context: "lint/flake8_27", status: "PENDING") // Run the linter - sh 'flake8 --exclude=.svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg,._*,./tests/atest_*,./mediagrains_async/*' + sh 'python2.7 -m flake8 --exclude=.svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg,._*,./tests/atest_*,./mediagrains_async/*' script { - env.lint_result = "SUCCESS" // This will only run if the sh above succeeded + env.lint27_result = "SUCCESS" // This will only run if the sh above succeeded } } post { always { - bbcGithubNotify(context: "lint/flake8", status: env.lint_result) + bbcGithubNotify(context: "lint/flake8_27", status: env.lint27_result) } } } + stage ("Py3 Linting Check") { + steps { + script { + env.lint3_result = "FAILURE" + } + bbcGithubNotify(context: "lint/flake8_3", status: "PENDING") + // Run the linter + sh 'python3 -m flake8' + script { + env.lint3_result = "SUCCESS" // This will only run if the sh above succeeded + } + } + post { + always { + bbcGithubNotify(context: "lint/flake8_3", status: env.lint3_result) + } + } + } 
stage ("Build Docs") { steps { sh 'TOXDIR=/tmp/$(basename ${WORKSPACE})/tox-docs make docs' From d6fe208bf5633327b3f398bb966ca203027f5030 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 24 Apr 2019 16:17:34 +0100 Subject: [PATCH 10/76] Added loads to async interface, together with significant testing --- mediagrains/async.py | 4 +- mediagrains_async/__init__.py | 82 ++++++- mediagrains_async/bytesaio.py | 77 ++++++ tests/atest_gsf.py | 445 +++++++++++++++++++++++++++++++++- 4 files changed, 592 insertions(+), 16 deletions(-) create mode 100644 mediagrains_async/bytesaio.py diff --git a/mediagrains/async.py b/mediagrains/async.py index 1de2c01..16434a0 100644 --- a/mediagrains/async.py +++ b/mediagrains/async.py @@ -22,8 +22,8 @@ from sys import version_info if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): - from mediagrains_async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError # noqa: F401 + from mediagrains_async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError, loads # noqa: F401 - __all__ = ["AsyncGSFDecoder", "AsyncLazyLoaderUnloadedError"] + __all__ = ["AsyncGSFDecoder", "AsyncLazyLoaderUnloadedError", "loads"] else: __all__ = [] diff --git a/mediagrains_async/__init__.py b/mediagrains_async/__init__.py index 1c028e5..9d8be9f 100644 --- a/mediagrains_async/__init__.py +++ b/mediagrains_async/__init__.py @@ -19,19 +19,46 @@ Library for handling mediagrains in pure python asyncio compatibility layer. 
""" +import asyncio + from uuid import UUID from os import SEEK_SET from datetime import datetime from fractions import Fraction -from mediatimestamp import Timestamp +from mediatimestamp.immutable import Timestamp from .aiobytes import AsyncIOBytes, AsyncLazyLoaderUnloadedError +from .bytesaio import BytesAIO from mediagrains import Grain from mediagrains.gsf import GSFDecodeBadVersionError, GSFDecodeBadFileTypeError, GSFDecodeError -__all__ = ["AsyncGSFDecoder", "AsyncLazyLoaderUnloadedError"] +__all__ = ["AsyncGSFDecoder", "AsyncLazyLoaderUnloadedError", "loads"] + + +async def loads(s, cls=None, parse_grain=None, **kwargs): + """Deserialise a GSF file from a string (or similar) into python, + returns a pair of (head, segments) where head is a python dict + containing general metadata from the file, and segments is a dictionary + mapping numeric segment ids to lists of Grain objects. + + If you wish to use a custom AsyncGSFDecoder subclass pass it as cls, if you + wish to use a custom Grain constructor pass it as parse_grain. The + defaults are AsyncGSFDecoder and Grain. Extra kwargs will be passed to the + decoder constructor. + + The custome parse_grain method can be an asynchronous coroutine or a synchronous callable. 
+ + There is no real benefit to using this over the synchronous version, since access to an in-memory buffer is + always going to be synchronous, but this can be used for convenience where you don't want multiple code paths + for synchronous and asynchronous code.""" + if cls is None: + cls = AsyncGSFDecoder + if parse_grain is None: + parse_grain = Grain + dec = cls(BytesAIO(s), parse_grain=parse_grain, **kwargs) + return await dec.decode() class AsyncGSFBlock(): @@ -255,6 +282,12 @@ async def read_rational(self): return Fraction(numerator, denominator) +def asynchronise(f): + async def __inner(*args, **kwargs): + return f(*args, **kwargs) + return __inner + + class AsyncGSFDecoder(object): """A decoder for GSF format that operates asynchronously. @@ -264,10 +297,13 @@ class AsyncGSFDecoder(object): def __init__(self, file_data, parse_grain=Grain, **kwargs): """Constructor - :param parse_grain: Function that takes a (metadata dict, buffer) and returns a grain representation + :param parse_grain: Function or coroutine that takes a (metadata dict, buffer) and returns a grain + representation :param file_data: A readable asynchronous file io-like object similar to those provided by aiofiles """ self.Grain = parse_grain + if not asyncio.iscoroutine(self.Grain): + self.Grain = asynchronise(self.Grain) self.file_data = file_data self.head = None self.start_loc = None @@ -278,8 +314,9 @@ async def _decode_ssb_header(self): :returns: (major, minor) version tuple :raises GSFDecodeBadFileTypeError: If the SSB tag shows this isn't a GSF file """ - ssb_block = AsyncGSFBlock(self.file_data) + ssb_block = AsyncGSFBlock(self.file_data) + ssb_block.block_start = await self.file_data.tell() tag = await ssb_block.read_string(8) if tag != "SSBBgrsg": @@ -475,6 +512,9 @@ async def decode_file_headers(self): :raises GSFDecodeBadFileTypeError: If this isn't a GSF file :raises GSFDecodeError: If the file doesn't have a "head" block """ + if self.head is not None: + return self.head + 
(major, minor) = await self._decode_ssb_header() if (major, minor) != (7, 0): raise GSFDecodeBadVersionError("Unknown Version {}.{}".format(major, minor), 0, major, minor) @@ -484,17 +524,18 @@ async def decode_file_headers(self): self.head = await self._decode_head(head_block) return self.head except EOFError: - raise GSFDecodeError("No head block found in file", self.file_data.tell()) + raise GSFDecodeError("No head block found in file", await self.file_data.tell()) async def __aenter__(self): - self.start_loc = await self.file_data.tell() + if self.start_loc is None: + self.start_loc = await self.file_data.tell() await self.decode_file_headers() return self async def __aexit__(self, *args, **kwargs): - self.head = None - await self.file_data.seek(self.start_loc) - self.start_loc = None + if self.start_loc is not None: + await self.file_data.seek(self.start_loc) + self.start_loc = None def __aiter__(self): return self.grains() @@ -514,8 +555,7 @@ async def grains(self, local_ids=None, load_lazily=True): :yields: (Grain, local_id) tuple for each grain :raises GSFDecodeError: If grain is invalid (e.g. no "gbhd" child) """ - if self.head is None: - await self.decode_file_headers() + await self.decode_file_headers() while True: try: @@ -543,6 +583,24 @@ async def grains(self, local_ids=None, load_lazily=True): else: data = await self.file_data.read(await grdt_block.get_remaining()) - yield (self.Grain(meta, data), local_id) + yield (await self.Grain(meta, data), local_id) except EOFError: return # We ran out of grains to read and hit EOF + + async def decode(self, load_lazily=False): + """Decode a GSF formatted bytes object + + :param load_lazily: If True, the grains returned will be designed to lazily load data from the underlying stream + only when it is needed. These grain data elements will have an extra 'load' coroutine for + triggering this load, and accessing data in their data element without first awaiting this + coroutine will raise an exception. 
+ :returns: A dictionary mapping sequence ids to lists of GRAIN objects (or subclasses of such). + """ + segments = {} + async with self: + async for (grain, local_id) in self.grains(load_lazily=load_lazily): + if local_id not in segments: + segments[local_id] = [] + segments[local_id].append(grain) + + return (self.head, segments) diff --git a/mediagrains_async/bytesaio.py b/mediagrains_async/bytesaio.py new file mode 100644 index 0000000..c914730 --- /dev/null +++ b/mediagrains_async/bytesaio.py @@ -0,0 +1,77 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +"""\ +A simple wrapper class BytesAIO which is an asynchronous version of BytesIO +""" + +import asyncio +from io import BytesIO + + +def asynchronise(f): + async def __inner(*args, **kwargs): + return f(*args, **kwargs) + return __inner + + +class BytesAIO(object): + def __init__(self, b): + """Constructor + + :param s: A bytes object""" + self._bytesio = BytesIO(b) + + def __getattr__(self, attr): + if attr in ['getbuffer', + 'getvalue', + 'closed']: + return getattr(self._bytesio, attr) + elif attr in ['read1', + 'readinto1', + 'detach', + 'read', + 'readinto', + 'write', + 'close', + 'fileno', + 'flush', + 'isatty', + 'readable', + 'readline', + 'readlines', + 'seek', + 'seekable', + 'tell', + 'truncate', + 'writeable', + 'writelines']: + return asynchronise(getattr(self._bytesio, attr)) + else: + raise AttributeError + + async def __aenter__(self): + return self._bytesio.__enter__() + + async def __aexit__(self, *args, **kwargs): + return self._bytesio.__exit__(*args, **kwargs) + + def __aiter__(self): + return self + + def __anext__(self): + return next(self._bytesio) diff --git a/tests/atest_gsf.py b/tests/atest_gsf.py index 3cca566..709f347 100644 --- a/tests/atest_gsf.py +++ b/tests/atest_gsf.py @@ -23,8 +23,35 @@ from datetime import datetime from uuid import UUID -from mediagrains.async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError -from mediagrains.grain import VIDEOGRAIN, AUDIOGRAIN +from mediagrains.async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError, loads +from mediagrains.grain import VIDEOGRAIN, AUDIOGRAIN, EVENTGRAIN, CODEDVIDEOGRAIN, CODEDAUDIOGRAIN +from mediagrains.gsf import GSFDecodeError +from mediagrains.gsf import GSFEncodeError +from mediagrains.gsf import GSFDecodeBadVersionError +from mediagrains.gsf import GSFDecodeBadFileTypeError +from mediagrains.gsf import GSFEncodeAddToActiveDump +from mediagrains.cogenums import CogFrameFormat, CogFrameLayout, CogAudioFormat + +from mediatimestamp.immutable import Timestamp, 
TimeOffset + + +with open('examples/video.gsf', 'rb') as f: + VIDEO_DATA = f.read() + +with open('examples/coded_video.gsf', 'rb') as f: + CODED_VIDEO_DATA = f.read() + +with open('examples/audio.gsf', 'rb') as f: + AUDIO_DATA = f.read() + +with open('examples/coded_audio.gsf', 'rb') as f: + CODED_AUDIO_DATA = f.read() + +with open('examples/event.gsf', 'rb') as f: + EVENT_DATA = f.read() + +with open('examples/interleaved.gsf', 'rb') as f: + INTERLEAVED_DATA = f.read() def async_test(f): @@ -122,3 +149,417 @@ async def test_lazy_loading(self): await grains[0].data.load() self.assertEqual(grains[0].data[0:1024], b"\x10" * 1024) + + +class TestAsyncGSFLoads(TestCase): + @async_test + async def test_loads_video(self): + (head, segments) = await loads(VIDEO_DATA) + + self.assertEqual(head['created'], datetime(2018, 2, 7, 10, 38, 22)) + self.assertEqual(head['id'], UUID('163fd9b7-bef4-4d92-8488-31f3819be008')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('c6a3d3ff-74c0-446d-b59e-de1041f27e8a')) + self.assertIn(head['segments'][0]['local_id'], segments) + self.assertEqual(len(segments[head['segments'][0]['local_id']]), head['segments'][0]['count']) + + ots = Timestamp(1420102800, 0) + for grain in segments[head['segments'][0]['local_id']]: + self.assertIsInstance(grain, VIDEOGRAIN) + self.assertEqual(grain.grain_type, "video") + self.assertEqual(grain.source_id, UUID('49578552-fb9e-4d3e-a197-3e3c437a895d')) + self.assertEqual(grain.flow_id, UUID('6e55f251-f75a-4d56-b3af-edb8b7993c3c')) + self.assertEqual(grain.origin_timestamp, ots) + ots += TimeOffset.from_nanosec(20000000) + + self.assertEqual(grain.format, CogFrameFormat.U8_420) + self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) + self.assertEqual(grain.width, 480) + self.assertEqual(grain.height, 270) + + self.assertEqual(len(grain.components), 3) + + self.assertEqual(grain.components[0].width, 480) + self.assertEqual(grain.components[0].height, 270) + 
self.assertEqual(grain.components[0].stride, 480) + self.assertEqual(grain.components[0].length, 480*270) + self.assertEqual(grain.components[0].offset, 0) + + self.assertEqual(grain.components[1].width, 240) + self.assertEqual(grain.components[1].height, 135) + self.assertEqual(grain.components[1].stride, 240) + self.assertEqual(grain.components[1].length, 240*135) + self.assertEqual(grain.components[1].offset, 480*270) + + self.assertEqual(grain.components[2].width, 240) + self.assertEqual(grain.components[2].height, 135) + self.assertEqual(grain.components[2].stride, 240) + self.assertEqual(grain.components[2].length, 240*135) + self.assertEqual(grain.components[2].offset, 480*270 + 240*135) + + self.assertEqual(len(grain.data), grain.components[0].length + grain.components[1].length + grain.components[2].length) + + @async_test + async def test_loads_audio(self): + (head, segments) = await loads(AUDIO_DATA) + + self.assertEqual(head['created'], datetime(2018, 2, 7, 10, 37, 50)) + self.assertEqual(head['id'], UUID('781fb6c5-d22f-4df5-ba69-69059efd5ced')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('fc4c5533-3fad-4437-93c0-8668cb876578')) + self.assertIn(head['segments'][0]['local_id'], segments) + self.assertEqual(len(segments[head['segments'][0]['local_id']]), head['segments'][0]['count']) + + start_ots = Timestamp(1420102800, 0) + ots = start_ots + total_samples = 0 + for grain in segments[head['segments'][0]['local_id']]: + self.assertIsInstance(grain, AUDIOGRAIN) + self.assertEqual(grain.grain_type, "audio") + self.assertEqual(grain.source_id, UUID('38bfd902-b35f-40d6-9ecf-dc95869130cf')) + self.assertEqual(grain.flow_id, UUID('f1c8c095-5739-46f4-9bbc-3d7050c9ba23')) + self.assertEqual(grain.origin_timestamp, ots) + + self.assertEqual(grain.format, CogAudioFormat.S24_INTERLEAVED) + self.assertEqual(grain.channels, 2) + self.assertEqual(grain.samples, 1024) + self.assertEqual(grain.sample_rate, 48000) + + 
self.assertEqual(len(grain.data), 6144) + total_samples += grain.samples + ots = start_ots + TimeOffset.from_count(total_samples, grain.sample_rate) + + @async_test + async def test_loads_coded_video(self): + (head, segments) = await loads(CODED_VIDEO_DATA) + + self.assertEqual(head['created'], datetime(2018, 2, 7, 10, 38, 41)) + self.assertEqual(head['id'], UUID('8875f02c-2528-4566-9e9a-23efc3a9bbe5')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('bdfa1343-0a20-4a98-92f5-0f7f0eb75479')) + self.assertIn(head['segments'][0]['local_id'], segments) + self.assertEqual(len(segments[head['segments'][0]['local_id']]), head['segments'][0]['count']) + + ots = Timestamp(1420102800, 0) + unit_offsets = [ + ([0, 6, 34, 42, 711, 719], 36114), + ([0, 6, 14], 380), + ([0, 6, 14], 8277), + ([0, 6, 14], 4914), + ([0, 6, 14], 4961), + ([0, 6, 14], 3777), + ([0, 6, 14], 1950), + ([0, 6, 14], 31), + ([0, 6, 14], 25), + ([0, 6, 14], 6241)] + for grain in segments[head['segments'][0]['local_id']]: + self.assertIsInstance(grain, CODEDVIDEOGRAIN) + self.assertEqual(grain.grain_type, "coded_video") + self.assertEqual(grain.source_id, UUID('49578552-fb9e-4d3e-a197-3e3c437a895d')) + self.assertEqual(grain.flow_id, UUID('b6b05efb-6067-4ff8-afac-ec735a85674e')) + self.assertEqual(grain.origin_timestamp, ots) + ots += TimeOffset.from_nanosec(20000000) + + self.assertEqual(grain.format, CogFrameFormat.H264) + self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) + self.assertEqual(grain.origin_width, 1920) + self.assertEqual(grain.origin_height, 1080) + self.assertEqual(grain.coded_width, 0) + self.assertEqual(grain.coded_height, 0) + self.assertEqual(grain.length, unit_offsets[0][1]) + self.assertEqual(grain.temporal_offset, 0) + self.assertEqual(grain.unit_offsets, unit_offsets[0][0]) + unit_offsets.pop(0) + + @async_test + async def test_loads_rejects_incorrect_type_file(self): + with self.assertRaises(GSFDecodeBadFileTypeError) as cm: + 
await loads(b"POTATO23\x07\x00\x00\x00") + self.assertEqual(cm.exception.offset, 0) + self.assertEqual(cm.exception.filetype, "POTATO23") + + @async_test + async def test_loads_rejects_incorrect_version_file(self): + with self.assertRaises(GSFDecodeBadVersionError) as cm: + await loads(b"SSBBgrsg\x08\x00\x03\x00") + self.assertEqual(cm.exception.offset, 0) + self.assertEqual(cm.exception.major, 8) + self.assertEqual(cm.exception.minor, 3) + + @async_test + async def test_loads_rejects_bad_head_tag(self): + with self.assertRaises(GSFDecodeError) as cm: + await loads(b"SSBBgrsg\x07\x00\x00\x00" + + b"\xff\xff\xff\xff\x00\x00\x00\x00") + self.assertEqual(cm.exception.offset, 12) + + @async_test + async def test_loads_raises_exception_without_head(self): + with self.assertRaises(GSFDecodeError) as cm: + await loads(b"SSBBgrsg\x07\x00\x00\x00") + self.assertEqual(cm.exception.offset, 12) + + @async_test + async def test_loads_skips_unknown_block_before_head(self): + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + b"dumy\x08\x00\x00\x00" + + b"head\x1f\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f") + + self.assertEqual(head['id'], UUID('d19c0b91-1590-11e8-8580-dca904824eec')) + self.assertEqual(head['created'], datetime(1983, 3, 29, 15, 15, 15)) + self.assertEqual(head['segments'], []) + self.assertEqual(head['tags'], []) + + @async_test + async def test_loads_skips_unknown_block_instead_of_segm(self): + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + b"head\x27\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + b"dumy\x08\x00\x00\x00") + + self.assertEqual(head['id'], UUID('d19c0b91-1590-11e8-8580-dca904824eec')) + self.assertEqual(head['created'], datetime(1983, 3, 29, 15, 15, 15)) + self.assertEqual(head['segments'], []) + self.assertEqual(head['tags'], []) + + @async_test + async def 
test_loads_skips_unknown_block_before_segm(self): + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x49\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"dumy\x08\x00\x00\x00") + + (b"segm\x22\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x00\x00\x00\x00\x00\x00\x00\x00"))) + + self.assertEqual(head['id'], UUID('d19c0b91-1590-11e8-8580-dca904824eec')) + self.assertEqual(head['created'], datetime(1983, 3, 29, 15, 15, 15)) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['local_id'], 1) + self.assertEqual(head['segments'][0]['id'], UUID('d3e191f0-1594-11e8-91ac-dca904824eec')) + self.assertEqual(head['segments'][0]['tags'], []) + self.assertEqual(head['segments'][0]['count'], 0) + self.assertEqual(head['tags'], []) + + @async_test + async def test_loads_raises_when_head_too_small(self): + with self.assertRaises(GSFDecodeError) as cm: + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x29\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"dumy\x08\x00\x00\x00") + + (b"segm\x22\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x00\x00\x00\x00\x00\x00\x00\x00"))) + + self.assertEqual(cm.exception.offset, 51) + + @async_test + async def test_loads_raises_when_segm_too_small(self): + with self.assertRaises(GSFDecodeError) as cm: + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x41\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"segm\x21\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x00\x00\x00\x00\x00\x00\x00\x00"))) + + self.assertEqual(cm.exception.offset, 77) + + @async_test 
+ async def test_loads_decodes_tils(self): + src_id = UUID('c707d64c-1596-11e8-a3fb-dca904824eec') + flow_id = UUID('da78668a-1596-11e8-a577-dca904824eec') + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x41\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"segm\x22\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x01\x00\x00\x00\x00\x00\x00\x00")) + + (b"grai\x8d\x00\x00\x00" + + b"\x01\x00" + + (b"gbhd\x83\x00\x00\x00" + + src_id.bytes + + flow_id.bytes + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + (b"tils\x27\x00\x00\x00" + + b"\x01\x00" + + b"dummy timecode\x00\x00" + + b"\x07\x00\x00\x00" + + b"\x19\x00\x00\x00\x01\x00\x00\x00" + + b"\x00"))) + + (b"grai\x08\x00\x00\x00")) + + self.assertEqual(head['id'], UUID('d19c0b91-1590-11e8-8580-dca904824eec')) + self.assertEqual(head['created'], datetime(1983, 3, 29, 15, 15, 15)) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['local_id'], 1) + self.assertEqual(head['segments'][0]['id'], UUID('d3e191f0-1594-11e8-91ac-dca904824eec')) + self.assertEqual(head['segments'][0]['tags'], []) + self.assertEqual(head['segments'][0]['count'], 1) + self.assertEqual(head['tags'], []) + self.assertEqual(segments[1][0].timelabels, [{'tag': 'dummy timecode', 'timelabel': {'frames_since_midnight': 7, + 'frame_rate_numerator': 25, + 'frame_rate_denominator': 1, + 'drop_frame': False}}]) + + @async_test + async def test_loads_raises_when_grain_type_unknown(self): + with self.assertRaises(GSFDecodeError) as cm: + src_id = UUID('c707d64c-1596-11e8-a3fb-dca904824eec') + flow_id = UUID('da78668a-1596-11e8-a577-dca904824eec') + (head, segments) = await 
loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x41\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"segm\x22\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x01\x00\x00\x00\x00\x00\x00\x00")) + + (b"grai\x8d\x00\x00\x00" + + b"\x01\x00" + + (b"gbhd\x83\x00\x00\x00" + + src_id.bytes + + flow_id.bytes + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + (b"dumy\x08\x00\x00\x00")))) + + self.assertEqual(cm.exception.offset, 179) + + @async_test + async def test_loads_decodes_empty_grains(self): + src_id = UUID('c707d64c-1596-11e8-a3fb-dca904824eec') + flow_id = UUID('da78668a-1596-11e8-a577-dca904824eec') + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x41\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"segm\x22\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x02\x00\x00\x00\x00\x00\x00\x00")) + + (b"grai\x66\x00\x00\x00" + + b"\x01\x00" + + (b"gbhd\x5c\x00\x00\x00" + + src_id.bytes + + flow_id.bytes + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00")) + + (b"dumy\x08\x00\x00\x00") + + (b"grai\x6E\x00\x00\x00" + + b"\x01\x00" + + (b"gbhd\x5c\x00\x00\x00" + + src_id.bytes + + flow_id.bytes + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + 
+ b"\x00\x00\x00\x00\x00\x00\x00\x00") + + (b"grdt\x08\x00\x00\x00")) + + (b"dumy\x08\x00\x00\x00")) + + self.assertEqual(len(segments[1]), 2) + self.assertEqual(segments[1][0].grain_type, "empty") + self.assertIsNone(segments[1][0].data) + self.assertEqual(segments[1][1].grain_type, "empty") + self.assertIsNone(segments[1][1].data) + + @async_test + async def test_loads_coded_audio(self): + (head, segments) = await loads(CODED_AUDIO_DATA) + + self.assertEqual(head['created'], datetime(2018, 2, 7, 10, 38, 5)) + self.assertEqual(head['id'], UUID('2dbc5889-15f1-427c-b727-5201dd3b053c')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('6ca3a217-f2c2-4344-832b-6ea87bc5ddb8')) + self.assertIn(head['segments'][0]['local_id'], segments) + self.assertEqual(len(segments[head['segments'][0]['local_id']]), head['segments'][0]['count']) + + start_ots = Timestamp(1420102800, 0) + ots = start_ots + total_samples = 0 + lengths = [603, + 690, + 690, + 689, + 690, + 690, + 689, + 690, + 690, + 689] + for grain in segments[head['segments'][0]['local_id']]: + self.assertIsInstance(grain, CODEDAUDIOGRAIN) + self.assertEqual(grain.grain_type, "coded_audio") + self.assertEqual(grain.source_id, UUID('38bfd902-b35f-40d6-9ecf-dc95869130cf')) + self.assertEqual(grain.flow_id, UUID('e615296b-ff40-4d95-8398-6a4082305f3a')) + self.assertEqual(grain.origin_timestamp, ots) + + self.assertEqual(grain.format, CogAudioFormat.AAC) + self.assertEqual(grain.channels, 2) + self.assertEqual(grain.samples, 1024) + self.assertEqual(grain.priming, 0) + self.assertEqual(grain.remainder, 0) + self.assertEqual(grain.sample_rate, 48000) + + self.assertEqual(len(grain.data), lengths[0]) + lengths.pop(0) + total_samples += grain.samples + ots = start_ots + TimeOffset.from_count(total_samples, grain.sample_rate) + + @async_test + async def test_loads_event(self): + self.maxDiff = None + (head, segments) = await loads(EVENT_DATA) + + self.assertEqual(head['created'], 
datetime(2018, 2, 7, 10, 37, 35)) + self.assertEqual(head['id'], UUID('3c45f8b5-1853-4723-808a-ab5cbf598ccc')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('db095cb5-050b-4b8c-92e8-31351422e93a')) + self.assertIn(head['segments'][0]['local_id'], segments) + self.assertEqual(len(segments[head['segments'][0]['local_id']]), head['segments'][0]['count']) + + start_ots = Timestamp(1447176512, 400000000) + ots = start_ots + line = '' + seqnum = 3107787894242499264 + for grain in segments[head['segments'][0]['local_id']]: + self.assertIsInstance(grain, EVENTGRAIN) + self.assertEqual(grain.grain_type, "event") + self.assertEqual(grain.source_id, UUID('2db4268e-82ef-49f9-bc0f-1726e8352d76')) + self.assertEqual(grain.flow_id, UUID('5333bae9-0768-4e31-be1c-fbd5dc2e34ac')) + self.assertEqual(grain.origin_timestamp, ots) + + self.assertEqual(grain.event_type, 'urn:x-ipstudio:format:event.ttext.ebuttlive') + self.assertEqual(grain.topic, '') + self.assertEqual(len(grain.event_data), 1) + self.assertEqual(grain.event_data[0].path, 'Subs') + self.assertEqual(grain.event_data[0].pre, line) + line = '\nv1.0140gb' + ots.to_iso8601_utc() + '' # NOQA + self.assertEqual(grain.event_data[0].post, line, msg="\n\nExpected:\n\n{!r}\n\nGot:\n\n{!r}\n\n".format(line, grain.event_data[0].post)) + + ots = ots + TimeOffset.from_nanosec(20000000) + seqnum += 20000000 From afbf83d4452555bd260e2948a73cd117486666e7 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 24 Apr 2019 16:48:52 +0100 Subject: [PATCH 11/76] Made python3 tox environment explicitly python3.6 --- Jenkinsfile | 20 ++++++++++---------- tox.ini | 7 ++++--- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 161be2d..34dc43b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -62,7 +62,7 @@ pipeline { } } } - stage ("Py3 Linting Check") { + stage ("Py36 Linting Check") { steps { script { env.lint3_result = "FAILURE" @@ -108,18 +108,18 @@ 
pipeline { stage ("Python 3 Unit Tests") { steps { script { - env.py3_result = "FAILURE" + env.py36_result = "FAILURE" } - bbcGithubNotify(context: "tests/py3", status: "PENDING") + bbcGithubNotify(context: "tests/py36", status: "PENDING") // Use a workdirectory in /tmp to avoid shebang length limitation - sh 'tox -e py3 --recreate --workdir /tmp/$(basename ${WORKSPACE})/tox-py3' + sh 'tox -e py36 --recreate --workdir /tmp/$(basename ${WORKSPACE})/tox-py36' script { - env.py3_result = "SUCCESS" // This will only run if the sh above succeeded + env.py36_result = "SUCCESS" // This will only run if the sh above succeeded } } post { always { - bbcGithubNotify(context: "tests/py3", status: env.py3_result) + bbcGithubNotify(context: "tests/py36", status: env.py36_result) } } } @@ -219,8 +219,8 @@ pipeline { bbcGithubNotify(context: "pypi/upload", status: "PENDING") sh 'rm -rf dist/*' bbcMakeGlobalWheel("py27") - bbcMakeGlobalWheel("py3") - bbcTwineUpload(toxenv: "py3", pypi: true) + bbcMakeGlobalWheel("py36") + bbcTwineUpload(toxenv: "py36", pypi: true) script { env.pypiUpload_result = "SUCCESS" // This will only run if the steps above succeeded } @@ -247,8 +247,8 @@ pipeline { bbcGithubNotify(context: "artifactory/upload", status: "PENDING") sh 'rm -rf dist/*' bbcMakeGlobalWheel("py27") - bbcMakeGlobalWheel("py3") - bbcTwineUpload(toxenv: "py3", pypi: false) + bbcMakeGlobalWheel("py36") + bbcTwineUpload(toxenv: "py36", pypi: false) script { env.artifactoryUpload_result = "SUCCESS" // This will only run if the steps above succeeded } diff --git a/tox.ini b/tox.ini index 973c290..951e1ef 100644 --- a/tox.ini +++ b/tox.ini @@ -4,14 +4,15 @@ # and then run "tox" from this directory. 
[tox] -envlist = py27, py3 +envlist = py27, py36 [testenv] commands = py27: python -m unittest discover -s tests -p test_*.py - py3: python -m unittest discover -s tests -p *test_*.py + py35: python -m unittest discover -s tests -p test_*.py + py36: python -m unittest discover -s tests -p *test_*.py deps = hypothesis >= 4.0.0 mock coverage - py3: aiofiles + py36: aiofiles From acbff83f5e3c34957ed54a6363a9f1e19d78d858 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 24 Apr 2019 16:52:32 +0100 Subject: [PATCH 12/76] linting --- mediagrains_async/bytesaio.py | 1 - tests/atest_gsf.py | 36 +++++++++++++++++------------------ 2 files changed, 17 insertions(+), 20 deletions(-) diff --git a/mediagrains_async/bytesaio.py b/mediagrains_async/bytesaio.py index c914730..7786e8e 100644 --- a/mediagrains_async/bytesaio.py +++ b/mediagrains_async/bytesaio.py @@ -19,7 +19,6 @@ A simple wrapper class BytesAIO which is an asynchronous version of BytesIO """ -import asyncio from io import BytesIO diff --git a/tests/atest_gsf.py b/tests/atest_gsf.py index 709f347..8f03512 100644 --- a/tests/atest_gsf.py +++ b/tests/atest_gsf.py @@ -26,10 +26,8 @@ from mediagrains.async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError, loads from mediagrains.grain import VIDEOGRAIN, AUDIOGRAIN, EVENTGRAIN, CODEDVIDEOGRAIN, CODEDAUDIOGRAIN from mediagrains.gsf import GSFDecodeError -from mediagrains.gsf import GSFEncodeError from mediagrains.gsf import GSFDecodeBadVersionError from mediagrains.gsf import GSFDecodeBadFileTypeError -from mediagrains.gsf import GSFEncodeAddToActiveDump from mediagrains.cogenums import CogFrameFormat, CogFrameLayout, CogAudioFormat from mediatimestamp.immutable import Timestamp, TimeOffset @@ -387,23 +385,23 @@ async def test_loads_decodes_tils(self): b"\x01\x00" + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + b"\x01\x00\x00\x00\x00\x00\x00\x00")) + - (b"grai\x8d\x00\x00\x00" + - b"\x01\x00" + - (b"gbhd\x83\x00\x00\x00" + - src_id.bytes + - 
flow_id.bytes + - b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + - b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + - b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + - b"\x00\x00\x00\x00\x00\x00\x00\x00" + - b"\x00\x00\x00\x00\x00\x00\x00\x00" + - (b"tils\x27\x00\x00\x00" + - b"\x01\x00" + - b"dummy timecode\x00\x00" + - b"\x07\x00\x00\x00" + - b"\x19\x00\x00\x00\x01\x00\x00\x00" + - b"\x00"))) + - (b"grai\x08\x00\x00\x00")) + (b"grai\x8d\x00\x00\x00" + + b"\x01\x00" + + (b"gbhd\x83\x00\x00\x00" + + src_id.bytes + + flow_id.bytes + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + (b"tils\x27\x00\x00\x00" + + b"\x01\x00" + + b"dummy timecode\x00\x00" + + b"\x07\x00\x00\x00" + + b"\x19\x00\x00\x00\x01\x00\x00\x00" + + b"\x00"))) + + (b"grai\x08\x00\x00\x00")) self.assertEqual(head['id'], UUID('d19c0b91-1590-11e8-8580-dca904824eec')) self.assertEqual(head['created'], datetime(1983, 3, 29, 15, 15, 15)) From 7933686ff5acdf69a9c32d07be55f710d6229abe Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 24 Apr 2019 17:10:17 +0100 Subject: [PATCH 13/76] Moved mediagrains.async to mediagrains.asyncio to avoid reserved name clash --- .flake8 | 2 +- mediagrains/{async.py => asyncio.py} | 0 tests/atest_gsf.py | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename mediagrains/{async.py => asyncio.py} (100%) diff --git a/.flake8 b/.flake8 index 7860623..624546a 100644 --- a/.flake8 +++ b/.flake8 @@ -1,4 +1,4 @@ [flake8] max-line-length = 160 exclude = .git,.tox,dist,deb_dist,__pycache__,._* -ignore = E121,E123,E126,E226,E24,E704,W503,W504,W606 \ No newline at end of file +ignore = E121,E123,E126,E226,E24,E704,W503,W504 \ No newline at end of file diff --git a/mediagrains/async.py b/mediagrains/asyncio.py similarity index 100% rename from 
mediagrains/async.py rename to mediagrains/asyncio.py diff --git a/tests/atest_gsf.py b/tests/atest_gsf.py index 8f03512..e48ac44 100644 --- a/tests/atest_gsf.py +++ b/tests/atest_gsf.py @@ -23,7 +23,7 @@ from datetime import datetime from uuid import UUID -from mediagrains.async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError, loads +from mediagrains.asyncio import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError, loads from mediagrains.grain import VIDEOGRAIN, AUDIOGRAIN, EVENTGRAIN, CODEDVIDEOGRAIN, CODEDAUDIOGRAIN from mediagrains.gsf import GSFDecodeError from mediagrains.gsf import GSFDecodeBadVersionError From 41ce075a1ce6a0eeac05a17e9cfe7000c7eb9209 Mon Sep 17 00:00:00 2001 From: Philip de Nier Date: Thu, 25 Apr 2019 15:21:31 +0100 Subject: [PATCH 14/76] add Grain.origin_timerange method --- CHANGELOG.md | 1 + mediagrains/grain.py | 10 ++++++++-- setup.py | 2 +- tests/test_grain.py | 23 ++++++++++++++++++++++- 4 files changed, 32 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ed5f0e..b2e930d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## 2.6.0 (Under development) - Added support for async methods to gsf decoder in python 3.6+ +- Added `Grain.origin_timerange` method. 
## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does diff --git a/mediagrains/grain.py b/mediagrains/grain.py index bd3fd78..2377ecc 100644 --- a/mediagrains/grain.py +++ b/mediagrains/grain.py @@ -27,7 +27,7 @@ from six import string_types from uuid import UUID -from mediatimestamp.immutable import Timestamp, TimeOffset +from mediatimestamp.immutable import Timestamp, TimeOffset, TimeRange from collections import Sequence, MutableSequence, Mapping from fractions import Fraction from copy import copy, deepcopy @@ -120,12 +120,15 @@ class GRAIN(Sequence): How long the data would be expected to be based on what's listed in the metadata -In addition there is a method provided for convenience: +In addition these methods are provided for convenience: final_origin_timestamp() The origin timestamp of the final sample in the grain. For most grain types this is the same as origin_timestamp, but not for audio grains. + +origin_timerange() + The origin time range covered by the samples in the grain. 
""" def __init__(self, meta, data): self.meta = meta @@ -247,6 +250,9 @@ def origin_timestamp(self, value): def final_origin_timestamp(self): return self.origin_timestamp + def origin_timerange(self): + return TimeRange(self.origin_timestamp, self.final_origin_timestamp(), TimeRange.INCLUSIVE) + @property def sync_timestamp(self): return Timestamp.from_tai_sec_nsec(self.meta['grain']['sync_timestamp']) diff --git a/setup.py b/setup.py index 915691e..f951bdc 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.6.0.dev1", + version="2.6.0.dev2", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', diff --git a/tests/test_grain.py b/tests/test_grain.py index 5edb4e3..80f1e9d 100644 --- a/tests/test_grain.py +++ b/tests/test_grain.py @@ -20,7 +20,7 @@ import uuid from mediagrains import Grain, VideoGrain, AudioGrain, CodedVideoGrain, CodedAudioGrain, EventGrain from mediagrains.cogenums import CogFrameFormat, CogFrameLayout, CogAudioFormat -from mediatimestamp.immutable import Timestamp, TimeOffset +from mediatimestamp.immutable import Timestamp, TimeOffset, TimeRange import mock from fractions import Fraction import json @@ -41,6 +41,7 @@ def test_empty_grain_creation(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(cts)) self.assertEqual(grain.sync_timestamp, cts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(0, 1)) @@ -88,6 +89,7 @@ def test_empty_grain_creation_with_odd_data(self): self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), 
TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.source_id, src_id) self.assertEqual(grain.flow_id, flow_id) @@ -110,6 +112,7 @@ def test_empty_grain_creation_with_ots(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, ots) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(0, 1)) @@ -131,6 +134,7 @@ def test_empty_grain_creation_with_ots_and_sts(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(0, 1)) @@ -200,6 +204,7 @@ def test_empty_grain_with_meta(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -246,6 +251,7 @@ def test_empty_grain_setters(self): grain.origin_timestamp = ots self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) grain.sync_timestamp = sts self.assertEqual(grain.sync_timestamp, sts) @@ -329,6 +335,7 @@ def test_video_grain_create_YUV422_10bit(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + 
self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -539,6 +546,7 @@ def test_video_grain_with_numeric_identifiers(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -644,6 +652,7 @@ def test_video_grain_create_with_ots_and_no_sts(self): self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, ots) self.assertEqual(grain.creation_timestamp, cts) @@ -659,6 +668,7 @@ def test_video_grain_create_with_no_ots_and_no_sts(self): self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(cts)) self.assertEqual(grain.sync_timestamp, cts) self.assertEqual(grain.creation_timestamp, cts) @@ -825,6 +835,7 @@ def test_audio_grain_create_S16_PLANES(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(ots, ots + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -857,6 +868,7 @@ def test_audio_grain_create_fills_in_missing_sts(self): self.assertEqual(grain.flow_id, flow_id) 
self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(ots, ots + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, ots) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -887,6 +899,7 @@ def test_audio_grain_create_fills_in_missing_ots(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(cts, cts + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, cts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1319,6 +1332,7 @@ def test_coded_audio_grain_create_MP1(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(ots, ots + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1358,6 +1372,7 @@ def test_coded_audio_grain_create_without_sts(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(ots, ots + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, ots) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1396,6 +1411,7 @@ def test_coded_audio_grain_create_without_sts_or_ots(self): self.assertEqual(grain.flow_id, flow_id) 
self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(cts, cts + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, cts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1587,6 +1603,7 @@ def test_event_grain_create(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1616,6 +1633,7 @@ def test_event_grain_create_without_sts(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, ots) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1641,6 +1659,7 @@ def test_event_grain_create_without_sts_or_ots(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(cts)) self.assertEqual(grain.sync_timestamp, cts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1662,6 +1681,7 @@ def test_event_grain_create_fills_in_empty_meta(self): self.assertEqual(grain.grain_type, "event") self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(cts)) 
self.assertEqual(grain.sync_timestamp, cts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(0, 1)) @@ -1725,6 +1745,7 @@ def test_event_grain_create_from_meta_and_data(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) From 90789d716c16b4b5c96e05a141b9a3519382e1d1 Mon Sep 17 00:00:00 2001 From: Philip de Nier Date: Thu, 25 Apr 2019 16:03:38 +0100 Subject: [PATCH 15/76] add Grain.normalise_time method --- CHANGELOG.md | 1 + mediagrains/grain.py | 23 +++++++++++ tests/test_grain.py | 96 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 120 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b2e930d..31bb256 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## 2.6.0 (Under development) - Added support for async methods to gsf decoder in python 3.6+ - Added `Grain.origin_timerange` method. +- Added `Grain.normalise_time` method. ## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does diff --git a/mediagrains/grain.py b/mediagrains/grain.py index 2377ecc..5051d8d 100644 --- a/mediagrains/grain.py +++ b/mediagrains/grain.py @@ -129,6 +129,10 @@ class GRAIN(Sequence): origin_timerange() The origin time range covered by the samples in the grain. + +normalise_time(value) + Returns a normalised Timestamp, TimeOffset or TimeRange using the video frame rate or audio sample rate. 
+ """ def __init__(self, meta, data): self.meta = meta @@ -253,6 +257,9 @@ def final_origin_timestamp(self): def origin_timerange(self): return TimeRange(self.origin_timestamp, self.final_origin_timestamp(), TimeRange.INCLUSIVE) + def normalise_time(self, value): + return value + @property def sync_timestamp(self): return Timestamp.from_tai_sec_nsec(self.meta['grain']['sync_timestamp']) @@ -892,6 +899,11 @@ def __init__(self, meta, data): self.meta['grain']['cog_frame']['layout'] = int(self.meta['grain']['cog_frame']['layout']) self.components = VIDEOGRAIN.COMPONENT_LIST(self) + def normalise_time(self, value): + if self.rate == 0: + return value + return value.normalise(self.rate.numerator, self.rate.denominator) + @property def format(self): return CogFrameFormat(self.meta['grain']['cog_frame']['format']) @@ -1070,6 +1082,11 @@ def __init__(self, meta, data): self.meta['grain']['cog_coded_frame']['format'] = int(self.meta['grain']['cog_coded_frame']['format']) self.meta['grain']['cog_coded_frame']['layout'] = int(self.meta['grain']['cog_coded_frame']['layout']) + def normalise_time(self, value): + if self.rate == 0: + return value + return value.normalise(self.rate.numerator, self.rate.denominator) + @property def format(self): return CogFrameFormat(self.meta['grain']['cog_coded_frame']['format']) @@ -1299,6 +1316,9 @@ def __init__(self, meta, data): def final_origin_timestamp(self): return (self.origin_timestamp + TimeOffset.from_count(self.samples - 1, self.sample_rate, 1)) + def normalise_time(self, value): + return value.normalise(self.sample_rate, 1) + @property def format(self): return CogAudioFormat(self.meta['grain']['cog_audio']['format']) @@ -1433,6 +1453,9 @@ def __init__(self, meta, data): def final_origin_timestamp(self): return (self.origin_timestamp + TimeOffset.from_count(self.samples - 1, self.sample_rate, 1)) + def normalise_time(self, value): + return value.normalise(self.sample_rate, 1) + @property def format(self): return 
CogAudioFormat(self.meta['grain']['cog_coded_audio']['format']) diff --git a/tests/test_grain.py b/tests/test_grain.py index 80f1e9d..db8f611 100644 --- a/tests/test_grain.py +++ b/tests/test_grain.py @@ -818,6 +818,28 @@ def test_grain_makes_videograin_without_data(self): self.assertEqual(grain.length, 0) self.assertEqual(grain.expected_length, 8192*1080) + def test_video_grain_normalise(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + + with mock.patch.object(Timestamp, "get_time", return_value=ots): + grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, + rate=Fraction(25, 1), + cog_frame_format=CogFrameFormat.S16_422_10BIT, + width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertEqual(grain.origin_timestamp, ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timestamp), + ots) + self.assertEqual(grain.normalise_time(grain.origin_timestamp), + ots.normalise(25, 1)) + self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange.from_single_timestamp(ots)) + self.assertEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange.from_single_timestamp(ots).normalise(25, 1)) + def test_audio_grain_create_S16_PLANES(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") @@ -1046,6 +1068,29 @@ def test_grain_makes_audiograin(self): self.assertEqual(grain.meta, meta) self.assertEqual(grain.data, data) + def test_audio_grain_normalise(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + ots = Timestamp.from_tai_sec_nsec("417798915:2") + + with mock.patch.object(Timestamp, "get_time", return_value=ots): + grain = AudioGrain(src_id, flow_id, + 
cog_audio_format=CogAudioFormat.S16_PLANES, + channels=2, samples=1920, sample_rate=48000) + + final_ts = ots + TimeOffset.from_count(1920 - 1, 48000, 1) + + self.assertEqual(grain.origin_timestamp, ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timestamp), + ots) + self.assertEqual(grain.normalise_time(grain.origin_timestamp), + ots.normalise(48000, 1)) + self.assertEqual(grain.final_origin_timestamp(), final_ts) + self.assertNotEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange(ots, final_ts)) + self.assertEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange(ots, final_ts).normalise(48000, 1)) + def test_coded_video_grain_create_VC2(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") @@ -1310,6 +1355,29 @@ def test_grain_makes_codedvideograin(self): self.assertEqual(grain.meta, meta) self.assertEqual(grain.data, data) + def test_coded_video_grain_normalise(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + + with mock.patch.object(Timestamp, "get_time", return_value=ots): + grain = CodedVideoGrain(src_id, flow_id, origin_timestamp=ots, + rate=Fraction(25, 1), + cog_frame_format=CogFrameFormat.VC2, + origin_width=1920, origin_height=1080, + cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertEqual(grain.origin_timestamp, ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timestamp), + ots) + self.assertEqual(grain.normalise_time(grain.origin_timestamp), + ots.normalise(25, 1)) + self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange.from_single_timestamp(ots)) + self.assertEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange.from_single_timestamp(ots).normalise(25, 1)) + def 
test_coded_audio_grain_create_MP1(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") @@ -1587,6 +1655,34 @@ def test_grain_makes_codedaudiograin(self): self.assertEqual(grain.meta, meta) self.assertEqual(grain.data, data) + def test_coded_audio_grain_normalise(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + ots = Timestamp.from_tai_sec_nsec("417798915:2") + + with mock.patch.object(Timestamp, "get_time", return_value=ots): + grain = CodedAudioGrain(src_id, flow_id, origin_timestamp=ots, + cog_audio_format=CogAudioFormat.MP1, + samples=1920, + channels=6, + priming=0, + remainder=0, + sample_rate=48000, + length=15360) + + final_ts = ots + TimeOffset.from_count(1920 - 1, 48000, 1) + + self.assertEqual(grain.origin_timestamp, ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timestamp), + ots) + self.assertEqual(grain.normalise_time(grain.origin_timestamp), + ots.normalise(48000, 1)) + self.assertEqual(grain.final_origin_timestamp(), final_ts) + self.assertNotEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange(ots, final_ts)) + self.assertEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange(ots, final_ts).normalise(48000, 1)) + def test_event_grain_create(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") From cdba06f5cefefcacc3471cc33ac533c583d005d7 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 25 Apr 2019 17:12:45 +0100 Subject: [PATCH 16/76] Jenkinsfile fix to flake8 inclusions and exclusions --- Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 34dc43b..27354c9 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -51,7 +51,7 @@ pipeline { } bbcGithubNotify(context: "lint/flake8_27", status: "PENDING") // Run the linter - sh 
'python2.7 -m flake8 --exclude=.svn,CVS,.bzr,.hg,.git,__pycache__,.tox,.eggs,*.egg,._*,./tests/atest_*,./mediagrains_async/*' + sh 'python2.7 -m flake8 --filename=mediagrains/*.py,tests/test_*.py' script { env.lint27_result = "SUCCESS" // This will only run if the sh above succeeded } @@ -69,7 +69,7 @@ pipeline { } bbcGithubNotify(context: "lint/flake8_3", status: "PENDING") // Run the linter - sh 'python3 -m flake8' + sh 'python3 -m flake8 --filename=mediagrains/*.py,mediagrains_async/*.py,tests/test_*.py,tests/atest_*.py' script { env.lint3_result = "SUCCESS" // This will only run if the sh above succeeded } From c7b6695d31272dd16572205d0ac79852309bf153 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Fri, 26 Apr 2019 10:46:15 +0100 Subject: [PATCH 17/76] Make Jenkinsfile use jamesba-change_branch branch of ci scripts --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 27354c9..e828e24 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,4 +1,4 @@ -@Library("rd-apmm-groovy-ci-library@v1.x") _ +@Library("rd-apmm-groovy-ci-library@jamesba-change_branch") _ /* Runs the following steps in parallel and reports results to GitHub: From e66c0adcf93d116cd5cb6dc63c2f0cc2cd7abe11 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Fri, 26 Apr 2019 11:17:10 +0100 Subject: [PATCH 18/76] Jenkinsfile switched back to using branch v1.x of ci scripts now that jamesba-change_branch has been merged --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index e828e24..27354c9 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,4 +1,4 @@ -@Library("rd-apmm-groovy-ci-library@jamesba-change_branch") _ +@Library("rd-apmm-groovy-ci-library@v1.x") _ /* Runs the following steps in parallel and reports results to GitHub: From b2b6a45fa931d68202644ac00793e3e888993a1b Mon Sep 17 00:00:00 2001 From: James Weaver Date: Fri, 26 Apr 2019 14:42:50 +0100 Subject: [PATCH 19/76] 2.6.0.dev3: 
version bump --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index f951bdc..7380149 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.6.0.dev2", + version="2.6.0.dev3", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', From 862301bf060a02213b16d73aecb10a4872801d6f Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 29 Apr 2019 14:43:02 +0100 Subject: [PATCH 20/76] BUGFIX: add submodules to setup.py --- setup.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 7380149..56b8485 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,10 @@ from sys import version_info packages = { - 'mediagrains': 'mediagrains' + 'mediagrains': 'mediagrains', + 'mediagrains.hypothesis': 'mediagrains/hypothesis', + 'mediagrains.comparison': 'mediagrains/comparison', + 'mediagrains.utils': 'mediagrains/utils' } packages_required = [ From 6b66f4212e6a60a62d3e204128dbdcba1d757eae Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 29 Apr 2019 14:43:33 +0100 Subject: [PATCH 21/76] v2.6.0.dev4: version bump --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 56b8485..15e62a6 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.6.0.dev3", + version="2.6.0.dev4", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', From 09849df13aadfb515fd8a2fe6b4458205db9d5f4 Mon Sep 17 00:00:00 2001 From: Sam Mesterton-Gibbons Date: Mon, 29 Apr 2019 17:29:58 +0100 Subject: [PATCH 22/76] Send Slack messages for dev branch Bumps to 2.6.0.dev5 --- Jenkinsfile | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff 
--git a/Jenkinsfile b/Jenkinsfile index 27354c9..e6b11be 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -296,7 +296,7 @@ pipeline { } post { always { - bbcSlackNotify(channel: "#apmm-cloudfit") + bbcSlackNotify(channel: "#apmm-cloudfit", branches: ["master", "dev"]) } } } diff --git a/setup.py b/setup.py index 15e62a6..f768d5f 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.6.0.dev4", + version="2.6.0.dev5", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', From d56b16eee5651c1c18174d563ad7004aa1b48ef4 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 2 Oct 2019 10:56:25 +0100 Subject: [PATCH 23/76] testsignalgenerator: Added tests for ColourBars at 100 and 75 in U8_444 --- CHANGELOG.md | 1 + setup.py | 2 +- tests/test_testsignalgenerator.py | 100 +++++++++++++++++++++++++++++- 3 files changed, 101 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31bb256..ee7a73d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - Added support for async methods to gsf decoder in python 3.6+ - Added `Grain.origin_timerange` method. - Added `Grain.normalise_time` method. 
+- Added `Colourbars` test signal generator ## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does diff --git a/setup.py b/setup.py index f768d5f..e218037 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.6.0.dev5", + version="2.6.0.dev6", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', diff --git a/tests/test_testsignalgenerator.py b/tests/test_testsignalgenerator.py index 5a03279..8067586 100644 --- a/tests/test_testsignalgenerator.py +++ b/tests/test_testsignalgenerator.py @@ -28,7 +28,7 @@ from math import sin, pi from mediagrains.cogenums import CogFrameFormat, CogAudioFormat -from mediagrains.testsignalgenerator import LumaSteps, Tone1K, Silence +from mediagrains.testsignalgenerator import LumaSteps, Tone1K, Silence, ColourBars src_id = UUID("f2b6a9b4-2ea8-11e8-a468-878cf869cbec") @@ -344,6 +344,104 @@ def test_lumasteps_with_step_2(self): rate.numerator, rate.denominator) +class TestColourBars(TestCase): + def test_colourbars100_u8_444(self): + """Testing that the ColourBars generator produces correct video frames + when the height is 4 lines and the width 240 pixels (to keep time taken + for testing under control""" + width = 240 + height = 4 + UUT = ColourBars(src_id, flow_id, + width, height, + intensity=1.0, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.U8_444, + rate=Fraction(25, 1), + step=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.U8_444) + 
self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = [ + (0xFF, 0x80, 0x80), + (0xFF, 0x00, 0x94), + (0xB2, 0xAB, 0x00), + (0x95, 0x2B, 0x15), + (0x69, 0xD4, 0xEA), + (0x4C, 0x54, 0xFF), + (0x1D, 0xFF, 0x6B), + (0x00, 0x80, 0x80)] + + for y in range(0, height): + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], expected[x//(width//8)][0]) + self.assertEqual(U[y*grain.components[1].stride + x], expected[x//(width//8)][1]) + self.assertEqual(V[y*grain.components[2].stride + x], expected[x//(width//8)][2]) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + def test_colourbars75_u8_444(self): + """Testing that the ColourBars generator produces correct video frames + when the height is 4 lines and the width 240 pixels (to keep time taken + for testing under control""" + width = 240 + height = 4 + UUT = ColourBars(src_id, flow_id, + width, height, + intensity=1.0, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.U8_444, + rate=Fraction(25, 1), + step=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.U8_444) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = 
grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = [ + (0xFF, 0x80, 0x80), + (0xFF, 0x00, 0x94), + (0xB2, 0xAB, 0x00), + (0x95, 0x2B, 0x15), + (0x69, 0xD4, 0xEA), + (0x4C, 0x54, 0xFF), + (0x1D, 0xFF, 0x6B), + (0x00, 0x80, 0x80)] + + for y in range(0, height): + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], int(0.75*expected[x//(width//8)][0])) + self.assertEqual(U[y*grain.components[1].stride + x], expected[x//(width//8)][1]) + self.assertEqual(V[y*grain.components[2].stride + x], expected[x//(width//8)][2]) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + if __name__ == "__main__": import unittest From 6d26f4d823ce95c4d473bbfeea1e160fdfae60e0 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 2 Oct 2019 12:00:39 +0100 Subject: [PATCH 24/76] testsignalgenerator: ColourBars for U8_444 --- mediagrains/testsignalgenerator.py | 93 +++++++++++++++++++++++++----- tests/test_testsignalgenerator.py | 2 +- 2 files changed, 81 insertions(+), 14 deletions(-) diff --git a/mediagrains/testsignalgenerator.py b/mediagrains/testsignalgenerator.py index f4618e2..78bf752 100644 --- a/mediagrains/testsignalgenerator.py +++ b/mediagrains/testsignalgenerator.py @@ -36,21 +36,21 @@ # information about formats # in the order: -# (num_bytes_per_sample, (offset, range), (offset, range), (offset, range)) +# (num_bytes_per_sample, (offset, range), (offset, range), (offset, range), active_bits_per_sample) # in YUV order pixel_ranges = { - CogFrameFormat.U8_444: (1, (16, 235-16), (128, 224), (128, 224)), - CogFrameFormat.U8_422: (1, (16, 235-16), (128, 224), (128, 224)), - CogFrameFormat.U8_420: (1, (16, 235-16), (128, 224), (128, 224)), - CogFrameFormat.S16_444_10BIT: (2, (64, 940-64), (512, 896), (512, 896)), - CogFrameFormat.S16_422_10BIT: (2, (64, 940-64), (512, 896), (512, 896)), - CogFrameFormat.S16_420_10BIT: (2, (64, 940-64), (512, 896), (512, 896)), - 
CogFrameFormat.S16_444_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584)), - CogFrameFormat.S16_422_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584)), - CogFrameFormat.S16_420_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584)), - CogFrameFormat.S16_444: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344)), - CogFrameFormat.S16_422: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344)), - CogFrameFormat.S16_420: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344)), + CogFrameFormat.U8_444: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.U8_422: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.U8_420: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.S16_444_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_422_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_420_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_444_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_422_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_420_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_444: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), + CogFrameFormat.S16_422: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), + CogFrameFormat.S16_420: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), } @@ -124,6 +124,73 @@ def LumaSteps(src_id, flow_id, width, height, vg.sync_timestamp = vg.origin_timestamp + +def ColourBars(src_id, flow_id, width, height, + intensity=0.75, + rate=Fraction(25, 1), + origin_timestamp=None, + cog_frame_format=CogFrameFormat.U8_444, + step=1): + """Returns a generator for colour bar video grains in specified format. 
+ :param src_id: source_id for grains + :param flow_id: flow_id for grains + :param width: width of grains + :param height: height of grains + :param intensity: intensity of colour bars (usually 1.0 or 0.75) + :param rate: rate of grains + :param origin_timestamp: the origin timestamp of the first grain. + :param step: The number of grains to increment by each time (values above 1 cause skipping)""" + + if cog_frame_format not in pixel_ranges: + raise ValueError("Not a supported format for this generator") + + _bpp = pixel_ranges[cog_frame_format][0] + _steps = 8 + bs = 16 - pixel_ranges[cog_frame_format][4] + + values = [ + (int((0xFFFF >> bs) * intensity), 0x8000 >> bs, 0x8000 >> bs), + (int((0xFFFF >> bs) * intensity), 0x0000 >> bs, 0x9400 >> bs), + (int((0xB200 >> bs) * intensity), 0xABFF >> bs, 0x0000 >> bs), + (int((0x95FF >> bs) * intensity), 0x2BFF >> bs, 0x15FF >> bs), + (int((0x69FF >> bs) * intensity), 0xD400 >> bs, 0xEA00 >> bs), + (int((0x4C00 >> bs) * intensity), 0x5400 >> bs, 0xFFFF >> bs), + (int((0x1DFF >> bs) * intensity), 0xFFFF >> bs, 0x6BFF >> bs), + (int((0x0000 >> bs) * intensity), 0x8000 >> bs, 0x8000 >> bs)] + + vg = VideoGrain(src_id, flow_id, origin_timestamp=origin_timestamp, + rate=rate, + cog_frame_format=cog_frame_format, + cog_frame_layout=CogFrameLayout.FULL_FRAME, + width=width, + height=height) + + lines = [bytearray(vg.components[0].width*_bpp), bytearray(vg.components[1].width*_bpp), bytearray(vg.components[2].width*_bpp)] + for c in range(0, 3): + for x in range(0, vg.components[c].width): + pos = x//(width//_steps) + if _bpp == 1: + lines[c][x] = values[pos][c] + elif _bpp == 2: + lines[c][2*x + 0] = values[pos][c] & 0xFF + lines[c][2*x + 1] = (values[pos][c] >> 8) &0xFF + + + for c in range(0, 3): + for y in range(0, vg.components[c].height): + vg.data[vg.components[c].offset + y*vg.components[c].stride:vg.components[c].offset + y*vg.components[c].stride + vg.components[c].width*_bpp] = lines[c] + + origin_timestamp = 
vg.origin_timestamp + count = 0 + while True: + yield deepcopy(vg) + count += step + vg.origin_timestamp = origin_timestamp + TimeOffset.from_count(count, + rate.numerator, rate.denominator) + vg.sync_timestamp = vg.origin_timestamp + + + def Tone1K(src_id, flow_id, samples=1920, channels=1, diff --git a/tests/test_testsignalgenerator.py b/tests/test_testsignalgenerator.py index 8067586..b75c585 100644 --- a/tests/test_testsignalgenerator.py +++ b/tests/test_testsignalgenerator.py @@ -401,7 +401,7 @@ def test_colourbars75_u8_444(self): height = 4 UUT = ColourBars(src_id, flow_id, width, height, - intensity=1.0, + intensity=0.75, origin_timestamp=origin_timestamp, cog_frame_format=CogFrameFormat.U8_444, rate=Fraction(25, 1), From 82e59821b3b36bea9179ad12092fea558ddc5662 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 2 Oct 2019 13:24:13 +0100 Subject: [PATCH 25/76] testsignalgenerator: test colourbars at 10bit 4:2:2 --- tests/test_testsignalgenerator.py | 53 +++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/tests/test_testsignalgenerator.py b/tests/test_testsignalgenerator.py index b75c585..4200414 100644 --- a/tests/test_testsignalgenerator.py +++ b/tests/test_testsignalgenerator.py @@ -441,6 +441,59 @@ def test_colourbars75_u8_444(self): ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + def test_colourbars75_s16_422_10bit(self): + """Testing that the ColourBars generator produces correct video frames + when the height is 4 lines and the width 240 pixels (to keep time taken + for testing under control""" + width = 240 + height = 4 + UUT = ColourBars(src_id, flow_id, + width, height, + intensity=0.75, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.S16_422_10BIT, + rate=Fraction(25, 1), + step=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + 
self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.S16_422_10BIT) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = [ + (0x3FF, 0x200, 0x200), + (0x3FF, 0x000, 0x250), + (0x2C8, 0x2AF, 0x000), + (0x257, 0x0AF, 0x057), + (0x1A7, 0x350, 0x3A8), + (0x130, 0x150, 0x3FF), + (0x077, 0x3FF, 0x1AF), + (0x000, 0x200, 0x200)] + + for y in range(0, height): + for x in range(0, width//2): + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 0], int(0.75*expected[(2*x + 0)//(width//8)][0]) & 0xFF) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 1], int(0.75*expected[(2*x + 0)//(width//8)][0]) >> 8) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 2], int(0.75*expected[(2*x + 1)//(width//8)][0]) & 0xFF) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 3], int(0.75*expected[(2*x + 1)//(width//8)][0]) >> 8) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 0], expected[x//(width//8)][1] & 0xFF) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 1], expected[x//(width//8)][1] >> 8) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 0], expected[x//(width//8)][2] & 0xFF) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 1], expected[x//(width//8)][2] >> 8) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + if __name__ == "__main__": import unittest From 0202e42c81fd71c6c541721d03055ba426b66e01 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 2 Oct 2019 17:22:27 +0100 Subject: [PATCH 26/76] testsignalgenerator: Moving black bar --- 
mediagrains/testsignalgenerator.py | 60 ++++++++- tests/test_testsignalgenerator.py | 190 ++++++++++++++++++++++++++++- 2 files changed, 242 insertions(+), 8 deletions(-) diff --git a/mediagrains/testsignalgenerator.py b/mediagrains/testsignalgenerator.py index 78bf752..b3556bf 100644 --- a/mediagrains/testsignalgenerator.py +++ b/mediagrains/testsignalgenerator.py @@ -32,7 +32,7 @@ from . import VideoGrain, AudioGrain from .cogenums import CogFrameFormat, CogFrameLayout, CogAudioFormat -__all__ = ["LumaSteps", "Tone1K", "Tone", "Silence"] +__all__ = ["LumaSteps", "Tone1K", "Tone", "Silence", "ColourBars", "MovingBarOverlay"] # information about formats # in the order: @@ -168,12 +168,12 @@ def ColourBars(src_id, flow_id, width, height, lines = [bytearray(vg.components[0].width*_bpp), bytearray(vg.components[1].width*_bpp), bytearray(vg.components[2].width*_bpp)] for c in range(0, 3): for x in range(0, vg.components[c].width): - pos = x//(width//_steps) + pos = x//(vg.components[c].width//_steps) if _bpp == 1: lines[c][x] = values[pos][c] elif _bpp == 2: lines[c][2*x + 0] = values[pos][c] & 0xFF - lines[c][2*x + 1] = (values[pos][c] >> 8) &0xFF + lines[c][2*x + 1] = (values[pos][c] >> 8) & 0xFF for c in range(0, 3): @@ -190,6 +190,60 @@ def ColourBars(src_id, flow_id, width, height, vg.sync_timestamp = vg.origin_timestamp +def MovingBarOverlay(grain_gen, height=100, speed=1.0): + """Call this method and pass an iterable of video grains as the first parameter. This method will overlay a moving black bar onto the grains. 
+ + :param grain_gen: An iterable which yields video grains + :param heigh: The height of the bar in pixels + :param speed: A floating point speed in pixels per frame + + :returns: A generator which yields video grains + """ + bar = None + for grain in grain_gen: + v_subs = (grain.components[0].height + grain.components[1].height - 1)//grain.components[1].height + + if bar is None: + if grain.format not in pixel_ranges: + raise ValueError("Not a supported format for this generator") + + _bpp = pixel_ranges[grain.format][0] + + bar = [bytearray(grain.components[0].width*_bpp * height), bytearray(grain.components[1].width*_bpp * height // v_subs), bytearray(grain.components[2].width*_bpp * height // v_subs)] + for y in range(0, height): + for x in range(0, grain.components[0].width): + bar[0][y*grain.components[0].width * _bpp + _bpp*x + 0] = pixel_ranges[grain.format][1][0] & 0xFF + if _bpp > 1: + bar[0][y*grain.components[0].width * _bpp + _bpp*x + 1] = pixel_ranges[grain.format][1][0] >> 8 + for y in range(0, height // v_subs): + for x in range(0, grain.components[1].width): + bar[1][y*grain.components[1].width * _bpp + _bpp*x + 0] = pixel_ranges[grain.format][2][0] & 0xFF + if _bpp > 1: + bar[1][y*grain.components[1].width * _bpp + _bpp*x + 1] = pixel_ranges[grain.format][2][0] >> 8 + bar[2][y*grain.components[2].width * _bpp + _bpp*x + 0] = pixel_ranges[grain.format][3][0] & 0xFF + if _bpp > 1: + bar[2][y*grain.components[2].width * _bpp + _bpp*x + 1] = pixel_ranges[grain.format][3][0] >> 8 + + fnum = int(speed*grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator)) + + for y in range(0, height): + grain.data[ + grain.components[0].offset + ((fnum + y) % grain.components[0].height)*grain.components[0].stride: + grain.components[0].offset + ((fnum + y) % grain.components[0].height)*grain.components[0].stride + grain.components[0].width*_bpp ] = ( + bar[0][y*grain.components[0].width * _bpp: (y+1)*grain.components[0].width * _bpp]) + for y 
in range(0, height // v_subs): + grain.data[ + grain.components[1].offset + ((fnum//v_subs + y) % grain.components[1].height)*grain.components[1].stride: + grain.components[1].offset + ((fnum//v_subs + y) % grain.components[1].height)*grain.components[1].stride + grain.components[1].width*_bpp ] = ( + bar[1][y*grain.components[1].width * _bpp: (y+1)*grain.components[1].width * _bpp]) + grain.data[ + grain.components[2].offset + ((fnum//v_subs + y) % grain.components[2].height)*grain.components[2].stride: + grain.components[2].offset + ((fnum//v_subs + y) % grain.components[2].height)*grain.components[2].stride + grain.components[2].width*_bpp ] = ( + bar[2][y*grain.components[2].width * _bpp: (y+1)*grain.components[2].width * _bpp]) + + + yield grain + def Tone1K(src_id, flow_id, samples=1920, diff --git a/tests/test_testsignalgenerator.py b/tests/test_testsignalgenerator.py index 4200414..b3c04b8 100644 --- a/tests/test_testsignalgenerator.py +++ b/tests/test_testsignalgenerator.py @@ -28,7 +28,7 @@ from math import sin, pi from mediagrains.cogenums import CogFrameFormat, CogAudioFormat -from mediagrains.testsignalgenerator import LumaSteps, Tone1K, Silence, ColourBars +from mediagrains.testsignalgenerator import LumaSteps, Tone1K, Silence, ColourBars, MovingBarOverlay src_id = UUID("f2b6a9b4-2ea8-11e8-a468-878cf869cbec") @@ -487,10 +487,190 @@ def test_colourbars75_s16_422_10bit(self): self.assertEqual(Y[y*grain.components[0].stride + 4*x + 1], int(0.75*expected[(2*x + 0)//(width//8)][0]) >> 8) self.assertEqual(Y[y*grain.components[0].stride + 4*x + 2], int(0.75*expected[(2*x + 1)//(width//8)][0]) & 0xFF) self.assertEqual(Y[y*grain.components[0].stride + 4*x + 3], int(0.75*expected[(2*x + 1)//(width//8)][0]) >> 8) - self.assertEqual(U[y*grain.components[1].stride + 2*x + 0], expected[x//(width//8)][1] & 0xFF) - self.assertEqual(U[y*grain.components[1].stride + 2*x + 1], expected[x//(width//8)][1] >> 8) - self.assertEqual(V[y*grain.components[2].stride + 2*x + 0], 
expected[x//(width//8)][2] & 0xFF) - self.assertEqual(V[y*grain.components[2].stride + 2*x + 1], expected[x//(width//8)][2] >> 8) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 0], expected[x//(width//16)][1] & 0xFF) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 1], expected[x//(width//16)][1] >> 8) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 0], expected[x//(width//16)][2] & 0xFF) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 1], expected[x//(width//16)][2] >> 8) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + +class TestMovingBarOverlay(TestCase): + def test_movingbar_colourbars100_u8_444(self): + """Testing that the ColourBars with MovingBarOverlay generators produces correct video frames + when the height is 4 lines and the width 240 pixels (to keep time taken for testing under control""" + width = 240 + height = 4 + UUT = MovingBarOverlay(ColourBars(src_id, flow_id, + width, height, + intensity=1.0, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.U8_444, + rate=Fraction(25, 1), + step=1), + height=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.U8_444) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = [ + (0xFF, 0x80, 0x80), + (0xFF, 0x00, 0x94), + (0xB2, 0xAB, 0x00), + (0x95, 0x2B, 0x15), + (0x69, 0xD4, 0xEA), + (0x4C, 0x54, 
0xFF), + (0x1D, 0xFF, 0x6B), + (0x00, 0x80, 0x80)] + + fnum = grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator) + for y in range(0, height): + if (fnum % height) == y: + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], 16) + self.assertEqual(U[y*grain.components[1].stride + x], 128) + self.assertEqual(V[y*grain.components[2].stride + x], 128) + else: + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], expected[x//(width//8)][0]) + self.assertEqual(U[y*grain.components[1].stride + x], expected[x//(width//8)][1]) + self.assertEqual(V[y*grain.components[2].stride + x], expected[x//(width//8)][2]) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + def test_movingbar_colourbars75_u8_444(self): + """Testing that the ColourBars with MovingBarOverlay generator produces correct video frames + when the height is 4 lines and the width 240 pixels (to keep time taken for testing under control""" + width = 240 + height = 4 + UUT = MovingBarOverlay(ColourBars(src_id, flow_id, + width, height, + intensity=0.75, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.U8_444, + rate=Fraction(25, 1), + step=1), + height=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.U8_444) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + 
expected = [ + (0xFF, 0x80, 0x80), + (0xFF, 0x00, 0x94), + (0xB2, 0xAB, 0x00), + (0x95, 0x2B, 0x15), + (0x69, 0xD4, 0xEA), + (0x4C, 0x54, 0xFF), + (0x1D, 0xFF, 0x6B), + (0x00, 0x80, 0x80)] + + fnum = grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator) + + for y in range(0, height): + if (fnum % height) == y: + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], 16) + self.assertEqual(U[y*grain.components[1].stride + x], 128) + self.assertEqual(V[y*grain.components[2].stride + x], 128) + else: + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], int(0.75*expected[x//(width//8)][0])) + self.assertEqual(U[y*grain.components[1].stride + x], expected[x//(width//8)][1]) + self.assertEqual(V[y*grain.components[2].stride + x], expected[x//(width//8)][2]) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + def test_movingbar_colourbars75_s16_422_10bit(self): + """Testing that the ColourBars with MovingBarOverlay generator produces correct video frames + when the height is 4 lines and the width 240 pixels (to keep time taken for testing under control""" + width = 240 + height = 4 + UUT = MovingBarOverlay(ColourBars(src_id, flow_id, + width, height, + intensity=0.75, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.S16_422_10BIT, + rate=Fraction(25, 1), + step=1), + height=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.S16_422_10BIT) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = 
grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = [ + (0x3FF, 0x200, 0x200), + (0x3FF, 0x000, 0x250), + (0x2C8, 0x2AF, 0x000), + (0x257, 0x0AF, 0x057), + (0x1A7, 0x350, 0x3A8), + (0x130, 0x150, 0x3FF), + (0x077, 0x3FF, 0x1AF), + (0x000, 0x200, 0x200)] + + fnum = grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator) + + for y in range(0, height): + if (fnum % height) == y: + for x in range(0, width//2): + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 0], 64) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 1], 0) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 2], 64) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 3], 0) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 0], 0) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 1], 2) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 0], 0) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 1], 2) + else: + for x in range(0, width//2): + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 0], int(0.75*expected[(2*x + 0)//(width//8)][0]) & 0xFF) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 1], int(0.75*expected[(2*x + 0)//(width//8)][0]) >> 8) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 2], int(0.75*expected[(2*x + 1)//(width//8)][0]) & 0xFF) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 3], int(0.75*expected[(2*x + 1)//(width//8)][0]) >> 8) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 0], expected[x//(width//16)][1] & 0xFF) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 1], expected[x//(width//16)][1] >> 8) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 0], expected[x//(width//16)][2] & 0xFF) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 1], expected[x//(width//16)][2] >> 8) + 
ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) From 01d3071454dcef96f7ce4559424ef31e3f2ca504 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Fri, 4 Oct 2019 10:14:24 +0100 Subject: [PATCH 27/76] testsignalgenerator: some minor bugfixes --- mediagrains/testsignalgenerator.py | 2 +- tests/test_testsignalgenerator.py | 80 ++++++++++-------------------- 2 files changed, 27 insertions(+), 55 deletions(-) diff --git a/mediagrains/testsignalgenerator.py b/mediagrains/testsignalgenerator.py index b3556bf..fc423a5 100644 --- a/mediagrains/testsignalgenerator.py +++ b/mediagrains/testsignalgenerator.py @@ -150,7 +150,7 @@ def ColourBars(src_id, flow_id, width, height, values = [ (int((0xFFFF >> bs) * intensity), 0x8000 >> bs, 0x8000 >> bs), - (int((0xFFFF >> bs) * intensity), 0x0000 >> bs, 0x9400 >> bs), + (int((0xE1FF >> bs) * intensity), 0x0000 >> bs, 0x9400 >> bs), (int((0xB200 >> bs) * intensity), 0xABFF >> bs, 0x0000 >> bs), (int((0x95FF >> bs) * intensity), 0x2BFF >> bs, 0x15FF >> bs), (int((0x69FF >> bs) * intensity), 0xD400 >> bs, 0xEA00 >> bs), diff --git a/tests/test_testsignalgenerator.py b/tests/test_testsignalgenerator.py index b3c04b8..6547fcb 100644 --- a/tests/test_testsignalgenerator.py +++ b/tests/test_testsignalgenerator.py @@ -345,6 +345,26 @@ def test_lumasteps_with_step_2(self): class TestColourBars(TestCase): + colourbars_expected_values_8bit = [ + (0xFF, 0x80, 0x80), + (0xE1, 0x00, 0x94), + (0xB2, 0xAB, 0x00), + (0x95, 0x2B, 0x15), + (0x69, 0xD4, 0xEA), + (0x4C, 0x54, 0xFF), + (0x1D, 0xFF, 0x6B), + (0x00, 0x80, 0x80)] + + colourbars_expected_values_10bit = [ + (0x3FF, 0x200, 0x200), + (0x387, 0x000, 0x250), + (0x2C8, 0x2AF, 0x000), + (0x257, 0x0AF, 0x057), + (0x1A7, 0x350, 0x3A8), + (0x130, 0x150, 0x3FF), + (0x077, 0x3FF, 0x1AF), + (0x000, 0x200, 0x200)] + def test_colourbars100_u8_444(self): """Testing that the ColourBars generator produces correct video frames when the height is 4 lines and the width 240 pixels (to keep time taken @@ 
-375,15 +395,7 @@ def test_colourbars100_u8_444(self): U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] - expected = [ - (0xFF, 0x80, 0x80), - (0xFF, 0x00, 0x94), - (0xB2, 0xAB, 0x00), - (0x95, 0x2B, 0x15), - (0x69, 0xD4, 0xEA), - (0x4C, 0x54, 0xFF), - (0x1D, 0xFF, 0x6B), - (0x00, 0x80, 0x80)] + expected = self.colourbars_expected_values_8bit for y in range(0, height): for x in range(0, width): @@ -423,15 +435,7 @@ def test_colourbars75_u8_444(self): U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] - expected = [ - (0xFF, 0x80, 0x80), - (0xFF, 0x00, 0x94), - (0xB2, 0xAB, 0x00), - (0x95, 0x2B, 0x15), - (0x69, 0xD4, 0xEA), - (0x4C, 0x54, 0xFF), - (0x1D, 0xFF, 0x6B), - (0x00, 0x80, 0x80)] + expected = self.colourbars_expected_values_8bit for y in range(0, height): for x in range(0, width): @@ -471,15 +475,7 @@ def test_colourbars75_s16_422_10bit(self): U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] - expected = [ - (0x3FF, 0x200, 0x200), - (0x3FF, 0x000, 0x250), - (0x2C8, 0x2AF, 0x000), - (0x257, 0x0AF, 0x057), - (0x1A7, 0x350, 0x3A8), - (0x130, 0x150, 0x3FF), - (0x077, 0x3FF, 0x1AF), - (0x000, 0x200, 0x200)] + expected = self.colourbars_expected_values_10bit for y in range(0, height): for x in range(0, width//2): @@ -526,15 +522,7 @@ def test_movingbar_colourbars100_u8_444(self): U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] - expected = [ - (0xFF, 0x80, 0x80), - (0xFF, 0x00, 
0x94), - (0xB2, 0xAB, 0x00), - (0x95, 0x2B, 0x15), - (0x69, 0xD4, 0xEA), - (0x4C, 0x54, 0xFF), - (0x1D, 0xFF, 0x6B), - (0x00, 0x80, 0x80)] + expected = TestColourBars.colourbars_expected_values_8bit fnum = grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator) for y in range(0, height): @@ -581,15 +569,7 @@ def test_movingbar_colourbars75_u8_444(self): U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] - expected = [ - (0xFF, 0x80, 0x80), - (0xFF, 0x00, 0x94), - (0xB2, 0xAB, 0x00), - (0x95, 0x2B, 0x15), - (0x69, 0xD4, 0xEA), - (0x4C, 0x54, 0xFF), - (0x1D, 0xFF, 0x6B), - (0x00, 0x80, 0x80)] + expected = TestColourBars.colourbars_expected_values_8bit fnum = grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator) @@ -637,15 +617,7 @@ def test_movingbar_colourbars75_s16_422_10bit(self): U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] - expected = [ - (0x3FF, 0x200, 0x200), - (0x3FF, 0x000, 0x250), - (0x2C8, 0x2AF, 0x000), - (0x257, 0x0AF, 0x057), - (0x1A7, 0x350, 0x3A8), - (0x130, 0x150, 0x3FF), - (0x077, 0x3FF, 0x1AF), - (0x000, 0x200, 0x200)] + expected = TestColourBars.colourbars_expected_values_10bit fnum = grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator) From 1c8ad35e4f169d921b014828035e994430bdf3d3 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Fri, 4 Oct 2019 10:15:02 +0100 Subject: [PATCH 28/76] testsignalgenerator: changleog update --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ee7a73d..b31a21e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - Added `Grain.origin_timerange` method. - Added `Grain.normalise_time` method. 
- Added `Colourbars` test signal generator +- Added `MovingBarOverlay` for test signal generators ## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does From 4a1b3264d4269a97a092180c7451740aa1c1392b Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 9 Oct 2019 13:50:01 +0100 Subject: [PATCH 29/76] py36 sublibrary: slight refactor to make it easier to include new py36+ code that is not async --- MANIFEST.in | 2 +- mediagrains/asyncio.py | 2 +- mediagrains_py36/__init__.py | 0 {mediagrains_async => mediagrains_py36/asyncio}/__init__.py | 0 {mediagrains_async => mediagrains_py36/asyncio}/aiobytes.py | 0 {mediagrains_async => mediagrains_py36/asyncio}/bytesaio.py | 0 setup.py | 3 ++- tests/{atest_gsf.py => test36_asyncio_gsf.py} | 0 tox.ini | 3 ++- 9 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 mediagrains_py36/__init__.py rename {mediagrains_async => mediagrains_py36/asyncio}/__init__.py (100%) rename {mediagrains_async => mediagrains_py36/asyncio}/aiobytes.py (100%) rename {mediagrains_async => mediagrains_py36/asyncio}/bytesaio.py (100%) rename tests/{atest_gsf.py => test36_asyncio_gsf.py} (100%) diff --git a/MANIFEST.in b/MANIFEST.in index a3c4119..f455058 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,6 +3,6 @@ include tox.ini include COPYING recursive-include examples *.gsf recursive-include tests *.py -recursive-include mediagrains_async *.py +recursive-include mediagrains_py36 *.py include ICLA.md include LICENSE.md diff --git a/mediagrains/asyncio.py b/mediagrains/asyncio.py index 16434a0..2ad0429 100644 --- a/mediagrains/asyncio.py +++ b/mediagrains/asyncio.py @@ -22,7 +22,7 @@ from sys import version_info if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): - from mediagrains_async import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError, loads # noqa: F401 + from mediagrains_py36.asyncio import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError, loads # noqa: 
F401 __all__ = ["AsyncGSFDecoder", "AsyncLazyLoaderUnloadedError", "loads"] else: diff --git a/mediagrains_py36/__init__.py b/mediagrains_py36/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mediagrains_async/__init__.py b/mediagrains_py36/asyncio/__init__.py similarity index 100% rename from mediagrains_async/__init__.py rename to mediagrains_py36/asyncio/__init__.py diff --git a/mediagrains_async/aiobytes.py b/mediagrains_py36/asyncio/aiobytes.py similarity index 100% rename from mediagrains_async/aiobytes.py rename to mediagrains_py36/asyncio/aiobytes.py diff --git a/mediagrains_async/bytesaio.py b/mediagrains_py36/asyncio/bytesaio.py similarity index 100% rename from mediagrains_async/bytesaio.py rename to mediagrains_py36/asyncio/bytesaio.py diff --git a/setup.py b/setup.py index e218037..59000ed 100644 --- a/setup.py +++ b/setup.py @@ -38,7 +38,8 @@ if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): - packages['mediagrains_async'] = 'mediagrains_async' + packages['mediagrains_py36'] = 'mediagrains_py36' + packages['mediagrains_py36.asyncio'] = 'mediagrains_py36/asyncio' package_names = list(packages.keys()) diff --git a/tests/atest_gsf.py b/tests/test36_asyncio_gsf.py similarity index 100% rename from tests/atest_gsf.py rename to tests/test36_asyncio_gsf.py diff --git a/tox.ini b/tox.ini index 951e1ef..46f3906 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,8 @@ envlist = py27, py36 commands = py27: python -m unittest discover -s tests -p test_*.py py35: python -m unittest discover -s tests -p test_*.py - py36: python -m unittest discover -s tests -p *test_*.py + py36: python -m unittest discover -s tests -p test*_*.py + py37: python -m unittest discover -s tests -p test*_*.py deps = hypothesis >= 4.0.0 mock From f655404e4a9b5fb8f99f0be3b64109226bf7b64e Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 9 Oct 2019 13:54:55 +0100 Subject: [PATCH 30/76] 2.6.0-dev7 version bump --- CHANGELOG.md | 1 + setup.py | 3 
++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b31a21e..2d06829 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - Added `Grain.normalise_time` method. - Added `Colourbars` test signal generator - Added `MovingBarOverlay` for test signal generators +- Added `mediagrains.numpy` sublibrary for handling video grains as numpy arrays, in python 3.6+ ## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does diff --git a/setup.py b/setup.py index 59000ed..1b9aac0 100644 --- a/setup.py +++ b/setup.py @@ -32,6 +32,7 @@ 'enum34 >= 1.1.6;python_version<"3.4"', "six >= 1.10.0", "frozendict >= 1.2", + 'numpy >= 1.17.2;python_version>="3.6"', ] deps_required = [] @@ -45,7 +46,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.6.0.dev6", + version="2.6.0.dev7", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', From f5ea2d30b17467c30522789b15b94c91b1105735 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 9 Oct 2019 15:16:47 +0100 Subject: [PATCH 31/76] numpy: tests for basic interface --- mediagrains/numpy.py | 29 ++ mediagrains_py36/numpy/__init__.py | 145 ++++++++ tests/test36_numpy_videograin.py | 508 +++++++++++++++++++++++++++++ 3 files changed, 682 insertions(+) create mode 100644 mediagrains/numpy.py create mode 100644 mediagrains_py36/numpy/__init__.py create mode 100644 tests/test36_numpy_videograin.py diff --git a/mediagrains/numpy.py b/mediagrains/numpy.py new file mode 100644 index 0000000..9c7bbb9 --- /dev/null +++ b/mediagrains/numpy.py @@ -0,0 +1,29 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""\ +Numpy compatible layer for mediagrains, but only available in python 3.6+ +""" + +from sys import version_info + +if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + from mediagrains_py36.numpy import * # noqa: F401 + + __all__ = [] +else: + __all__ = [] diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py new file mode 100644 index 0000000..14726c7 --- /dev/null +++ b/mediagrains_py36/numpy/__init__.py @@ -0,0 +1,145 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +"""\ +Library for handling mediagrains in numpy arrays +""" + +from mediagrains.cogenums import CogFrameFormat, CogFrameLayout +from mediagrains import grain as bytesgrain +from mediagrains import grain_constructors as bytesgrain_constructors +from mediatimestamp.immutable import Timestamp +from fractions import Fraction +from uuid import UUID + +import numpy as np + +from typing import Union, Optional, SupportsBytes + + +__all__ = ['VideoGrain', 'VIDEOGRAIN'] + + +class VIDEOGRAIN (bytesgrain.VIDEOGRAIN): + pass + + +def VideoGrain( + src_id_or_meta: Optional[Union[UUID, dict]]=None, + flow_id_or_data: Optional[Union[UUID, SupportsBytes]]=None, + creation_timestamp: Optional[Timestamp]=None, + origin_timestamp: Optional[Timestamp]=None, + sync_timestamp: Optional[Timestamp]=None, + rate: Fraction=Fraction(25, 1), + duration: Fraction=Fraction(1, 25), + cog_frame_format: CogFrameLayout=CogFrameFormat.UNKNOWN, + width: int=1920, + height: int=1080, + cog_frame_layout: CogFrameLayout=CogFrameLayout.UNKNOWN, + src_id: Optional[UUID]=None, + source_id: Optional[UUID]=None, + format: Optional[CogFrameFormat]=None, + layout: Optional[CogFrameLayout]=None, + flow_id: Optional[UUID]=None, + data: Optional[SupportsBytes]=None) -> VIDEOGRAIN: + """\ +Function called to construct a video grain either from existing data or with new data. + +First method of calling: + + VideoGrain(meta, data) + +where meta is a dictionary containing the grain metadata, and data is a bytes-like +object which contains the grain's payload. 
+ +A properly formated metadata dictionary for a Video Grain should look like: + + { + "@_ns": "urn:x-ipstudio:ns:0.1", + "grain": { + "grain_type": "audio", + "source_id": src_id, # str or uuid.UUID + "flow_id": flow_id, # str or uuid.UUID + "origin_timestamp": origin_timestamp, # str or mediatimestamps.Timestamp + "sync_timestamp": sync_timestamp, # str or mediatimestamps.Timestamp + "creation_timestamp": creation_timestamp, # str or mediatimestamps.Timestamp + "rate": { + "numerator": 0, # int + "denominator": 1, # int + }, + "duration": { + "numerator": 0, # int + "denominator": 1, # int + }, + "cog_frame": { + "format": cog_frame_format, # int or CogFrameFormat + "width": width, # int + "height": height, # int + "layout": cog_frame_layout, # int of CogFrameLayout + "extension": 0, # int + "components": [ + { + "stride": luma_stride, # int + "width": luma_width, # int + "height": luma_height, # int + "length": luma_length # int + }, + { + "stride": chroma_stride, # int + "width": chroma_width, # int + "height": chroma_height, # int + "length": chroma_length # int + }, + { + "stride": chroma_stride, # int + "width": chroma_width, # int + "height": chroma_height, # int + "length": chroma_length # int + }, + ] + } + } + } + +Alternatively it may be called as: + + VideoGrain(src_id, flow_id, + origin_timestamp=None, + sync_timestamp=None, + rate=Fraction(25, 1), + duration=Fraction(1, 25), + cog_frame_format=CogFrameFormat.UNKNOWN, + width=1920, + height=1080, + cog_frame_layout=CogFrameLayout.UNKNOWN, + data=None): + +in which case a new grain will be constructed with type "video" and the +specified metadata. If the data argument is None then a new bytearray object +will be constructed with size determined by the format, height, and width. +The components array will similarly be filled out automatically with correct +data for the format and size specified. 
+ + +In either case the value returned by this function will be an instance of the +class mediagrains.grain.VIDEOGRAIN, and the data element stored within it will be an +instance of the class numpy.ndarray. + +(the parameters "source_id" and "src_id" are aliases for each other. source_id is probably prefered, +but src_id is kept avaialble for backwards compatibility) +""" + pass diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py new file mode 100644 index 0000000..d10f37e --- /dev/null +++ b/tests/test36_numpy_videograin.py @@ -0,0 +1,508 @@ +#!/usr/bin/python +# +# Copyright 2018 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from unittest import TestCase + +import uuid +from mediagrains.numpy import VideoGrain +from mediagrains.cogenums import CogFrameFormat, CogFrameLayout, CogAudioFormat +from mediatimestamp.immutable import Timestamp, TimeOffset, TimeRange +import mock +from fractions import Fraction +import json +from copy import copy, deepcopy + + +class TestGrain (TestCase): + def test_video_grain_create_YUV422_10bit(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=CogFrameFormat.S16_422_10BIT, + width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertEqual(grain.grain_type, "video") + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ots) + self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) + self.assertEqual(grain.sync_timestamp, sts) + self.assertEqual(grain.creation_timestamp, cts) + self.assertEqual(grain.rate, Fraction(25, 1)) + self.assertEqual(grain.duration, Fraction(1, 25)) + self.assertEqual(grain.timelabels, []) + self.assertEqual(grain.format, CogFrameFormat.S16_422_10BIT) + self.assertEqual(grain.width, 1920) + self.assertEqual(grain.height, 1080) + self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) + self.assertEqual(grain.extension, 0) + self.assertIsNone(grain.source_aspect_ratio) + self.assertIsNone(grain.pixel_aspect_ratio) + + self.assertEqual(len(grain.components), 3) + self.assertEqual(grain.components[0].stride, 1920*2) + 
self.assertEqual(grain.components[0].width, 1920) + self.assertEqual(grain.components[0].height, 1080) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, 1920*1080*2) + self.assertEqual(len(grain.components[0]), 5) + + self.assertEqual(grain.components[1].stride, 1920) + self.assertEqual(grain.components[1].width, 1920/2) + self.assertEqual(grain.components[1].height, 1080) + self.assertEqual(grain.components[1].offset, 1920*1080*2) + self.assertEqual(grain.components[1].length, 1920*1080) + self.assertEqual(len(grain.components[1]), 5) + + self.assertEqual(grain.components[2].stride, 1920) + self.assertEqual(grain.components[2].width, 1920/2) + self.assertEqual(grain.components[2].height, 1080) + self.assertEqual(grain.components[2].offset, 1920*1080*2 + 1920*1080) + self.assertEqual(grain.components[2].length, 1920*1080) + self.assertEqual(len(grain.components[2]), 5) + + self.assertIsInstance(grain.data, np.ndarray) + self.assertEqual(grain.data.nbytes, 1920*1080*2*2) + self.assertEqual(grain.data.dtype, np.dtype(np.int16)) + self.assertEqual(grain.data.size, 1920*1080*2) + self.assertEqual(grain.data.itemsize, 2) + self.assertEqual(grain.data.ndim, 1) + self.assertEqual(grain.data.shape, (1920*1080*2,)) + + self.assertEqual(repr(grain), "VideoGrain({!r},< numpy data of length {} >)".format(grain.meta, len(grain.data))) + + self.assertEqual(len(grain.components), 3) + self.assertEqual(grain.components[0].stride, 1920*2) + self.assertEqual(grain.components[0].width, 1920) + self.assertEqual(grain.components[0].height, 1080) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, 1920*1080*2) + self.assertIsInstance(grain.components[0].data, np.ndarray) + self.assertEqual(grain.components[0].data.nbytes, 1920*1080*2) + self.assertEqual(grain.components[0].data.dtype, np.dtype(np.int16)) + self.assertEqual(grain.components[0].data.size, 1920*1080) + 
self.assertEqual(grain.components[0].data.itemsize, 2) + self.assertEqual(grain.components[0].data.ndim, 2) + self.assertEqual(grain.components[0].data.shape, (1920, 1080)) + + self.assertEqual(grain.components[1].stride, 1920) + self.assertEqual(grain.components[1].width, 1920//2) + self.assertEqual(grain.components[1].height, 1080) + self.assertEqual(grain.components[1].offset, 1920*1080*2) + self.assertEqual(grain.components[1].length, 1920*1080) + self.assertIsInstance(grain.components[1].data, np.ndarray) + self.assertEqual(grain.components[1].data.nbytes, 1920*1080) + self.assertEqual(grain.components[1].data.dtype, np.dtype(np.int16)) + self.assertEqual(grain.components[1].data.size, 1920*1080//2) + self.assertEqual(grain.components[1].data.itemsize, 2) + self.assertEqual(grain.components[1].data.ndim, 2) + self.assertEqual(grain.components[1].data.shape, (1920//2, 1080)) + + self.assertEqual(grain.components[2].stride, 1920) + self.assertEqual(grain.components[2].width, 1920//2) + self.assertEqual(grain.components[2].height, 1080) + self.assertEqual(grain.components[2].offset, 1920*1080*3) + self.assertEqual(grain.components[2].length, 1920*1080) + self.assertIsInstance(grain.components[2].data, np.ndarray) + self.assertEqual(grain.components[2].data.nbytes, 1920*1080) + self.assertEqual(grain.components[2].data.dtype, np.dtype(np.int16)) + self.assertEqual(grain.components[2].data.size, 1920*1080//2) + self.assertEqual(grain.components[2].data.itemsize, 2) + self.assertEqual(grain.components[2].data.ndim, 2) + self.assertEqual(grain.components[2].data.shape, (1920//2, 1080)) + + self.assertEqual(grain.expected_length, 1920*1080*4) + + # Test that changes to the component arrays are reflected in the main data array + for y in range(0, 1080): + for x in range(0, 1920//2): + grain.components[0].data[2*x + 0, y] = (y*1920 + 2*x + 0)&0xFF + grain.components[1].data[x, y] = (y*1920//2 + x)&0xFF + 0x100 + grain.components[0].data[2*x + 1, y] = (y*1920 + 2*x + 
1)&0x3FF + grain.components[2].data[x, y] = (y*1920//2 + x)&0xFF + 0x200 + + for n in range(0, 1920*1080): + self.assertEqual(grain.data[n], n&0xFF) + for n in range(0, 1920*1080//2): + self.assertEqual(grain.data[1920*1080 + n], n&0xFF + 0x100) + for n in range(0, 1920*1080//2): + self.assertEqual(grain.data[3*1920*1080//2 + n], n&0xFF + 0x200) + + + # def test_video_grain_create_sizes(self): + # for (fmt, complens) in [ + # (CogFrameFormat.S32_444, (1920*1080*4, 1920*1080*4, 1920*1080*4)), + # (CogFrameFormat.S32_422, (1920*1080*4, 1920*1080*2, 1920*1080*2)), + # (CogFrameFormat.S32_420, (1920*1080*4, 1920*1080, 1920*1080)), + # (CogFrameFormat.S16_444_10BIT, (1920*1080*2, 1920*1080*2, 1920*1080*2)), + # (CogFrameFormat.S16_422_10BIT, (1920*1080*2, 1920*1080, 1920*1080)), + # (CogFrameFormat.S16_420_10BIT, (1920*1080*2, 1920*1080/2, 1920*1080/2)), + # (CogFrameFormat.U8_444, (1920*1080, 1920*1080, 1920*1080)), + # (CogFrameFormat.U8_422, (1920*1080, 1920*1080/2, 1920*1080/2)), + # (CogFrameFormat.U8_420, (1920*1080, 1920*1080/4, 1920*1080/4)), + # (CogFrameFormat.UYVY, (1920*1080*2,)), + # (CogFrameFormat.RGB, (1920*1080*3,)), + # (CogFrameFormat.RGBA, (1920*1080*4,)), + # (CogFrameFormat.v210, (40*128*1080,)), + # (CogFrameFormat.v216, (1920*1080*4,)), + # (CogFrameFormat.UNKNOWN, ()), + # ]: + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # cts = Timestamp.from_tai_sec_nsec("417798915:0") + # ots = Timestamp.from_tai_sec_nsec("417798915:5") + # sts = Timestamp.from_tai_sec_nsec("417798915:10") + + # with mock.patch.object(Timestamp, "get_time", return_value=cts): + # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + # cog_frame_format=fmt, + # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + # self.assertEqual(len(grain.components), len(complens)) + # offset = 0 + # for (complen, comp) in zip(complens, grain.components): + # 
self.assertEqual(complen, comp.length) + # self.assertEqual(offset, comp.offset) + # offset += complen + + # self.assertEqual(len(grain.data), offset) + + # def test_video_component_setters(self): + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # cts = Timestamp.from_tai_sec_nsec("417798915:0") + # ots = Timestamp.from_tai_sec_nsec("417798915:5") + # sts = Timestamp.from_tai_sec_nsec("417798915:10") + + # with mock.patch.object(Timestamp, "get_time", return_value=cts): + # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + # cog_frame_format=CogFrameFormat.S16_422_10BIT, + # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + # grain.components[0].stride = 23 + # self.assertEqual(grain.components[0].stride, 23) + # grain.components[0].width = 23 + # self.assertEqual(grain.components[0].width, 23) + # grain.components[0].height = 23 + # self.assertEqual(grain.components[0].height, 23) + # grain.components[0].offset = 23 + # self.assertEqual(grain.components[0].offset, 23) + # grain.components[0].length = 23 + # self.assertEqual(grain.components[0].length, 23) + + # grain.components[0]['length'] = 17 + # self.assertEqual(grain.components[0].length, 17) + + # grain.components[0]['potato'] = 3 + # self.assertIn('potato', grain.components[0]) + # self.assertEqual(grain.components[0]['potato'], 3) + # del grain.components[0]['potato'] + # self.assertNotIn('potato', grain.components[0]) + + # grain.components.append({'stride': 1920, + # 'width': 1920, + # 'height': 1080, + # 'offset': 1920*1080*2*2, + # 'length': 1920*1080}) + + # self.assertEqual(grain.components[3].stride, 1920) + # self.assertEqual(grain.components[3].width, 1920) + # self.assertEqual(grain.components[3].height, 1080) + # self.assertEqual(grain.components[3].offset, 1920*1080*2*2) + # self.assertEqual(grain.components[3].length, 1920*1080) + + # 
self.assertEqual(len(grain.components), 4) + # del grain.components[3] + # self.assertEqual(len(grain.components), 3) + + # grain.components[0] = {'stride': 1920, + # 'width': 1920, + # 'height': 1080, + # 'offset': 1920*1080*2*2, + # 'length': 1920*1080} + + # self.assertEqual(grain.components[0].stride, 1920) + # self.assertEqual(grain.components[0].width, 1920) + # self.assertEqual(grain.components[0].height, 1080) + # self.assertEqual(grain.components[0].offset, 1920*1080*2*2) + # self.assertEqual(grain.components[0].length, 1920*1080) + + # def test_video_grain_with_sparse_meta(self): + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # cts = Timestamp.from_tai_sec_nsec("417798915:0") + # ots = Timestamp.from_tai_sec_nsec("417798915:5") + # sts = Timestamp.from_tai_sec_nsec("417798915:10") + + # meta = { + # "@_ns": "urn:x-ipstudio:ns:0.1", + # "grain": { + # "grain_type": "video", + # "source_id": str(src_id), + # "flow_id": str(flow_id), + # "origin_timestamp": str(ots), + # "sync_timestamp": str(sts), + # "creation_timestamp": str(cts), + # "rate": { + # "numerator": 25, + # "denominator": 1, + # }, + # "duration": { + # "numerator": 1, + # "denominator": 25, + # } + # }, + # } + + # with mock.patch.object(Timestamp, "get_time", return_value=cts): + # grain = VideoGrain(meta) + + # self.assertEqual(grain.format, CogFrameFormat.UNKNOWN) + # self.assertEqual(grain.width, 0) + # self.assertEqual(grain.height, 0) + # self.assertEqual(grain.layout, CogFrameLayout.UNKNOWN) + # self.assertEqual(grain.extension, 0) + # self.assertEqual(len(grain.components), 0) + + # def test_video_grain_with_numeric_identifiers(self): + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # cts = Timestamp.from_tai_sec_nsec("417798915:0") + # ots = Timestamp.from_tai_sec_nsec("417798915:5") + # sts = 
Timestamp.from_tai_sec_nsec("417798915:10") + + # with mock.patch.object(Timestamp, "get_time", return_value=cts): + # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + # cog_frame_format=0x2805, + # width=1920, height=1080, + # cog_frame_layout=0) + + # self.assertEqual(grain.grain_type, "video") + # self.assertEqual(grain.source_id, src_id) + # self.assertEqual(grain.flow_id, flow_id) + # self.assertEqual(grain.origin_timestamp, ots) + # self.assertEqual(grain.final_origin_timestamp(), ots) + # self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) + # self.assertEqual(grain.sync_timestamp, sts) + # self.assertEqual(grain.creation_timestamp, cts) + # self.assertEqual(grain.rate, Fraction(25, 1)) + # self.assertEqual(grain.duration, Fraction(1, 25)) + # self.assertEqual(grain.timelabels, []) + # self.assertEqual(grain.format, CogFrameFormat.S16_422_10BIT) + # self.assertEqual(grain.width, 1920) + # self.assertEqual(grain.height, 1080) + # self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) + # self.assertEqual(grain.extension, 0) + # self.assertIsNone(grain.source_aspect_ratio) + # self.assertIsNone(grain.pixel_aspect_ratio) + + # self.assertEqual(len(grain.components), 3) + # self.assertEqual(grain.components[0].stride, 1920*2) + # self.assertEqual(grain.components[0].width, 1920) + # self.assertEqual(grain.components[0].height, 1080) + # self.assertEqual(grain.components[0].offset, 0) + # self.assertEqual(grain.components[0].length, 1920*1080*2) + + # self.assertEqual(grain.components[1].stride, 1920) + # self.assertEqual(grain.components[1].width, 1920/2) + # self.assertEqual(grain.components[1].height, 1080) + # self.assertEqual(grain.components[1].offset, 1920*1080*2) + # self.assertEqual(grain.components[1].length, 1920*1080) + + # self.assertEqual(grain.components[2].stride, 1920) + # self.assertEqual(grain.components[2].width, 1920/2) + # self.assertEqual(grain.components[2].height, 1080) + # 
self.assertEqual(grain.components[2].offset, 1920*1080*2 + 1920*1080) + # self.assertEqual(grain.components[2].length, 1920*1080) + + # self.assertIsInstance(grain.data, bytearray) + # self.assertEqual(len(grain.data), 1920*1080*2*2) + + # self.assertEqual(repr(grain), "VideoGrain({!r},< binary data of length {} >)".format(grain.meta, len(grain.data))) + + # self.assertEqual(dict(grain.components[0]), {'stride': 1920*2, + # 'width': 1920, + # 'height': 1080, + # 'offset': 0, + # 'length': 1920*1080*2}) + + # def test_video_grain_setters(self): + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # cts = Timestamp.from_tai_sec_nsec("417798915:0") + # ots = Timestamp.from_tai_sec_nsec("417798915:5") + # sts = Timestamp.from_tai_sec_nsec("417798915:10") + + # with mock.patch.object(Timestamp, "get_time", return_value=cts): + # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + # cog_frame_format=CogFrameFormat.S16_422_10BIT, + # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + # grain.format = CogFrameFormat.S16_444 + # self.assertEqual(grain.format, CogFrameFormat.S16_444) + # grain.format = 0x0207 + # self.assertEqual(grain.format, CogFrameFormat.VC2) + + # grain.width = 2 + # self.assertEqual(grain.width, 2) + + # grain.height = 13 + # self.assertEqual(grain.height, 13) + + # grain.layout = CogFrameLayout.SEPARATE_FIELDS + # self.assertEqual(grain.layout, CogFrameLayout.SEPARATE_FIELDS) + # grain.layout = 0x02 + # self.assertEqual(grain.layout, CogFrameLayout.SINGLE_FIELD) + + # grain.extension = 1 + # self.assertEqual(grain.extension, 1) + + # grain.source_aspect_ratio = 50 + # self.assertEqual(grain.source_aspect_ratio, Fraction(50, 1)) + + # grain.pixel_aspect_ratio = 0.25 + # self.assertEqual(grain.pixel_aspect_ratio, Fraction(1, 4)) + + # def test_video_grain_fails_with_no_metadata(self): + # with self.assertRaises(AttributeError): + 
# VideoGrain(None) + + # def test_video_grain_create_with_ots_and_no_sts(self): + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # cts = Timestamp.from_tai_sec_nsec("417798915:0") + # ots = Timestamp.from_tai_sec_nsec("417798915:5") + + # with mock.patch.object(Timestamp, "get_time", return_value=cts): + # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, + # cog_frame_format=CogFrameFormat.S16_422_10BIT, + # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + # self.assertEqual(grain.origin_timestamp, ots) + # self.assertEqual(grain.final_origin_timestamp(), ots) + # self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) + # self.assertEqual(grain.sync_timestamp, ots) + # self.assertEqual(grain.creation_timestamp, cts) + + # def test_video_grain_create_with_no_ots_and_no_sts(self): + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # cts = Timestamp.from_tai_sec_nsec("417798915:0") + + # with mock.patch.object(Timestamp, "get_time", return_value=cts): + # grain = VideoGrain(src_id, flow_id, + # cog_frame_format=CogFrameFormat.S16_422_10BIT, + # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + # self.assertEqual(grain.origin_timestamp, cts) + # self.assertEqual(grain.final_origin_timestamp(), cts) + # self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(cts)) + # self.assertEqual(grain.sync_timestamp, cts) + # self.assertEqual(grain.creation_timestamp, cts) + + # def test_videograin_meta_is_json_serialisable(self): + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # cts = Timestamp.from_tai_sec_nsec("417798915:0") + # ots = Timestamp.from_tai_sec_nsec("417798915:5") + + # with mock.patch.object(Timestamp, "get_time", 
return_value=cts): + # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, + # cog_frame_format=CogFrameFormat.S16_422_10BIT, + # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + # self.assertEqual(json.loads(json.dumps(grain.meta)), grain.meta) + + # def test_video_grain_normalise(self): + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # ots = Timestamp.from_tai_sec_nsec("417798915:5") + + # with mock.patch.object(Timestamp, "get_time", return_value=ots): + # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, + # rate=Fraction(25, 1), + # cog_frame_format=CogFrameFormat.S16_422_10BIT, + # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + # self.assertEqual(grain.origin_timestamp, ots) + # self.assertNotEqual(grain.normalise_time(grain.origin_timestamp), + # ots) + # self.assertEqual(grain.normalise_time(grain.origin_timestamp), + # ots.normalise(25, 1)) + # self.assertEqual(grain.final_origin_timestamp(), ots) + # self.assertNotEqual(grain.normalise_time(grain.origin_timerange()), + # TimeRange.from_single_timestamp(ots)) + # self.assertEqual(grain.normalise_time(grain.origin_timerange()), + # TimeRange.from_single_timestamp(ots).normalise(25, 1)) + + # def test_copy(self): + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # cts = Timestamp.from_tai_sec_nsec("417798915:0") + # ots = Timestamp.from_tai_sec_nsec("417798915:5") + # sts = Timestamp.from_tai_sec_nsec("417798915:10") + + # with mock.patch.object(Timestamp, "get_time", return_value=cts): + # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + # cog_frame_format=CogFrameFormat.S16_422_10BIT, + # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + # grain.data[0] = 0x1B + # grain.data[1] = 0xBC + + # clone = copy(grain) + + # 
self.assertEqual(grain.data[0], clone.data[0]) + # self.assertEqual(grain.data[1], clone.data[1]) + + # grain.data[0] = 0xCA + # grain.data[1] = 0xFE + + # self.assertEqual(grain.data[0], clone.data[0]) + # self.assertEqual(grain.data[1], clone.data[1]) + + # def test_deepcopy(self): + # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + # cts = Timestamp.from_tai_sec_nsec("417798915:0") + # ots = Timestamp.from_tai_sec_nsec("417798915:5") + # sts = Timestamp.from_tai_sec_nsec("417798915:10") + + # with mock.patch.object(Timestamp, "get_time", return_value=cts): + # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + # cog_frame_format=CogFrameFormat.S16_422_10BIT, + # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + # grain.data[0] = 0x1B + # grain.data[1] = 0xBC + + # clone = deepcopy(grain) + + # self.assertEqual(grain.data[0], clone.data[0]) + # self.assertEqual(grain.data[1], clone.data[1]) + + # grain.data[0] = 0xCA + # grain.data[1] = 0xFE + + # self.assertNotEqual(grain.data[0], clone.data[0]) + # self.assertNotEqual(grain.data[1], clone.data[1]) From ed86a88b068979d36655f3da9f1023fc0dfe5f61 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 9 Oct 2019 17:38:10 +0100 Subject: [PATCH 32/76] numpy: Some initial working implementation, but lacking support for discontiguous frames, other formats, etc ... 
--- mediagrains/grain.py | 10 +- mediagrains_py36/numpy/__init__.py | 187 +++++----- tests/test36_numpy_videograin.py | 541 +++++++++-------------------- 3 files changed, 242 insertions(+), 496 deletions(-) diff --git a/mediagrains/grain.py b/mediagrains/grain.py index 5051d8d..0288448 100644 --- a/mediagrains/grain.py +++ b/mediagrains/grain.py @@ -793,7 +793,9 @@ class COMPONENT(Mapping): length The total length of the data for this component in bytes """ - def __init__(self, meta): + def __init__(self, parent, index, meta): + self.parent = parent + self.index = index self.meta = meta def __getitem__(self, key): @@ -862,16 +864,16 @@ def __init__(self, parent): self.parent = parent def __getitem__(self, key): - return VIDEOGRAIN.COMPONENT(self.parent.meta['grain']['cog_frame']['components'][key]) + return type(self.parent).COMPONENT(self, key, self.parent.meta['grain']['cog_frame']['components'][key]) def __setitem__(self, key, value): - self.parent.meta['grain']['cog_frame']['components'][key] = VIDEOGRAIN.COMPONENT(value) + self.parent.meta['grain']['cog_frame']['components'][key] = type(self.parent).COMPONENT(self, key, value) def __delitem__(self, key): del self.parent.meta['grain']['cog_frame']['components'][key] def insert(self, key, value): - self.parent.meta['grain']['cog_frame']['components'].insert(key, VIDEOGRAIN.COMPONENT(value)) + self.parent.meta['grain']['cog_frame']['components'].insert(key, type(self.parent).COMPONENT(self, key, value)) def __len__(self): return len(self.parent.meta['grain']['cog_frame']['components']) diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index 14726c7..ee7d6ca 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -25,6 +25,7 @@ from mediatimestamp.immutable import Timestamp from fractions import Fraction from uuid import UUID +from copy import copy, deepcopy import numpy as np @@ -34,112 +35,82 @@ __all__ = ['VideoGrain', 'VIDEOGRAIN'] +def 
_dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: + if fmt in [CogFrameFormat.U8_444, + CogFrameFormat.U8_422, + CogFrameFormat.U8_420, + CogFrameFormat.ALPHA_U8, + CogFrameFormat.YUYV, + CogFrameFormat.UYVY, + CogFrameFormat.AYUV, + CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.xRGB, + CogFrameFormat.BGRx, + CogFrameFormat.xBGR, + CogFrameFormat.RGBA, + CogFrameFormat.ARGB, + CogFrameFormat.BGRA, + CogFrameFormat.ABGR]: + return np.dtype(np.uint8) + elif fmt in [CogFrameFormat.S16_444_10BIT, + CogFrameFormat.S16_422_10BIT, + CogFrameFormat.S16_420_10BIT, + CogFrameFormat.ALPHA_S16_10BIT, + CogFrameFormat.S16_444_12BIT, + CogFrameFormat.S16_422_12BIT, + CogFrameFormat.S16_420_12BIT, + CogFrameFormat.ALPHA_S16_12BIT, + CogFrameFormat.S16_444, + CogFrameFormat.S16_422, + CogFrameFormat.S16_420, + CogFrameFormat.ALPHA_S16]: + return np.dtype(np.int16) + elif fmt in [CogFrameFormat.S32_444, + CogFrameFormat.S32_422, + CogFrameFormat.S32_420, + CogFrameFormat.ALPHA_S32, + CogFrameFormat.v210]: + return np.dtype(np.int32) + else: + raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") + + class VIDEOGRAIN (bytesgrain.VIDEOGRAIN): - pass - - -def VideoGrain( - src_id_or_meta: Optional[Union[UUID, dict]]=None, - flow_id_or_data: Optional[Union[UUID, SupportsBytes]]=None, - creation_timestamp: Optional[Timestamp]=None, - origin_timestamp: Optional[Timestamp]=None, - sync_timestamp: Optional[Timestamp]=None, - rate: Fraction=Fraction(25, 1), - duration: Fraction=Fraction(1, 25), - cog_frame_format: CogFrameLayout=CogFrameFormat.UNKNOWN, - width: int=1920, - height: int=1080, - cog_frame_layout: CogFrameLayout=CogFrameLayout.UNKNOWN, - src_id: Optional[UUID]=None, - source_id: Optional[UUID]=None, - format: Optional[CogFrameFormat]=None, - layout: Optional[CogFrameLayout]=None, - flow_id: Optional[UUID]=None, - data: Optional[SupportsBytes]=None) -> VIDEOGRAIN: - """\ -Function called to construct a 
video grain either from existing data or with new data. - -First method of calling: - - VideoGrain(meta, data) - -where meta is a dictionary containing the grain metadata, and data is a bytes-like -object which contains the grain's payload. - -A properly formated metadata dictionary for a Video Grain should look like: - - { - "@_ns": "urn:x-ipstudio:ns:0.1", - "grain": { - "grain_type": "audio", - "source_id": src_id, # str or uuid.UUID - "flow_id": flow_id, # str or uuid.UUID - "origin_timestamp": origin_timestamp, # str or mediatimestamps.Timestamp - "sync_timestamp": sync_timestamp, # str or mediatimestamps.Timestamp - "creation_timestamp": creation_timestamp, # str or mediatimestamps.Timestamp - "rate": { - "numerator": 0, # int - "denominator": 1, # int - }, - "duration": { - "numerator": 0, # int - "denominator": 1, # int - }, - "cog_frame": { - "format": cog_frame_format, # int or CogFrameFormat - "width": width, # int - "height": height, # int - "layout": cog_frame_layout, # int of CogFrameLayout - "extension": 0, # int - "components": [ - { - "stride": luma_stride, # int - "width": luma_width, # int - "height": luma_height, # int - "length": luma_length # int - }, - { - "stride": chroma_stride, # int - "width": chroma_width, # int - "height": chroma_height, # int - "length": chroma_length # int - }, - { - "stride": chroma_stride, # int - "width": chroma_width, # int - "height": chroma_height, # int - "length": chroma_length # int - }, - ] - } - } - } - -Alternatively it may be called as: - - VideoGrain(src_id, flow_id, - origin_timestamp=None, - sync_timestamp=None, - rate=Fraction(25, 1), - duration=Fraction(1, 25), - cog_frame_format=CogFrameFormat.UNKNOWN, - width=1920, - height=1080, - cog_frame_layout=CogFrameLayout.UNKNOWN, - data=None): - -in which case a new grain will be constructed with type "video" and the -specified metadata. 
If the data argument is None then a new bytearray object -will be constructed with size determined by the format, height, and width. -The components array will similarly be filled out automatically with correct -data for the format and size specified. - - -In either case the value returned by this function will be an instance of the -class mediagrains.grain.VIDEOGRAIN, and the data element stored within it will be an -instance of the class numpy.ndarray. - -(the parameters "source_id" and "src_id" are aliases for each other. source_id is probably prefered, -but src_id is kept avaialble for backwards compatibility) -""" - pass + def __init__(self, meta, data): + super().__init__(meta, data) + self._data = np.frombuffer(self._data, dtype=_dtype_from_cogframeformat(self.format)) + + def __copy__(self): + return VideoGrain(copy(self.meta), self.data) + + def __deepcopy__(self, memo): + return VideoGrain(deepcopy(self.meta), self.data.copy()) + + def __repr__(self): + if self.data is None: + return "{}({!r})".format(self._factory, self.meta) + else: + return "{}({!r},< numpy data of length {} >)".format(self._factory, self.meta, len(self.data)) + + class COMPONENT (bytesgrain.VIDEOGRAIN.COMPONENT): + def __init__(self, parent, index, meta): + super().__init__(parent, index, meta) + self.data = self.parent.parent.data[self.offset//self.parent.parent.data.itemsize:(self.offset + self.length)//self.parent.parent.data.itemsize] + if self.parent.parent.format != CogFrameFormat.v210: + self.data.shape = (self.height, self.width) + # It's nicer to list width, then height + self.data = self.data.transpose() + + +def VideoGrain(*args, **kwargs) -> VIDEOGRAIN: + """If the first argument is a mediagrains.VIDEOGRAIN then return a mediagrains.numpy.VIDEOGRAIN representing the same data. 
+ + Otherwise takes the same parameters as mediagrains.VideoGrain and returns the same grain converted into a mediagrains.numpy.VIDEOGRAIN + """ + if len(args) == 1 and isinstance(args[0], bytesgrain.VIDEOGRAIN): + rawgrain = args[0] + else: + rawgrain = bytesgrain_constructors.VideoGrain(*args, **kwargs) + + return VIDEOGRAIN(rawgrain.meta, rawgrain.data) diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index d10f37e..9a8fd80 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -26,6 +26,8 @@ import json from copy import copy, deepcopy +import numpy as np + class TestGrain (TestCase): def test_video_grain_create_YUV422_10bit(self): @@ -91,12 +93,6 @@ def test_video_grain_create_YUV422_10bit(self): self.assertEqual(repr(grain), "VideoGrain({!r},< numpy data of length {} >)".format(grain.meta, len(grain.data))) - self.assertEqual(len(grain.components), 3) - self.assertEqual(grain.components[0].stride, 1920*2) - self.assertEqual(grain.components[0].width, 1920) - self.assertEqual(grain.components[0].height, 1080) - self.assertEqual(grain.components[0].offset, 0) - self.assertEqual(grain.components[0].length, 1920*1080*2) self.assertIsInstance(grain.components[0].data, np.ndarray) self.assertEqual(grain.components[0].data.nbytes, 1920*1080*2) self.assertEqual(grain.components[0].data.dtype, np.dtype(np.int16)) @@ -105,11 +101,6 @@ def test_video_grain_create_YUV422_10bit(self): self.assertEqual(grain.components[0].data.ndim, 2) self.assertEqual(grain.components[0].data.shape, (1920, 1080)) - self.assertEqual(grain.components[1].stride, 1920) - self.assertEqual(grain.components[1].width, 1920//2) - self.assertEqual(grain.components[1].height, 1080) - self.assertEqual(grain.components[1].offset, 1920*1080*2) - self.assertEqual(grain.components[1].length, 1920*1080) self.assertIsInstance(grain.components[1].data, np.ndarray) self.assertEqual(grain.components[1].data.nbytes, 1920*1080) 
self.assertEqual(grain.components[1].data.dtype, np.dtype(np.int16)) @@ -118,11 +109,6 @@ def test_video_grain_create_YUV422_10bit(self): self.assertEqual(grain.components[1].data.ndim, 2) self.assertEqual(grain.components[1].data.shape, (1920//2, 1080)) - self.assertEqual(grain.components[2].stride, 1920) - self.assertEqual(grain.components[2].width, 1920//2) - self.assertEqual(grain.components[2].height, 1080) - self.assertEqual(grain.components[2].offset, 1920*1080*3) - self.assertEqual(grain.components[2].length, 1920*1080) self.assertIsInstance(grain.components[2].data, np.ndarray) self.assertEqual(grain.components[2].data.nbytes, 1920*1080) self.assertEqual(grain.components[2].data.dtype, np.dtype(np.int16)) @@ -134,375 +120,162 @@ def test_video_grain_create_YUV422_10bit(self): self.assertEqual(grain.expected_length, 1920*1080*4) # Test that changes to the component arrays are reflected in the main data array - for y in range(0, 1080): - for x in range(0, 1920//2): + for y in range(0, 16): + for x in range(0, 8): grain.components[0].data[2*x + 0, y] = (y*1920 + 2*x + 0)&0xFF grain.components[1].data[x, y] = (y*1920//2 + x)&0xFF + 0x100 - grain.components[0].data[2*x + 1, y] = (y*1920 + 2*x + 1)&0x3FF + grain.components[0].data[2*x + 1, y] = (y*1920 + 2*x + 1)&0xFF grain.components[2].data[x, y] = (y*1920//2 + x)&0xFF + 0x200 - for n in range(0, 1920*1080): - self.assertEqual(grain.data[n], n&0xFF) - for n in range(0, 1920*1080//2): - self.assertEqual(grain.data[1920*1080 + n], n&0xFF + 0x100) - for n in range(0, 1920*1080//2): - self.assertEqual(grain.data[3*1920*1080//2 + n], n&0xFF + 0x200) - - - # def test_video_grain_create_sizes(self): - # for (fmt, complens) in [ - # (CogFrameFormat.S32_444, (1920*1080*4, 1920*1080*4, 1920*1080*4)), - # (CogFrameFormat.S32_422, (1920*1080*4, 1920*1080*2, 1920*1080*2)), - # (CogFrameFormat.S32_420, (1920*1080*4, 1920*1080, 1920*1080)), - # (CogFrameFormat.S16_444_10BIT, (1920*1080*2, 1920*1080*2, 1920*1080*2)), - # 
(CogFrameFormat.S16_422_10BIT, (1920*1080*2, 1920*1080, 1920*1080)), - # (CogFrameFormat.S16_420_10BIT, (1920*1080*2, 1920*1080/2, 1920*1080/2)), - # (CogFrameFormat.U8_444, (1920*1080, 1920*1080, 1920*1080)), - # (CogFrameFormat.U8_422, (1920*1080, 1920*1080/2, 1920*1080/2)), - # (CogFrameFormat.U8_420, (1920*1080, 1920*1080/4, 1920*1080/4)), - # (CogFrameFormat.UYVY, (1920*1080*2,)), - # (CogFrameFormat.RGB, (1920*1080*3,)), - # (CogFrameFormat.RGBA, (1920*1080*4,)), - # (CogFrameFormat.v210, (40*128*1080,)), - # (CogFrameFormat.v216, (1920*1080*4,)), - # (CogFrameFormat.UNKNOWN, ()), - # ]: - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # cts = Timestamp.from_tai_sec_nsec("417798915:0") - # ots = Timestamp.from_tai_sec_nsec("417798915:5") - # sts = Timestamp.from_tai_sec_nsec("417798915:10") - - # with mock.patch.object(Timestamp, "get_time", return_value=cts): - # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, - # cog_frame_format=fmt, - # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) - - # self.assertEqual(len(grain.components), len(complens)) - # offset = 0 - # for (complen, comp) in zip(complens, grain.components): - # self.assertEqual(complen, comp.length) - # self.assertEqual(offset, comp.offset) - # offset += complen - - # self.assertEqual(len(grain.data), offset) - - # def test_video_component_setters(self): - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # cts = Timestamp.from_tai_sec_nsec("417798915:0") - # ots = Timestamp.from_tai_sec_nsec("417798915:5") - # sts = Timestamp.from_tai_sec_nsec("417798915:10") - - # with mock.patch.object(Timestamp, "get_time", return_value=cts): - # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, - # cog_frame_format=CogFrameFormat.S16_422_10BIT, - # width=1920, height=1080, 
cog_frame_layout=CogFrameLayout.FULL_FRAME) - - # grain.components[0].stride = 23 - # self.assertEqual(grain.components[0].stride, 23) - # grain.components[0].width = 23 - # self.assertEqual(grain.components[0].width, 23) - # grain.components[0].height = 23 - # self.assertEqual(grain.components[0].height, 23) - # grain.components[0].offset = 23 - # self.assertEqual(grain.components[0].offset, 23) - # grain.components[0].length = 23 - # self.assertEqual(grain.components[0].length, 23) - - # grain.components[0]['length'] = 17 - # self.assertEqual(grain.components[0].length, 17) - - # grain.components[0]['potato'] = 3 - # self.assertIn('potato', grain.components[0]) - # self.assertEqual(grain.components[0]['potato'], 3) - # del grain.components[0]['potato'] - # self.assertNotIn('potato', grain.components[0]) - - # grain.components.append({'stride': 1920, - # 'width': 1920, - # 'height': 1080, - # 'offset': 1920*1080*2*2, - # 'length': 1920*1080}) - - # self.assertEqual(grain.components[3].stride, 1920) - # self.assertEqual(grain.components[3].width, 1920) - # self.assertEqual(grain.components[3].height, 1080) - # self.assertEqual(grain.components[3].offset, 1920*1080*2*2) - # self.assertEqual(grain.components[3].length, 1920*1080) - - # self.assertEqual(len(grain.components), 4) - # del grain.components[3] - # self.assertEqual(len(grain.components), 3) - - # grain.components[0] = {'stride': 1920, - # 'width': 1920, - # 'height': 1080, - # 'offset': 1920*1080*2*2, - # 'length': 1920*1080} - - # self.assertEqual(grain.components[0].stride, 1920) - # self.assertEqual(grain.components[0].width, 1920) - # self.assertEqual(grain.components[0].height, 1080) - # self.assertEqual(grain.components[0].offset, 1920*1080*2*2) - # self.assertEqual(grain.components[0].length, 1920*1080) - - # def test_video_grain_with_sparse_meta(self): - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # cts = 
Timestamp.from_tai_sec_nsec("417798915:0") - # ots = Timestamp.from_tai_sec_nsec("417798915:5") - # sts = Timestamp.from_tai_sec_nsec("417798915:10") - - # meta = { - # "@_ns": "urn:x-ipstudio:ns:0.1", - # "grain": { - # "grain_type": "video", - # "source_id": str(src_id), - # "flow_id": str(flow_id), - # "origin_timestamp": str(ots), - # "sync_timestamp": str(sts), - # "creation_timestamp": str(cts), - # "rate": { - # "numerator": 25, - # "denominator": 1, - # }, - # "duration": { - # "numerator": 1, - # "denominator": 25, - # } - # }, - # } - - # with mock.patch.object(Timestamp, "get_time", return_value=cts): - # grain = VideoGrain(meta) - - # self.assertEqual(grain.format, CogFrameFormat.UNKNOWN) - # self.assertEqual(grain.width, 0) - # self.assertEqual(grain.height, 0) - # self.assertEqual(grain.layout, CogFrameLayout.UNKNOWN) - # self.assertEqual(grain.extension, 0) - # self.assertEqual(len(grain.components), 0) - - # def test_video_grain_with_numeric_identifiers(self): - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # cts = Timestamp.from_tai_sec_nsec("417798915:0") - # ots = Timestamp.from_tai_sec_nsec("417798915:5") - # sts = Timestamp.from_tai_sec_nsec("417798915:10") - - # with mock.patch.object(Timestamp, "get_time", return_value=cts): - # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, - # cog_frame_format=0x2805, - # width=1920, height=1080, - # cog_frame_layout=0) - - # self.assertEqual(grain.grain_type, "video") - # self.assertEqual(grain.source_id, src_id) - # self.assertEqual(grain.flow_id, flow_id) - # self.assertEqual(grain.origin_timestamp, ots) - # self.assertEqual(grain.final_origin_timestamp(), ots) - # self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) - # self.assertEqual(grain.sync_timestamp, sts) - # self.assertEqual(grain.creation_timestamp, cts) - # self.assertEqual(grain.rate, Fraction(25, 1)) - 
# self.assertEqual(grain.duration, Fraction(1, 25)) - # self.assertEqual(grain.timelabels, []) - # self.assertEqual(grain.format, CogFrameFormat.S16_422_10BIT) - # self.assertEqual(grain.width, 1920) - # self.assertEqual(grain.height, 1080) - # self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) - # self.assertEqual(grain.extension, 0) - # self.assertIsNone(grain.source_aspect_ratio) - # self.assertIsNone(grain.pixel_aspect_ratio) - - # self.assertEqual(len(grain.components), 3) - # self.assertEqual(grain.components[0].stride, 1920*2) - # self.assertEqual(grain.components[0].width, 1920) - # self.assertEqual(grain.components[0].height, 1080) - # self.assertEqual(grain.components[0].offset, 0) - # self.assertEqual(grain.components[0].length, 1920*1080*2) - - # self.assertEqual(grain.components[1].stride, 1920) - # self.assertEqual(grain.components[1].width, 1920/2) - # self.assertEqual(grain.components[1].height, 1080) - # self.assertEqual(grain.components[1].offset, 1920*1080*2) - # self.assertEqual(grain.components[1].length, 1920*1080) - - # self.assertEqual(grain.components[2].stride, 1920) - # self.assertEqual(grain.components[2].width, 1920/2) - # self.assertEqual(grain.components[2].height, 1080) - # self.assertEqual(grain.components[2].offset, 1920*1080*2 + 1920*1080) - # self.assertEqual(grain.components[2].length, 1920*1080) - - # self.assertIsInstance(grain.data, bytearray) - # self.assertEqual(len(grain.data), 1920*1080*2*2) - - # self.assertEqual(repr(grain), "VideoGrain({!r},< binary data of length {} >)".format(grain.meta, len(grain.data))) - - # self.assertEqual(dict(grain.components[0]), {'stride': 1920*2, - # 'width': 1920, - # 'height': 1080, - # 'offset': 0, - # 'length': 1920*1080*2}) - - # def test_video_grain_setters(self): - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # cts = Timestamp.from_tai_sec_nsec("417798915:0") - # ots = 
Timestamp.from_tai_sec_nsec("417798915:5") - # sts = Timestamp.from_tai_sec_nsec("417798915:10") - - # with mock.patch.object(Timestamp, "get_time", return_value=cts): - # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, - # cog_frame_format=CogFrameFormat.S16_422_10BIT, - # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) - - # grain.format = CogFrameFormat.S16_444 - # self.assertEqual(grain.format, CogFrameFormat.S16_444) - # grain.format = 0x0207 - # self.assertEqual(grain.format, CogFrameFormat.VC2) - - # grain.width = 2 - # self.assertEqual(grain.width, 2) - - # grain.height = 13 - # self.assertEqual(grain.height, 13) - - # grain.layout = CogFrameLayout.SEPARATE_FIELDS - # self.assertEqual(grain.layout, CogFrameLayout.SEPARATE_FIELDS) - # grain.layout = 0x02 - # self.assertEqual(grain.layout, CogFrameLayout.SINGLE_FIELD) - - # grain.extension = 1 - # self.assertEqual(grain.extension, 1) - - # grain.source_aspect_ratio = 50 - # self.assertEqual(grain.source_aspect_ratio, Fraction(50, 1)) - - # grain.pixel_aspect_ratio = 0.25 - # self.assertEqual(grain.pixel_aspect_ratio, Fraction(1, 4)) - - # def test_video_grain_fails_with_no_metadata(self): - # with self.assertRaises(AttributeError): - # VideoGrain(None) - - # def test_video_grain_create_with_ots_and_no_sts(self): - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # cts = Timestamp.from_tai_sec_nsec("417798915:0") - # ots = Timestamp.from_tai_sec_nsec("417798915:5") - - # with mock.patch.object(Timestamp, "get_time", return_value=cts): - # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, - # cog_frame_format=CogFrameFormat.S16_422_10BIT, - # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) - - # self.assertEqual(grain.origin_timestamp, ots) - # self.assertEqual(grain.final_origin_timestamp(), ots) - # self.assertEqual(grain.origin_timerange(), 
TimeRange.from_single_timestamp(ots)) - # self.assertEqual(grain.sync_timestamp, ots) - # self.assertEqual(grain.creation_timestamp, cts) - - # def test_video_grain_create_with_no_ots_and_no_sts(self): - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # cts = Timestamp.from_tai_sec_nsec("417798915:0") - - # with mock.patch.object(Timestamp, "get_time", return_value=cts): - # grain = VideoGrain(src_id, flow_id, - # cog_frame_format=CogFrameFormat.S16_422_10BIT, - # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) - - # self.assertEqual(grain.origin_timestamp, cts) - # self.assertEqual(grain.final_origin_timestamp(), cts) - # self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(cts)) - # self.assertEqual(grain.sync_timestamp, cts) - # self.assertEqual(grain.creation_timestamp, cts) - - # def test_videograin_meta_is_json_serialisable(self): - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # cts = Timestamp.from_tai_sec_nsec("417798915:0") - # ots = Timestamp.from_tai_sec_nsec("417798915:5") - - # with mock.patch.object(Timestamp, "get_time", return_value=cts): - # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, - # cog_frame_format=CogFrameFormat.S16_422_10BIT, - # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) - - # self.assertEqual(json.loads(json.dumps(grain.meta)), grain.meta) - - # def test_video_grain_normalise(self): - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # ots = Timestamp.from_tai_sec_nsec("417798915:5") - - # with mock.patch.object(Timestamp, "get_time", return_value=ots): - # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, - # rate=Fraction(25, 1), - # cog_frame_format=CogFrameFormat.S16_422_10BIT, - # width=1920, height=1080, 
cog_frame_layout=CogFrameLayout.FULL_FRAME) - - # self.assertEqual(grain.origin_timestamp, ots) - # self.assertNotEqual(grain.normalise_time(grain.origin_timestamp), - # ots) - # self.assertEqual(grain.normalise_time(grain.origin_timestamp), - # ots.normalise(25, 1)) - # self.assertEqual(grain.final_origin_timestamp(), ots) - # self.assertNotEqual(grain.normalise_time(grain.origin_timerange()), - # TimeRange.from_single_timestamp(ots)) - # self.assertEqual(grain.normalise_time(grain.origin_timerange()), - # TimeRange.from_single_timestamp(ots).normalise(25, 1)) - - # def test_copy(self): - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # cts = Timestamp.from_tai_sec_nsec("417798915:0") - # ots = Timestamp.from_tai_sec_nsec("417798915:5") - # sts = Timestamp.from_tai_sec_nsec("417798915:10") - - # with mock.patch.object(Timestamp, "get_time", return_value=cts): - # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, - # cog_frame_format=CogFrameFormat.S16_422_10BIT, - # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) - - # grain.data[0] = 0x1B - # grain.data[1] = 0xBC - - # clone = copy(grain) - - # self.assertEqual(grain.data[0], clone.data[0]) - # self.assertEqual(grain.data[1], clone.data[1]) - - # grain.data[0] = 0xCA - # grain.data[1] = 0xFE - - # self.assertEqual(grain.data[0], clone.data[0]) - # self.assertEqual(grain.data[1], clone.data[1]) - - # def test_deepcopy(self): - # src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - # flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - # cts = Timestamp.from_tai_sec_nsec("417798915:0") - # ots = Timestamp.from_tai_sec_nsec("417798915:5") - # sts = Timestamp.from_tai_sec_nsec("417798915:10") - - # with mock.patch.object(Timestamp, "get_time", return_value=cts): - # grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, - # 
cog_frame_format=CogFrameFormat.S16_422_10BIT, - # width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) - - # grain.data[0] = 0x1B - # grain.data[1] = 0xBC - - # clone = deepcopy(grain) - - # self.assertEqual(grain.data[0], clone.data[0]) - # self.assertEqual(grain.data[1], clone.data[1]) - - # grain.data[0] = 0xCA - # grain.data[1] = 0xFE - - # self.assertNotEqual(grain.data[0], clone.data[0]) - # self.assertNotEqual(grain.data[1], clone.data[1]) + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[y*1920 + 2*x + 0], (y*1920 + 2*x + 0)&0xFF) + self.assertEqual(grain.data[y*1920 + 2*x + 1], (y*1920 + 2*x + 1)&0xFF) + self.assertEqual(grain.data[1920*1080 + y*1920//2 + x], (y*1920//2 + x)&0xFF + 0x100) + self.assertEqual(grain.data[3*1920*1080//2 + y*1920//2 + x], (y*1920//2 + x)&0xFF + 0x200) + + def test_video_grain_create_YUV444_10bit(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=CogFrameFormat.S16_444_10BIT, + width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertEqual(grain.grain_type, "video") + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ots) + self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) + self.assertEqual(grain.sync_timestamp, sts) + self.assertEqual(grain.creation_timestamp, cts) + self.assertEqual(grain.rate, Fraction(25, 1)) + self.assertEqual(grain.duration, Fraction(1, 25)) + self.assertEqual(grain.timelabels, 
[]) + self.assertEqual(grain.format, CogFrameFormat.S16_444_10BIT) + self.assertEqual(grain.width, 1920) + self.assertEqual(grain.height, 1080) + self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) + self.assertEqual(grain.extension, 0) + self.assertIsNone(grain.source_aspect_ratio) + self.assertIsNone(grain.pixel_aspect_ratio) + + self.assertEqual(len(grain.components), 3) + self.assertEqual(grain.components[0].stride, 1920*2) + self.assertEqual(grain.components[0].width, 1920) + self.assertEqual(grain.components[0].height, 1080) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, 1920*1080*2) + self.assertEqual(len(grain.components[0]), 5) + + self.assertEqual(grain.components[1].stride, 1920*2) + self.assertEqual(grain.components[1].width, 1920) + self.assertEqual(grain.components[1].height, 1080) + self.assertEqual(grain.components[1].offset, 1920*1080*2) + self.assertEqual(grain.components[1].length, 1920*1080*2) + self.assertEqual(len(grain.components[1]), 5) + + self.assertEqual(grain.components[2].stride, 1920*2) + self.assertEqual(grain.components[2].width, 1920) + self.assertEqual(grain.components[2].height, 1080) + self.assertEqual(grain.components[2].offset, 1920*1080*4) + self.assertEqual(grain.components[2].length, 1920*1080*2) + self.assertEqual(len(grain.components[2]), 5) + + self.assertIsInstance(grain.data, np.ndarray) + self.assertEqual(grain.data.nbytes, 1920*1080*2*3) + self.assertEqual(grain.data.dtype, np.dtype(np.int16)) + self.assertEqual(grain.data.size, 1920*1080*3) + self.assertEqual(grain.data.itemsize, 2) + self.assertEqual(grain.data.ndim, 1) + self.assertEqual(grain.data.shape, (1920*1080*3,)) + + self.assertEqual(repr(grain), "VideoGrain({!r},< numpy data of length {} >)".format(grain.meta, len(grain.data))) + + self.assertIsInstance(grain.components[0].data, np.ndarray) + self.assertEqual(grain.components[0].data.nbytes, 1920*1080*2) + 
self.assertEqual(grain.components[0].data.dtype, np.dtype(np.int16)) + self.assertEqual(grain.components[0].data.size, 1920*1080) + self.assertEqual(grain.components[0].data.itemsize, 2) + self.assertEqual(grain.components[0].data.ndim, 2) + self.assertEqual(grain.components[0].data.shape, (1920, 1080)) + + self.assertIsInstance(grain.components[1].data, np.ndarray) + self.assertEqual(grain.components[1].data.nbytes, 1920*1080*2) + self.assertEqual(grain.components[1].data.dtype, np.dtype(np.int16)) + self.assertEqual(grain.components[1].data.size, 1920*1080) + self.assertEqual(grain.components[1].data.itemsize, 2) + self.assertEqual(grain.components[1].data.ndim, 2) + self.assertEqual(grain.components[1].data.shape, (1920, 1080)) + + self.assertIsInstance(grain.components[2].data, np.ndarray) + self.assertEqual(grain.components[2].data.nbytes, 1920*1080*2) + self.assertEqual(grain.components[2].data.dtype, np.dtype(np.int16)) + self.assertEqual(grain.components[2].data.size, 1920*1080) + self.assertEqual(grain.components[2].data.itemsize, 2) + self.assertEqual(grain.components[2].data.ndim, 2) + self.assertEqual(grain.components[2].data.shape, (1920, 1080)) + + self.assertEqual(grain.expected_length, 1920*1080*2*3) + + # Test that changes to the component arrays are reflected in the main data array + for y in range(0, 16): + for x in range(0, 16): + grain.components[0].data[x, y] = (y*1920 + x)&0xFF + grain.components[1].data[x, y] = (y*1920 + x)&0xFF + 0x100 + grain.components[2].data[x, y] = (y*1920 + x)&0xFF + 0x200 + + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*1920 + x], (y*1920 + x)&0xFF) + self.assertEqual(grain.data[1920*1080 + y*1920 + x], (y*1920 + x)&0xFF + 0x100) + self.assertEqual(grain.data[2*1920*1080 + y*1920 + x], (y*1920 + x)&0xFF + 0x200) + + def test_copy(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = 
Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=CogFrameFormat.S16_422_10BIT, + width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + grain.data[0] = 0x1BBC + + clone = copy(grain) + + self.assertEqual(grain.data[0], clone.data[0]) + + grain.data[0] = 0xCAFE + + self.assertEqual(grain.data[0], clone.data[0]) + + def test_deepcopy(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=CogFrameFormat.S16_422_10BIT, + width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + grain.data[0] = 0x1BBC + + clone = deepcopy(grain) + + self.assertEqual(grain.data[0], clone.data[0]) + + grain.data[0] = 0xCAFE + + self.assertNotEqual(grain.data[0], clone.data[0]) From 8de88ad1269ccd47a813bbf9b8a409d6d0ffd84c Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 10 Oct 2019 11:45:31 +0100 Subject: [PATCH 33/76] cogenums: Added support for planar RGB formats --- mediagrains/cogenums.py | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/mediagrains/cogenums.py b/mediagrains/cogenums.py index e885d1a..d5949c5 100644 --- a/mediagrains/cogenums.py +++ b/mediagrains/cogenums.py @@ -25,7 +25,15 @@ from enum import IntEnum -__all__ = ['CogFrameFormat', 'CogFrameLayout', 'CogAudioFormat', 'COG_FRAME_IS_PACKED', 'COG_FRAME_IS_COMPRESSED', 
'COG_FRAME_FORMAT_BYTES_PER_VALUE'] +__all__ = [ + 'CogFrameFormat', + 'CogFrameLayout', + 'CogAudioFormat', + 'COG_FRAME_IS_PACKED', + 'COG_FRAME_IS_COMPRESSED', + 'COG_FRAME_FORMAT_BYTES_PER_VALUE', + 'COG_FRAME_FORMAT_H_SHIFT', + 'COG_FRAME_FORMAT_V_SHIFT'] class CogFrameFormat(IntEnum): @@ -42,6 +50,7 @@ class CogFrameFormat(IntEnum): U8_444 = 0x2000 U8_422 = 0x2001 U8_420 = 0x2003 + U8_444_RGB = 0x2010 ALPHA_U8 = 0x2080 YUYV = 0x2100 UYVY = 0x2101 @@ -56,20 +65,24 @@ class CogFrameFormat(IntEnum): BGRA = 0x2116 ABGR = 0x2117 S16_444_10BIT = 0x2804 + S16_444_10BIT_RGB = 0x2814 S16_422_10BIT = 0x2805 S16_420_10BIT = 0x2807 ALPHA_S16_10BIT = 0x2884 v210 = 0x2906 S16_444_12BIT = 0x3004 + S16_444_12BIT_RGB = 0x3014 S16_422_12BIT = 0x3005 S16_420_12BIT = 0x3007 ALPHA_S16_12BIT = 0x3084 S16_444 = 0x4004 + S16_444_RGB = 0x4014 S16_422 = 0x4005 S16_420 = 0x4007 ALPHA_S16 = 0x4084 v216 = 0x4105 S32_444 = 0x8008 + S32_444_RGB = 0x8018 S32_422 = 0x8009 S32_420 = 0x800b ALPHA_S32 = 0x8088 @@ -117,6 +130,14 @@ def COG_FRAME_IS_COMPRESSED(fmt): return ((fmt >> 9) & 0x1) != 0 +def COG_FRAME_IS_ALPHA(fmt): + return ((fmt >> 7) & 0x1) != 0 + + +def COG_FRAME_IS_RGB(fmt): + return ((fmt >> 4) & 0x1) != 0 + + def COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt): if ((fmt) & 0xc) == 0: return 1 @@ -124,3 +145,11 @@ def COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt): return 2 else: return 4 + + +def COG_FRAME_FORMAT_H_SHIFT(fmt): + return (fmt & 0x1) + + +def COG_FRAME_FORMAT_V_SHIFT(fmt): + return ((fmt >> 1) & 0x1) \ No newline at end of file From a8d50a6cd41a1cc4b2731445832f04c74c6f7cfb Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 10 Oct 2019 11:46:25 +0100 Subject: [PATCH 34/76] numpy: Added construction and access to numpy-backed mediagrains for planar formats (including planar RGB). 
Currently no support for numpy access to packed formats --- mediagrains/numpy.py | 4 +- mediagrains_py36/numpy/__init__.py | 58 ++--- tests/test36_numpy_videograin.py | 351 ++++++++++++----------------- 3 files changed, 165 insertions(+), 248 deletions(-) diff --git a/mediagrains/numpy.py b/mediagrains/numpy.py index 9c7bbb9..496b9d6 100644 --- a/mediagrains/numpy.py +++ b/mediagrains/numpy.py @@ -22,8 +22,8 @@ from sys import version_info if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): - from mediagrains_py36.numpy import * # noqa: F401 + from mediagrains_py36.numpy import VideoGrain, VIDEOGRAIN # noqa: F401 - __all__ = [] + __all__ = ['VideoGrain', 'VIDEOGRAIN'] else: __all__ = [] diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index ee7d6ca..76d3668 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -19,61 +19,31 @@ Library for handling mediagrains in numpy arrays """ -from mediagrains.cogenums import CogFrameFormat, CogFrameLayout +from mediagrains.cogenums import ( + CogFrameFormat, + COG_FRAME_IS_PACKED, + COG_FRAME_IS_COMPRESSED, + COG_FRAME_FORMAT_BYTES_PER_VALUE) from mediagrains import grain as bytesgrain from mediagrains import grain_constructors as bytesgrain_constructors -from mediatimestamp.immutable import Timestamp -from fractions import Fraction -from uuid import UUID from copy import copy, deepcopy import numpy as np -from typing import Union, Optional, SupportsBytes - __all__ = ['VideoGrain', 'VIDEOGRAIN'] def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: - if fmt in [CogFrameFormat.U8_444, - CogFrameFormat.U8_422, - CogFrameFormat.U8_420, - CogFrameFormat.ALPHA_U8, - CogFrameFormat.YUYV, - CogFrameFormat.UYVY, - CogFrameFormat.AYUV, - CogFrameFormat.RGB, - CogFrameFormat.RGBx, - CogFrameFormat.xRGB, - CogFrameFormat.BGRx, - CogFrameFormat.xBGR, - CogFrameFormat.RGBA, - CogFrameFormat.ARGB, - CogFrameFormat.BGRA, - 
CogFrameFormat.ABGR]: - return np.dtype(np.uint8) - elif fmt in [CogFrameFormat.S16_444_10BIT, - CogFrameFormat.S16_422_10BIT, - CogFrameFormat.S16_420_10BIT, - CogFrameFormat.ALPHA_S16_10BIT, - CogFrameFormat.S16_444_12BIT, - CogFrameFormat.S16_422_12BIT, - CogFrameFormat.S16_420_12BIT, - CogFrameFormat.ALPHA_S16_12BIT, - CogFrameFormat.S16_444, - CogFrameFormat.S16_422, - CogFrameFormat.S16_420, - CogFrameFormat.ALPHA_S16]: - return np.dtype(np.int16) - elif fmt in [CogFrameFormat.S32_444, - CogFrameFormat.S32_422, - CogFrameFormat.S32_420, - CogFrameFormat.ALPHA_S32, - CogFrameFormat.v210]: - return np.dtype(np.int32) - else: - raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") + if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + if COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 1: + return np.dtype(np.uint8) + elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 2: + return np.dtype(np.int16) + elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 4: + return np.dtype(np.int32) + + raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") class VIDEOGRAIN (bytesgrain.VIDEOGRAIN): diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 9a8fd80..f73816d 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -19,222 +19,169 @@ import uuid from mediagrains.numpy import VideoGrain -from mediagrains.cogenums import CogFrameFormat, CogFrameLayout, CogAudioFormat -from mediatimestamp.immutable import Timestamp, TimeOffset, TimeRange +from mediagrains.cogenums import ( + CogFrameFormat, + CogFrameLayout, + COG_FRAME_FORMAT_BYTES_PER_VALUE, + COG_FRAME_FORMAT_H_SHIFT, + COG_FRAME_FORMAT_V_SHIFT) +from mediatimestamp.immutable import Timestamp, TimeRange import mock from fractions import Fraction -import json from copy import copy, deepcopy import numpy as np class TestGrain (TestCase): - def 
test_video_grain_create_YUV422_10bit(self): + def assertIsVideoGrain(self, + fmt, + src_id=uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429"), + flow_id=uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb"), + ots=Timestamp.from_tai_sec_nsec("417798915:5"), + sts=Timestamp.from_tai_sec_nsec("417798915:10"), + cts=Timestamp.from_tai_sec_nsec("417798915:0"), + rate=Fraction(25, 1), + width=1920, + height=1080): + def __inner(grain): + self.assertEqual(grain.grain_type, "video") + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ots) + self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) + self.assertEqual(grain.sync_timestamp, sts) + self.assertEqual(grain.creation_timestamp, cts) + self.assertEqual(grain.rate, rate) + self.assertEqual(grain.duration, 1/rate) + self.assertEqual(grain.timelabels, []) + self.assertEqual(grain.format, fmt) + self.assertEqual(grain.width, width) + self.assertEqual(grain.height, height) + self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) + self.assertEqual(grain.extension, 0) + self.assertIsNone(grain.source_aspect_ratio) + self.assertIsNone(grain.pixel_aspect_ratio) + + bps = COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) + hs = COG_FRAME_FORMAT_H_SHIFT(fmt) + vs = COG_FRAME_FORMAT_V_SHIFT(fmt) + + self.assertEqual(len(grain.components), 3) + self.assertEqual(grain.components[0].stride, width*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, width*height*bps) + self.assertEqual(len(grain.components[0]), 5) + + self.assertEqual(grain.components[1].stride, width*bps >> hs) + self.assertEqual(grain.components[1].width, width >> hs) + self.assertEqual(grain.components[1].height, height >> vs) + 
self.assertEqual(grain.components[1].offset, width*height*bps) + self.assertEqual(grain.components[1].length, width*height*bps >> (hs + vs)) + self.assertEqual(len(grain.components[1]), 5) + + self.assertEqual(grain.components[2].stride, width*bps >> hs) + self.assertEqual(grain.components[2].width, width >> hs) + self.assertEqual(grain.components[2].height, height >> vs) + self.assertEqual(grain.components[2].offset, width*height*bps + (width*height*bps >> (hs + vs))) + self.assertEqual(grain.components[2].length, width*height*bps >> (hs + vs)) + self.assertEqual(len(grain.components[2]), 5) + + if bps == 1: + dtype = np.dtype(np.uint8) + else: + dtype = np.dtype(np.int16) + + self.assertIsInstance(grain.data, np.ndarray) + self.assertEqual(grain.data.nbytes, width*height*bps + 2*(width*height*bps >> (hs + vs))) + self.assertEqual(grain.data.dtype, dtype) + self.assertEqual(grain.data.size, width*height + 2*(width*height >> (hs + vs))) + self.assertEqual(grain.data.itemsize, bps) + self.assertEqual(grain.data.ndim, 1) + self.assertEqual(grain.data.shape, (width*height + 2*(width*height >> (hs + vs)),)) + + self.assertEqual(repr(grain), "VideoGrain({!r},< numpy data of length {} >)".format(grain.meta, len(grain.data))) + + self.assertIsInstance(grain.components[0].data, np.ndarray) + self.assertEqual(grain.components[0].data.nbytes, width*height*bps) + self.assertEqual(grain.components[0].data.dtype, dtype) + self.assertEqual(grain.components[0].data.size, width*height) + self.assertEqual(grain.components[0].data.itemsize, bps) + self.assertEqual(grain.components[0].data.ndim, 2) + self.assertEqual(grain.components[0].data.shape, (width, height)) + + self.assertIsInstance(grain.components[1].data, np.ndarray) + self.assertEqual(grain.components[1].data.nbytes, width*height*bps >> (hs + vs)) + self.assertEqual(grain.components[1].data.dtype, dtype) + self.assertEqual(grain.components[1].data.size, width*height >> (hs + vs)) + 
self.assertEqual(grain.components[1].data.itemsize, bps) + self.assertEqual(grain.components[1].data.ndim, 2) + self.assertEqual(grain.components[1].data.shape, (width >> hs, height >> vs)) + + self.assertIsInstance(grain.components[2].data, np.ndarray) + self.assertEqual(grain.components[2].data.nbytes, width*height*bps >> (hs + vs)) + self.assertEqual(grain.components[2].data.dtype, dtype) + self.assertEqual(grain.components[2].data.size, width*height >> (hs + vs)) + self.assertEqual(grain.components[2].data.itemsize, bps) + self.assertEqual(grain.components[2].data.ndim, 2) + self.assertEqual(grain.components[2].data.shape, (width >> hs, height >> vs)) + + self.assertEqual(grain.expected_length, (width*height + 2*(width >> hs)*(height >> vs))*bps) + + # Test that changes to the component arrays are reflected in the main data array + for y in range(0, 16): + for x in range(0, 16): + grain.components[0].data[x, y] = (y*width + x) & 0x3F + + for y in range(0, 16 >> vs): + for x in range(0, 16 >> hs): + grain.components[1].data[x, y] = (y*(width >> hs) + x) & 0x3F + 0x40 + grain.components[2].data[x, y] = (y*(width >> hs) + x) & 0x3F + 0x50 + + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width + x], (y*width + x) & 0x3F) + + for y in range(0, 16 >> vs): + for x in range(0, 16 >> hs): + self.assertEqual(grain.data[width*height + y*(width >> hs) + x], (y*(width >> hs) + x) & 0x3F + 0x40) + self.assertEqual(grain.data[width*height + (width >> hs)*(height >> vs) + y*(width >> hs) + x], (y*(width >> hs) + x) & 0x3F + 0x50) + + return __inner + + def test_video_grain_create(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") cts = Timestamp.from_tai_sec_nsec("417798915:0") ots = Timestamp.from_tai_sec_nsec("417798915:5") sts = Timestamp.from_tai_sec_nsec("417798915:10") - with mock.patch.object(Timestamp, "get_time", return_value=cts): - grain = VideoGrain(src_id, 
flow_id, origin_timestamp=ots, sync_timestamp=sts, - cog_frame_format=CogFrameFormat.S16_422_10BIT, - width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) - - self.assertEqual(grain.grain_type, "video") - self.assertEqual(grain.source_id, src_id) - self.assertEqual(grain.flow_id, flow_id) - self.assertEqual(grain.origin_timestamp, ots) - self.assertEqual(grain.final_origin_timestamp(), ots) - self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) - self.assertEqual(grain.sync_timestamp, sts) - self.assertEqual(grain.creation_timestamp, cts) - self.assertEqual(grain.rate, Fraction(25, 1)) - self.assertEqual(grain.duration, Fraction(1, 25)) - self.assertEqual(grain.timelabels, []) - self.assertEqual(grain.format, CogFrameFormat.S16_422_10BIT) - self.assertEqual(grain.width, 1920) - self.assertEqual(grain.height, 1080) - self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) - self.assertEqual(grain.extension, 0) - self.assertIsNone(grain.source_aspect_ratio) - self.assertIsNone(grain.pixel_aspect_ratio) - - self.assertEqual(len(grain.components), 3) - self.assertEqual(grain.components[0].stride, 1920*2) - self.assertEqual(grain.components[0].width, 1920) - self.assertEqual(grain.components[0].height, 1080) - self.assertEqual(grain.components[0].offset, 0) - self.assertEqual(grain.components[0].length, 1920*1080*2) - self.assertEqual(len(grain.components[0]), 5) - - self.assertEqual(grain.components[1].stride, 1920) - self.assertEqual(grain.components[1].width, 1920/2) - self.assertEqual(grain.components[1].height, 1080) - self.assertEqual(grain.components[1].offset, 1920*1080*2) - self.assertEqual(grain.components[1].length, 1920*1080) - self.assertEqual(len(grain.components[1]), 5) - - self.assertEqual(grain.components[2].stride, 1920) - self.assertEqual(grain.components[2].width, 1920/2) - self.assertEqual(grain.components[2].height, 1080) - self.assertEqual(grain.components[2].offset, 1920*1080*2 + 1920*1080) - 
self.assertEqual(grain.components[2].length, 1920*1080) - self.assertEqual(len(grain.components[2]), 5) - - self.assertIsInstance(grain.data, np.ndarray) - self.assertEqual(grain.data.nbytes, 1920*1080*2*2) - self.assertEqual(grain.data.dtype, np.dtype(np.int16)) - self.assertEqual(grain.data.size, 1920*1080*2) - self.assertEqual(grain.data.itemsize, 2) - self.assertEqual(grain.data.ndim, 1) - self.assertEqual(grain.data.shape, (1920*1080*2,)) - - self.assertEqual(repr(grain), "VideoGrain({!r},< numpy data of length {} >)".format(grain.meta, len(grain.data))) - - self.assertIsInstance(grain.components[0].data, np.ndarray) - self.assertEqual(grain.components[0].data.nbytes, 1920*1080*2) - self.assertEqual(grain.components[0].data.dtype, np.dtype(np.int16)) - self.assertEqual(grain.components[0].data.size, 1920*1080) - self.assertEqual(grain.components[0].data.itemsize, 2) - self.assertEqual(grain.components[0].data.ndim, 2) - self.assertEqual(grain.components[0].data.shape, (1920, 1080)) - - self.assertIsInstance(grain.components[1].data, np.ndarray) - self.assertEqual(grain.components[1].data.nbytes, 1920*1080) - self.assertEqual(grain.components[1].data.dtype, np.dtype(np.int16)) - self.assertEqual(grain.components[1].data.size, 1920*1080//2) - self.assertEqual(grain.components[1].data.itemsize, 2) - self.assertEqual(grain.components[1].data.ndim, 2) - self.assertEqual(grain.components[1].data.shape, (1920//2, 1080)) - - self.assertIsInstance(grain.components[2].data, np.ndarray) - self.assertEqual(grain.components[2].data.nbytes, 1920*1080) - self.assertEqual(grain.components[2].data.dtype, np.dtype(np.int16)) - self.assertEqual(grain.components[2].data.size, 1920*1080//2) - self.assertEqual(grain.components[2].data.itemsize, 2) - self.assertEqual(grain.components[2].data.ndim, 2) - self.assertEqual(grain.components[2].data.shape, (1920//2, 1080)) - - self.assertEqual(grain.expected_length, 1920*1080*4) - - # Test that changes to the component arrays are 
reflected in the main data array - for y in range(0, 16): - for x in range(0, 8): - grain.components[0].data[2*x + 0, y] = (y*1920 + 2*x + 0)&0xFF - grain.components[1].data[x, y] = (y*1920//2 + x)&0xFF + 0x100 - grain.components[0].data[2*x + 1, y] = (y*1920 + 2*x + 1)&0xFF - grain.components[2].data[x, y] = (y*1920//2 + x)&0xFF + 0x200 - - for y in range(0, 16): - for x in range(0, 8): - self.assertEqual(grain.data[y*1920 + 2*x + 0], (y*1920 + 2*x + 0)&0xFF) - self.assertEqual(grain.data[y*1920 + 2*x + 1], (y*1920 + 2*x + 1)&0xFF) - self.assertEqual(grain.data[1920*1080 + y*1920//2 + x], (y*1920//2 + x)&0xFF + 0x100) - self.assertEqual(grain.data[3*1920*1080//2 + y*1920//2 + x], (y*1920//2 + x)&0xFF + 0x200) - - def test_video_grain_create_YUV444_10bit(self): - src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") - flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") - cts = Timestamp.from_tai_sec_nsec("417798915:0") - ots = Timestamp.from_tai_sec_nsec("417798915:5") - sts = Timestamp.from_tai_sec_nsec("417798915:10") - - with mock.patch.object(Timestamp, "get_time", return_value=cts): - grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, - cog_frame_format=CogFrameFormat.S16_444_10BIT, - width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) - - self.assertEqual(grain.grain_type, "video") - self.assertEqual(grain.source_id, src_id) - self.assertEqual(grain.flow_id, flow_id) - self.assertEqual(grain.origin_timestamp, ots) - self.assertEqual(grain.final_origin_timestamp(), ots) - self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) - self.assertEqual(grain.sync_timestamp, sts) - self.assertEqual(grain.creation_timestamp, cts) - self.assertEqual(grain.rate, Fraction(25, 1)) - self.assertEqual(grain.duration, Fraction(1, 25)) - self.assertEqual(grain.timelabels, []) - self.assertEqual(grain.format, CogFrameFormat.S16_444_10BIT) - self.assertEqual(grain.width, 1920) - 
self.assertEqual(grain.height, 1080) - self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) - self.assertEqual(grain.extension, 0) - self.assertIsNone(grain.source_aspect_ratio) - self.assertIsNone(grain.pixel_aspect_ratio) - - self.assertEqual(len(grain.components), 3) - self.assertEqual(grain.components[0].stride, 1920*2) - self.assertEqual(grain.components[0].width, 1920) - self.assertEqual(grain.components[0].height, 1080) - self.assertEqual(grain.components[0].offset, 0) - self.assertEqual(grain.components[0].length, 1920*1080*2) - self.assertEqual(len(grain.components[0]), 5) - - self.assertEqual(grain.components[1].stride, 1920*2) - self.assertEqual(grain.components[1].width, 1920) - self.assertEqual(grain.components[1].height, 1080) - self.assertEqual(grain.components[1].offset, 1920*1080*2) - self.assertEqual(grain.components[1].length, 1920*1080*2) - self.assertEqual(len(grain.components[1]), 5) - - self.assertEqual(grain.components[2].stride, 1920*2) - self.assertEqual(grain.components[2].width, 1920) - self.assertEqual(grain.components[2].height, 1080) - self.assertEqual(grain.components[2].offset, 1920*1080*4) - self.assertEqual(grain.components[2].length, 1920*1080*2) - self.assertEqual(len(grain.components[2]), 5) - - self.assertIsInstance(grain.data, np.ndarray) - self.assertEqual(grain.data.nbytes, 1920*1080*2*3) - self.assertEqual(grain.data.dtype, np.dtype(np.int16)) - self.assertEqual(grain.data.size, 1920*1080*3) - self.assertEqual(grain.data.itemsize, 2) - self.assertEqual(grain.data.ndim, 1) - self.assertEqual(grain.data.shape, (1920*1080*3,)) - - self.assertEqual(repr(grain), "VideoGrain({!r},< numpy data of length {} >)".format(grain.meta, len(grain.data))) - - self.assertIsInstance(grain.components[0].data, np.ndarray) - self.assertEqual(grain.components[0].data.nbytes, 1920*1080*2) - self.assertEqual(grain.components[0].data.dtype, np.dtype(np.int16)) - self.assertEqual(grain.components[0].data.size, 1920*1080) - 
self.assertEqual(grain.components[0].data.itemsize, 2) - self.assertEqual(grain.components[0].data.ndim, 2) - self.assertEqual(grain.components[0].data.shape, (1920, 1080)) - - self.assertIsInstance(grain.components[1].data, np.ndarray) - self.assertEqual(grain.components[1].data.nbytes, 1920*1080*2) - self.assertEqual(grain.components[1].data.dtype, np.dtype(np.int16)) - self.assertEqual(grain.components[1].data.size, 1920*1080) - self.assertEqual(grain.components[1].data.itemsize, 2) - self.assertEqual(grain.components[1].data.ndim, 2) - self.assertEqual(grain.components[1].data.shape, (1920, 1080)) - - self.assertIsInstance(grain.components[2].data, np.ndarray) - self.assertEqual(grain.components[2].data.nbytes, 1920*1080*2) - self.assertEqual(grain.components[2].data.dtype, np.dtype(np.int16)) - self.assertEqual(grain.components[2].data.size, 1920*1080) - self.assertEqual(grain.components[2].data.itemsize, 2) - self.assertEqual(grain.components[2].data.ndim, 2) - self.assertEqual(grain.components[2].data.shape, (1920, 1080)) - - self.assertEqual(grain.expected_length, 1920*1080*2*3) - - # Test that changes to the component arrays are reflected in the main data array - for y in range(0, 16): - for x in range(0, 16): - grain.components[0].data[x, y] = (y*1920 + x)&0xFF - grain.components[1].data[x, y] = (y*1920 + x)&0xFF + 0x100 - grain.components[2].data[x, y] = (y*1920 + x)&0xFF + 0x200 - - for y in range(0, 16): - for x in range(0, 16): - self.assertEqual(grain.data[y*1920 + x], (y*1920 + x)&0xFF) - self.assertEqual(grain.data[1920*1080 + y*1920 + x], (y*1920 + x)&0xFF + 0x100) - self.assertEqual(grain.data[2*1920*1080 + y*1920 + x], (y*1920 + x)&0xFF + 0x200) + for fmt in [CogFrameFormat.S16_444_10BIT, + CogFrameFormat.S16_422_10BIT, + CogFrameFormat.S16_420_10BIT, + CogFrameFormat.S16_444_12BIT, + CogFrameFormat.S16_422_12BIT, + CogFrameFormat.S16_420_12BIT, + CogFrameFormat.S16_444, + CogFrameFormat.S16_422, + CogFrameFormat.S16_420, + 
CogFrameFormat.U8_444, + CogFrameFormat.U8_422, + CogFrameFormat.U8_420, + CogFrameFormat.U8_444_RGB, + CogFrameFormat.S16_444_RGB, + CogFrameFormat.S16_444_12BIT_RGB, + CogFrameFormat.S16_444_10BIT_RGB]: + with self.subTest(fmt=fmt): + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=fmt, + width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertIsVideoGrain(fmt)(grain) def test_copy(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") From eb405e024c8b530262764ef0cbd6b8cc74ffb7ee Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 10 Oct 2019 13:36:49 +0100 Subject: [PATCH 35/76] numpy: Improved interface for component array access --- mediagrains_py36/numpy/__init__.py | 28 ++++++++++------ tests/test36_numpy_videograin.py | 52 +++++++++++++++--------------- 2 files changed, 45 insertions(+), 35 deletions(-) diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index 76d3668..03e5fd9 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -46,10 +46,29 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") +def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, components: bytesgrain.VIDEOGRAIN.COMPONENT_LIST): + if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + arrays = [] + for component in components: + component_data = data[component.offset//data.itemsize:(component.offset + component.length)//data.itemsize] + component_data.shape = (component.height, component.width) + arrays.append(component_data.transpose()) + return arrays + + raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") + + class VIDEOGRAIN 
(bytesgrain.VIDEOGRAIN): def __init__(self, meta, data): super().__init__(meta, data) self._data = np.frombuffer(self._data, dtype=_dtype_from_cogframeformat(self.format)) + self.component_data = _component_arrays_for_data_and_type(self._data, self.format, self.components) + + def __array__(self): + return np.array(self.data) + + def __bytes__(self): + return bytes(self.data) def __copy__(self): return VideoGrain(copy(self.meta), self.data) @@ -63,15 +82,6 @@ def __repr__(self): else: return "{}({!r},< numpy data of length {} >)".format(self._factory, self.meta, len(self.data)) - class COMPONENT (bytesgrain.VIDEOGRAIN.COMPONENT): - def __init__(self, parent, index, meta): - super().__init__(parent, index, meta) - self.data = self.parent.parent.data[self.offset//self.parent.parent.data.itemsize:(self.offset + self.length)//self.parent.parent.data.itemsize] - if self.parent.parent.format != CogFrameFormat.v210: - self.data.shape = (self.height, self.width) - # It's nicer to list width, then height - self.data = self.data.transpose() - def VideoGrain(*args, **kwargs) -> VIDEOGRAIN: """If the first argument is a mediagrains.VIDEOGRAIN then return a mediagrains.numpy.VIDEOGRAIN representing the same data. 
diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index f73816d..21518a4 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -105,41 +105,41 @@ def __inner(grain): self.assertEqual(repr(grain), "VideoGrain({!r},< numpy data of length {} >)".format(grain.meta, len(grain.data))) - self.assertIsInstance(grain.components[0].data, np.ndarray) - self.assertEqual(grain.components[0].data.nbytes, width*height*bps) - self.assertEqual(grain.components[0].data.dtype, dtype) - self.assertEqual(grain.components[0].data.size, width*height) - self.assertEqual(grain.components[0].data.itemsize, bps) - self.assertEqual(grain.components[0].data.ndim, 2) - self.assertEqual(grain.components[0].data.shape, (width, height)) - - self.assertIsInstance(grain.components[1].data, np.ndarray) - self.assertEqual(grain.components[1].data.nbytes, width*height*bps >> (hs + vs)) - self.assertEqual(grain.components[1].data.dtype, dtype) - self.assertEqual(grain.components[1].data.size, width*height >> (hs + vs)) - self.assertEqual(grain.components[1].data.itemsize, bps) - self.assertEqual(grain.components[1].data.ndim, 2) - self.assertEqual(grain.components[1].data.shape, (width >> hs, height >> vs)) - - self.assertIsInstance(grain.components[2].data, np.ndarray) - self.assertEqual(grain.components[2].data.nbytes, width*height*bps >> (hs + vs)) - self.assertEqual(grain.components[2].data.dtype, dtype) - self.assertEqual(grain.components[2].data.size, width*height >> (hs + vs)) - self.assertEqual(grain.components[2].data.itemsize, bps) - self.assertEqual(grain.components[2].data.ndim, 2) - self.assertEqual(grain.components[2].data.shape, (width >> hs, height >> vs)) + self.assertIsInstance(grain.component_data[0], np.ndarray) + self.assertEqual(grain.component_data[0].nbytes, width*height*bps) + self.assertEqual(grain.component_data[0].dtype, dtype) + self.assertEqual(grain.component_data[0].size, width*height) + 
self.assertEqual(grain.component_data[0].itemsize, bps) + self.assertEqual(grain.component_data[0].ndim, 2) + self.assertEqual(grain.component_data[0].shape, (width, height)) + + self.assertIsInstance(grain.component_data[1], np.ndarray) + self.assertEqual(grain.component_data[1].nbytes, width*height*bps >> (hs + vs)) + self.assertEqual(grain.component_data[1].dtype, dtype) + self.assertEqual(grain.component_data[1].size, width*height >> (hs + vs)) + self.assertEqual(grain.component_data[1].itemsize, bps) + self.assertEqual(grain.component_data[1].ndim, 2) + self.assertEqual(grain.component_data[1].shape, (width >> hs, height >> vs)) + + self.assertIsInstance(grain.component_data[2], np.ndarray) + self.assertEqual(grain.component_data[2].nbytes, width*height*bps >> (hs + vs)) + self.assertEqual(grain.component_data[2].dtype, dtype) + self.assertEqual(grain.component_data[2].size, width*height >> (hs + vs)) + self.assertEqual(grain.component_data[2].itemsize, bps) + self.assertEqual(grain.component_data[2].ndim, 2) + self.assertEqual(grain.component_data[2].shape, (width >> hs, height >> vs)) self.assertEqual(grain.expected_length, (width*height + 2*(width >> hs)*(height >> vs))*bps) # Test that changes to the component arrays are reflected in the main data array for y in range(0, 16): for x in range(0, 16): - grain.components[0].data[x, y] = (y*width + x) & 0x3F + grain.component_data[0][x, y] = (y*width + x) & 0x3F for y in range(0, 16 >> vs): for x in range(0, 16 >> hs): - grain.components[1].data[x, y] = (y*(width >> hs) + x) & 0x3F + 0x40 - grain.components[2].data[x, y] = (y*(width >> hs) + x) & 0x3F + 0x50 + grain.component_data[1][x, y] = (y*(width >> hs) + x) & 0x3F + 0x40 + grain.component_data[2][x, y] = (y*(width >> hs) + x) & 0x3F + 0x50 for y in range(0, 16): for x in range(0, 16): From 13b0e8c8cc9fe2432a22598e2f04f0c789a8c5b0 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 10 Oct 2019 14:22:38 +0100 Subject: [PATCH 36/76] numpy: suppoprt for 
discontiguous planar formats --- mediagrains_py36/numpy/__init__.py | 3 +- tests/test36_numpy_videograin.py | 78 ++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 1 deletion(-) diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index 03e5fd9..79805bb 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -29,6 +29,7 @@ from copy import copy, deepcopy import numpy as np +from numpy.lib.stride_tricks import as_strided __all__ = ['VideoGrain', 'VIDEOGRAIN'] @@ -51,7 +52,7 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c arrays = [] for component in components: component_data = data[component.offset//data.itemsize:(component.offset + component.length)//data.itemsize] - component_data.shape = (component.height, component.width) + component_data = as_strided(component_data, shape=(component.height, component.width), strides=(component.stride, component_data.itemsize)) arrays.append(component_data.transpose()) return arrays diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 21518a4..5bf14d3 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -17,6 +17,8 @@ from unittest import TestCase +from pdb import set_trace + import uuid from mediagrains.numpy import VideoGrain from mediagrains.cogenums import ( @@ -183,6 +185,82 @@ def test_video_grain_create(self): self.assertIsVideoGrain(fmt)(grain) + def test_video_grain_create_discontiguous(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + data = bytearray(11*1024*1024) + + grain = VideoGrain({ + "grain": { + "grain_type": "video", + "source_id": src_id, + "flow_id": flow_id, + "origin_timestamp": ots, + 
"sync_timestamp": sts, + "creation_timestamp": cts, + "rate": { + "numerator": 25, + "denominator": 1, + }, + "duration": { + "numerator": 1, + "denominator": 25, + }, + "cog_frame": { + "format": CogFrameFormat.S16_422_10BIT, + "width": 1920, + "height": 1080, + "layout": CogFrameLayout.FULL_FRAME, + "extension": 0, + "components": [ + { + "stride": 4096, + "width": 1920, + "height": 1080, + "length": 4423680, + "offset": 0 + }, + { + "stride": 2048, + "width": 960, + "height": 1080, + "length": 2211840, + "offset": 5*1024*1024 + }, + { + "stride": 2048, + "width": 960, + "height": 1080, + "length": 2211840, + "offset": 8*1024*1024 + }, + ] + } + } + }, data) + + for y in range(0, 16): + for x in range(0, 16): + grain.component_data[0][x, y] = (y*1920 + x) & 0x3F + + for y in range(0, 16): + for x in range(0, 8): + grain.component_data[1][x, y] = (y*960 + x) & 0x3F + 0x40 + grain.component_data[2][x, y] = (y*960 + x) & 0x3F + 0x50 + + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*grain.components[0].stride//2 + x], (y*1920 + x) & 0x3F) + + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[grain.components[1].offset//2 + y*grain.components[1].stride//2 + x], (y*960 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[grain.components[2].offset//2 + y*grain.components[2].stride//2 + x], (y*960 + x) & 0x3F + 0x50) + def test_copy(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") From 4e875143b198def0c1c21322582ad0d4b508627c Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 10 Oct 2019 15:01:35 +0100 Subject: [PATCH 37/76] numpy: support for UYVY and YUYV --- mediagrains_py36/numpy/__init__.py | 24 +++++ tests/test36_numpy_videograin.py | 136 ++++++++++++++++++----------- 2 files changed, 110 insertions(+), 50 deletions(-) diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index 79805bb..8a25464 
100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -43,6 +43,8 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: return np.dtype(np.int16) elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 4: return np.dtype(np.int32) + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV]: + return np.dtype(np.uint8) raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") @@ -55,6 +57,28 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c component_data = as_strided(component_data, shape=(component.height, component.width), strides=(component.stride, component_data.itemsize)) arrays.append(component_data.transpose()) return arrays + elif fmt == CogFrameFormat.UYVY: + return [ + as_strided(data[1:], + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*2)).transpose(), + as_strided(data, + shape=(components[0].height, components[0].width//2), + strides=(components[0].stride, data.itemsize*4)).transpose(), + as_strided(data[2:], + shape=(components[0].height, components[0].width//2), + strides=(components[0].stride, data.itemsize*4)).transpose()] + elif fmt == CogFrameFormat.YUYV: + return [ + as_strided(data, + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*2)).transpose(), + as_strided(data[1:], + shape=(components[0].height, components[0].width//2), + strides=(components[0].stride, data.itemsize*4)).transpose(), + as_strided(data[3:], + shape=(components[0].height, components[0].width//2), + strides=(components[0].stride, data.itemsize*4)).transpose()] raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 5bf14d3..25ea4fd 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -26,7 
+26,9 @@ CogFrameLayout, COG_FRAME_FORMAT_BYTES_PER_VALUE, COG_FRAME_FORMAT_H_SHIFT, - COG_FRAME_FORMAT_V_SHIFT) + COG_FRAME_FORMAT_V_SHIFT, + COG_FRAME_IS_PACKED, + COG_FRAME_IS_COMPRESSED) from mediatimestamp.immutable import Timestamp, TimeRange import mock from fractions import Fraction @@ -66,31 +68,48 @@ def __inner(grain): self.assertIsNone(grain.source_aspect_ratio) self.assertIsNone(grain.pixel_aspect_ratio) - bps = COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) - hs = COG_FRAME_FORMAT_H_SHIFT(fmt) - vs = COG_FRAME_FORMAT_V_SHIFT(fmt) - - self.assertEqual(len(grain.components), 3) - self.assertEqual(grain.components[0].stride, width*bps) - self.assertEqual(grain.components[0].width, width) - self.assertEqual(grain.components[0].height, height) - self.assertEqual(grain.components[0].offset, 0) - self.assertEqual(grain.components[0].length, width*height*bps) - self.assertEqual(len(grain.components[0]), 5) - - self.assertEqual(grain.components[1].stride, width*bps >> hs) - self.assertEqual(grain.components[1].width, width >> hs) - self.assertEqual(grain.components[1].height, height >> vs) - self.assertEqual(grain.components[1].offset, width*height*bps) - self.assertEqual(grain.components[1].length, width*height*bps >> (hs + vs)) - self.assertEqual(len(grain.components[1]), 5) - - self.assertEqual(grain.components[2].stride, width*bps >> hs) - self.assertEqual(grain.components[2].width, width >> hs) - self.assertEqual(grain.components[2].height, height >> vs) - self.assertEqual(grain.components[2].offset, width*height*bps + (width*height*bps >> (hs + vs))) - self.assertEqual(grain.components[2].length, width*height*bps >> (hs + vs)) - self.assertEqual(len(grain.components[2]), 5) + if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + bps = COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) + hs = COG_FRAME_FORMAT_H_SHIFT(fmt) + vs = COG_FRAME_FORMAT_V_SHIFT(fmt) + + self.assertEqual(len(grain.components), 3) + self.assertEqual(grain.components[0].stride, 
width*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, width*height*bps) + self.assertEqual(len(grain.components[0]), 5) + + self.assertEqual(grain.components[1].stride, width*bps >> hs) + self.assertEqual(grain.components[1].width, width >> hs) + self.assertEqual(grain.components[1].height, height >> vs) + self.assertEqual(grain.components[1].offset, width*height*bps) + self.assertEqual(grain.components[1].length, width*height*bps >> (hs + vs)) + self.assertEqual(len(grain.components[1]), 5) + + self.assertEqual(grain.components[2].stride, width*bps >> hs) + self.assertEqual(grain.components[2].width, width >> hs) + self.assertEqual(grain.components[2].height, height >> vs) + self.assertEqual(grain.components[2].offset, width*height*bps + (width*height*bps >> (hs + vs))) + self.assertEqual(grain.components[2].length, width*height*bps >> (hs + vs)) + self.assertEqual(len(grain.components[2]), 5) + + self.assertEqual(grain.expected_length, (width*height + 2*(width >> hs)*(height >> vs))*bps) + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV]: + bps = 1 + hs = 1 + vs = 0 + + self.assertEqual(len(grain.components), 1) + self.assertEqual(grain.components[0].stride, width*bps + 2*(width >> hs)*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, width*height*bps*2) + self.assertEqual(len(grain.components[0]), 5) + + self.assertEqual(grain.expected_length, width*height*bps*2) if bps == 1: dtype = np.dtype(np.uint8) @@ -98,12 +117,12 @@ def __inner(grain): dtype = np.dtype(np.int16) self.assertIsInstance(grain.data, np.ndarray) - self.assertEqual(grain.data.nbytes, width*height*bps + 2*(width*height*bps >> (hs + vs))) + 
self.assertEqual(grain.data.nbytes, grain.expected_length) self.assertEqual(grain.data.dtype, dtype) - self.assertEqual(grain.data.size, width*height + 2*(width*height >> (hs + vs))) + self.assertEqual(grain.data.size, grain.expected_length//bps) self.assertEqual(grain.data.itemsize, bps) self.assertEqual(grain.data.ndim, 1) - self.assertEqual(grain.data.shape, (width*height + 2*(width*height >> (hs + vs)),)) + self.assertEqual(grain.data.shape, (grain.expected_length//bps,)) self.assertEqual(repr(grain), "VideoGrain({!r},< numpy data of length {} >)".format(grain.meta, len(grain.data))) @@ -131,26 +150,41 @@ def __inner(grain): self.assertEqual(grain.component_data[2].ndim, 2) self.assertEqual(grain.component_data[2].shape, (width >> hs, height >> vs)) - self.assertEqual(grain.expected_length, (width*height + 2*(width >> hs)*(height >> vs))*bps) - # Test that changes to the component arrays are reflected in the main data array for y in range(0, 16): for x in range(0, 16): - grain.component_data[0][x, y] = (y*width + x) & 0x3F - - for y in range(0, 16 >> vs): - for x in range(0, 16 >> hs): - grain.component_data[1][x, y] = (y*(width >> hs) + x) & 0x3F + 0x40 - grain.component_data[2][x, y] = (y*(width >> hs) + x) & 0x3F + 0x50 - - for y in range(0, 16): - for x in range(0, 16): - self.assertEqual(grain.data[y*width + x], (y*width + x) & 0x3F) + grain.component_data[0][x, y] = (y*16 + x) & 0x3F for y in range(0, 16 >> vs): for x in range(0, 16 >> hs): - self.assertEqual(grain.data[width*height + y*(width >> hs) + x], (y*(width >> hs) + x) & 0x3F + 0x40) - self.assertEqual(grain.data[width*height + (width >> hs)*(height >> vs) + y*(width >> hs) + x], (y*(width >> hs) + x) & 0x3F + 0x50) + grain.component_data[1][x, y] = (y*16 + x) & 0x3F + 0x40 + grain.component_data[2][x, y] = (y*16 + x) & 0x3F + 0x50 + + if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width + x], 
(y*16 + x) & 0x3F) + + for y in range(0, 16 >> vs): + for x in range(0, 16 >> hs): + self.assertEqual(grain.data[width*height + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[width*height + (width >> hs)*(height >> vs) + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x50) + + elif fmt == CogFrameFormat.UYVY: + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + 2*x + 0) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) + self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + 2*x + 1) & 0x3F) + + elif fmt == CogFrameFormat.YUYV: + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + 2*x + 0) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + 2*x + 1) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) return __inner @@ -176,7 +210,9 @@ def test_video_grain_create(self): CogFrameFormat.U8_444_RGB, CogFrameFormat.S16_444_RGB, CogFrameFormat.S16_444_12BIT_RGB, - CogFrameFormat.S16_444_10BIT_RGB]: + CogFrameFormat.S16_444_10BIT_RGB, + CogFrameFormat.UYVY, + CogFrameFormat.YUYV]: with self.subTest(fmt=fmt): with mock.patch.object(Timestamp, "get_time", return_value=cts): grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, @@ -245,21 +281,21 @@ def test_video_grain_create_discontiguous(self): for y in range(0, 16): for x in range(0, 16): - grain.component_data[0][x, y] = (y*1920 + x) & 0x3F + grain.component_data[0][x, y] = (y*16 + x) & 0x3F for y in range(0, 16): for x in range(0, 8): - grain.component_data[1][x, y] = (y*960 + x) & 0x3F + 0x40 - grain.component_data[2][x, y] = (y*960 + x) & 0x3F + 0x50 + grain.component_data[1][x, y] = (y*16 + x) & 0x3F + 0x40 + 
grain.component_data[2][x, y] = (y*16 + x) & 0x3F + 0x50 for y in range(0, 16): for x in range(0, 16): - self.assertEqual(grain.data[y*grain.components[0].stride//2 + x], (y*1920 + x) & 0x3F) + self.assertEqual(grain.data[y*grain.components[0].stride//2 + x], (y*16 + x) & 0x3F) for y in range(0, 16): for x in range(0, 8): - self.assertEqual(grain.data[grain.components[1].offset//2 + y*grain.components[1].stride//2 + x], (y*960 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[grain.components[2].offset//2 + y*grain.components[2].stride//2 + x], (y*960 + x) & 0x3F + 0x50) + self.assertEqual(grain.data[grain.components[1].offset//2 + y*grain.components[1].stride//2 + x], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[grain.components[2].offset//2 + y*grain.components[2].stride//2 + x], (y*16 + x) & 0x3F + 0x50) def test_copy(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") From c8da1f8ec9779f58667cb648d76db0034d5b37e1 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 10 Oct 2019 15:23:50 +0100 Subject: [PATCH 38/76] numpy: support for packed RGB --- mediagrains_py36/numpy/__init__.py | 15 ++++++++++++++- tests/test36_numpy_videograin.py | 27 ++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index 8a25464..8fa5453 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -43,7 +43,9 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: return np.dtype(np.int16) elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 4: return np.dtype(np.int32) - elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV]: + elif fmt in [CogFrameFormat.UYVY, + CogFrameFormat.YUYV, + CogFrameFormat.RGB]: return np.dtype(np.uint8) raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") @@ -79,6 +81,17 @@ def _component_arrays_for_data_and_type(data: np.ndarray, 
fmt: CogFrameFormat, c as_strided(data[3:], shape=(components[0].height, components[0].width//2), strides=(components[0].stride, data.itemsize*4)).transpose()] + elif fmt == CogFrameFormat.RGB: + return [ + as_strided(data, + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*3)).transpose(), + as_strided(data[1:], + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*3)).transpose(), + as_strided(data[2:], + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*3)).transpose()] raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 25ea4fd..9da596b 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -110,6 +110,20 @@ def __inner(grain): self.assertEqual(len(grain.components[0]), 5) self.assertEqual(grain.expected_length, width*height*bps*2) + elif fmt in [CogFrameFormat.RGB]: + bps = 1 + hs = 0 + vs = 0 + + self.assertEqual(len(grain.components), 1) + self.assertEqual(grain.components[0].stride, 3*width*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, width*height*bps*3) + self.assertEqual(len(grain.components[0]), 5) + else: + raise Exception() if bps == 1: dtype = np.dtype(np.uint8) @@ -186,6 +200,16 @@ def __inner(grain): self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + 2*x + 1) & 0x3F) self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) + elif fmt == CogFrameFormat.RGB: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*3 + 3*x + 0], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*3 + 3*x + 1], (y*16 + x) & 0x3F + 0x40) 
+ self.assertEqual(grain.data[y*width*3 + 3*x + 2], (y*16 + x) & 0x3F + 0x50) + + else: + raise Exception() + return __inner def test_video_grain_create(self): @@ -212,7 +236,8 @@ def test_video_grain_create(self): CogFrameFormat.S16_444_12BIT_RGB, CogFrameFormat.S16_444_10BIT_RGB, CogFrameFormat.UYVY, - CogFrameFormat.YUYV]: + CogFrameFormat.YUYV, + CogFrameFormat.RGB]: with self.subTest(fmt=fmt): with mock.patch.object(Timestamp, "get_time", return_value=cts): grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, From 00e6a7938297676459766dbce9d9e05af1a34125 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 10 Oct 2019 15:34:28 +0100 Subject: [PATCH 39/76] numpy: support for packed RGBx, BGRA, etc ... --- mediagrains_py36/numpy/__init__.py | 38 ++++++++++++++++++++++- tests/test36_numpy_videograin.py | 49 +++++++++++++++++++++++++++++- 2 files changed, 85 insertions(+), 2 deletions(-) diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index 8fa5453..ce7012a 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -45,7 +45,15 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: return np.dtype(np.int32) elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, - CogFrameFormat.RGB]: + CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: return np.dtype(np.uint8) raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") @@ -92,6 +100,34 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c as_strided(data[2:], shape=(components[0].height, components[0].width), strides=(components[0].stride, data.itemsize*3)).transpose()] + elif fmt in [CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx]: + 
return [ + as_strided(data, + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*4)).transpose(), + as_strided(data[1:], + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*4)).transpose(), + as_strided(data[2:], + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*4)).transpose()] + elif fmt in [CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + return [ + as_strided(data[1:], + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*4)).transpose(), + as_strided(data[2:], + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*4)).transpose(), + as_strided(data[3:], + shape=(components[0].height, components[0].width), + strides=(components[0].stride, data.itemsize*4)).transpose()] raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 9da596b..37717e8 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -122,6 +122,25 @@ def __inner(grain): self.assertEqual(grain.components[0].offset, 0) self.assertEqual(grain.components[0].length, width*height*bps*3) self.assertEqual(len(grain.components[0]), 5) + elif fmt in [CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + bps = 1 + hs = 0 + vs = 0 + + self.assertEqual(len(grain.components), 1) + self.assertEqual(grain.components[0].stride, 4*width*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, 
width*height*bps*4) + self.assertEqual(len(grain.components[0]), 5) else: raise Exception() @@ -207,6 +226,26 @@ def __inner(grain): self.assertEqual(grain.data[y*width*3 + 3*x + 1], (y*16 + x) & 0x3F + 0x40) self.assertEqual(grain.data[y*width*3 + 3*x + 2], (y*16 + x) & 0x3F + 0x50) + elif fmt in [CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx]: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*4 + 4*x + 0], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) + + elif fmt in [CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*4 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) + else: raise Exception() @@ -237,7 +276,15 @@ def test_video_grain_create(self): CogFrameFormat.S16_444_10BIT_RGB, CogFrameFormat.UYVY, CogFrameFormat.YUYV, - CogFrameFormat.RGB]: + CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: with self.subTest(fmt=fmt): with mock.patch.object(Timestamp, "get_time", return_value=cts): grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, From 95f5f9b46d459ef9b5b61a0c5f589cd1cb3f19e2 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 10 Oct 2019 15:41:21 +0100 Subject: [PATCH 40/76] numpy: support for v216 --- mediagrains_py36/numpy/__init__.py | 4 +++- tests/test36_numpy_videograin.py | 19 +++++++++++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/mediagrains_py36/numpy/__init__.py 
b/mediagrains_py36/numpy/__init__.py index ce7012a..b6eb0d6 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -55,6 +55,8 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: CogFrameFormat.ABGR, CogFrameFormat.xBGR]: return np.dtype(np.uint8) + elif fmt == CogFrameFormat.v216: + return np.dtype(np.int16) raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") @@ -67,7 +69,7 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c component_data = as_strided(component_data, shape=(component.height, component.width), strides=(component.stride, component_data.itemsize)) arrays.append(component_data.transpose()) return arrays - elif fmt == CogFrameFormat.UYVY: + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: return [ as_strided(data[1:], shape=(components[0].height, components[0].width), diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 37717e8..d910ffd 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -141,6 +141,20 @@ def __inner(grain): self.assertEqual(grain.components[0].offset, 0) self.assertEqual(grain.components[0].length, width*height*bps*4) self.assertEqual(len(grain.components[0]), 5) + + elif fmt == CogFrameFormat.v216: + bps = 2 + hs = 1 + vs = 0 + + self.assertEqual(len(grain.components), 1) + self.assertEqual(grain.components[0].stride, 2*width*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, width*height*bps*2) + self.assertEqual(len(grain.components[0]), 5) + else: raise Exception() @@ -203,7 +217,7 @@ def __inner(grain): self.assertEqual(grain.data[width*height + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x40) self.assertEqual(grain.data[width*height + (width >> hs)*(height >> 
vs) + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x50) - elif fmt == CogFrameFormat.UYVY: + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: for y in range(0, 16): for x in range(0, 8): self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + x) & 0x3F + 0x40) @@ -284,7 +298,8 @@ def test_video_grain_create(self): CogFrameFormat.ARGB, CogFrameFormat.xRGB, CogFrameFormat.ABGR, - CogFrameFormat.xBGR]: + CogFrameFormat.xBGR, + CogFrameFormat.v216]: with self.subTest(fmt=fmt): with mock.patch.object(Timestamp, "get_time", return_value=cts): grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, From 8c486d200992772057a8407719a3e8fe9a51cc29 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Thu, 10 Oct 2019 15:51:40 +0100 Subject: [PATCH 41/76] numpy: minimal support for v210 --- mediagrains_py36/numpy/__init__.py | 5 + tests/test36_numpy_videograin.py | 199 ++++++++++++++++------------- 2 files changed, 117 insertions(+), 87 deletions(-) diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index b6eb0d6..95180aa 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -57,6 +57,8 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: return np.dtype(np.uint8) elif fmt == CogFrameFormat.v216: return np.dtype(np.int16) + elif fmt == CogFrameFormat.v210: + return np.dtype(np.int32) raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") @@ -130,6 +132,9 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c as_strided(data[3:], shape=(components[0].height, components[0].width), strides=(components[0].stride, data.itemsize*4)).transpose()] + elif fmt == CogFrameFormat.v210: + # v210 is barely supported. Convert it to something else to actually use it! 
+ return [] raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index d910ffd..6ea4727 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -155,13 +155,30 @@ def __inner(grain): self.assertEqual(grain.components[0].length, width*height*bps*2) self.assertEqual(len(grain.components[0]), 5) + elif fmt == CogFrameFormat.v210: + bps = 4 + hs = 1 + vs = 0 + + self.assertEqual(len(grain.components), 1) + self.assertEqual(grain.components[0].stride, (((width + 47) // 48) * 128)) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, height*(((width + 47) // 48) * 128)) + self.assertEqual(len(grain.components[0]), 5) + else: raise Exception() if bps == 1: dtype = np.dtype(np.uint8) - else: + elif bps == 2: dtype = np.dtype(np.int16) + elif bps == 4: + dtype = np.dtype(np.int32) + else: + raise Exception() self.assertIsInstance(grain.data, np.ndarray) self.assertEqual(grain.data.nbytes, grain.expected_length) @@ -173,95 +190,99 @@ def __inner(grain): self.assertEqual(repr(grain), "VideoGrain({!r},< numpy data of length {} >)".format(grain.meta, len(grain.data))) - self.assertIsInstance(grain.component_data[0], np.ndarray) - self.assertEqual(grain.component_data[0].nbytes, width*height*bps) - self.assertEqual(grain.component_data[0].dtype, dtype) - self.assertEqual(grain.component_data[0].size, width*height) - self.assertEqual(grain.component_data[0].itemsize, bps) - self.assertEqual(grain.component_data[0].ndim, 2) - self.assertEqual(grain.component_data[0].shape, (width, height)) - - self.assertIsInstance(grain.component_data[1], np.ndarray) - self.assertEqual(grain.component_data[1].nbytes, width*height*bps >> (hs + vs)) - 
self.assertEqual(grain.component_data[1].dtype, dtype) - self.assertEqual(grain.component_data[1].size, width*height >> (hs + vs)) - self.assertEqual(grain.component_data[1].itemsize, bps) - self.assertEqual(grain.component_data[1].ndim, 2) - self.assertEqual(grain.component_data[1].shape, (width >> hs, height >> vs)) - - self.assertIsInstance(grain.component_data[2], np.ndarray) - self.assertEqual(grain.component_data[2].nbytes, width*height*bps >> (hs + vs)) - self.assertEqual(grain.component_data[2].dtype, dtype) - self.assertEqual(grain.component_data[2].size, width*height >> (hs + vs)) - self.assertEqual(grain.component_data[2].itemsize, bps) - self.assertEqual(grain.component_data[2].ndim, 2) - self.assertEqual(grain.component_data[2].shape, (width >> hs, height >> vs)) - - # Test that changes to the component arrays are reflected in the main data array - for y in range(0, 16): - for x in range(0, 16): - grain.component_data[0][x, y] = (y*16 + x) & 0x3F - - for y in range(0, 16 >> vs): - for x in range(0, 16 >> hs): - grain.component_data[1][x, y] = (y*16 + x) & 0x3F + 0x40 - grain.component_data[2][x, y] = (y*16 + x) & 0x3F + 0x50 - - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + if fmt == CogFrameFormat.v210: + # V210 is barely supported. Convert it to something else to actually use it! 
+ self.assertEqual(len(grain.component_data), 0) + else: + self.assertIsInstance(grain.component_data[0], np.ndarray) + self.assertEqual(grain.component_data[0].nbytes, width*height*bps) + self.assertEqual(grain.component_data[0].dtype, dtype) + self.assertEqual(grain.component_data[0].size, width*height) + self.assertEqual(grain.component_data[0].itemsize, bps) + self.assertEqual(grain.component_data[0].ndim, 2) + self.assertEqual(grain.component_data[0].shape, (width, height)) + + self.assertIsInstance(grain.component_data[1], np.ndarray) + self.assertEqual(grain.component_data[1].nbytes, width*height*bps >> (hs + vs)) + self.assertEqual(grain.component_data[1].dtype, dtype) + self.assertEqual(grain.component_data[1].size, width*height >> (hs + vs)) + self.assertEqual(grain.component_data[1].itemsize, bps) + self.assertEqual(grain.component_data[1].ndim, 2) + self.assertEqual(grain.component_data[1].shape, (width >> hs, height >> vs)) + + self.assertIsInstance(grain.component_data[2], np.ndarray) + self.assertEqual(grain.component_data[2].nbytes, width*height*bps >> (hs + vs)) + self.assertEqual(grain.component_data[2].dtype, dtype) + self.assertEqual(grain.component_data[2].size, width*height >> (hs + vs)) + self.assertEqual(grain.component_data[2].itemsize, bps) + self.assertEqual(grain.component_data[2].ndim, 2) + self.assertEqual(grain.component_data[2].shape, (width >> hs, height >> vs)) + + # Test that changes to the component arrays are reflected in the main data array for y in range(0, 16): for x in range(0, 16): - self.assertEqual(grain.data[y*width + x], (y*16 + x) & 0x3F) + grain.component_data[0][x, y] = (y*16 + x) & 0x3F for y in range(0, 16 >> vs): for x in range(0, 16 >> hs): - self.assertEqual(grain.data[width*height + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[width*height + (width >> hs)*(height >> vs) + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x50) - - elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: 
- for y in range(0, 16): - for x in range(0, 8): - self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + 2*x + 0) & 0x3F) - self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) - self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + 2*x + 1) & 0x3F) - - elif fmt == CogFrameFormat.YUYV: - for y in range(0, 16): - for x in range(0, 8): - self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + 2*x + 0) & 0x3F) - self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + 2*x + 1) & 0x3F) - self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) - - elif fmt == CogFrameFormat.RGB: - for y in range(0, 16): - for x in range(0, 16): - self.assertEqual(grain.data[y*width*3 + 3*x + 0], (y*16 + x) & 0x3F) - self.assertEqual(grain.data[y*width*3 + 3*x + 1], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[y*width*3 + 3*x + 2], (y*16 + x) & 0x3F + 0x50) - - elif fmt in [CogFrameFormat.RGBx, - CogFrameFormat.RGBA, - CogFrameFormat.BGRx, - CogFrameFormat.BGRx]: - for y in range(0, 16): - for x in range(0, 16): - self.assertEqual(grain.data[y*width*4 + 4*x + 0], (y*16 + x) & 0x3F) - self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) - - elif fmt in [CogFrameFormat.ARGB, - CogFrameFormat.xRGB, - CogFrameFormat.ABGR, - CogFrameFormat.xBGR]: - for y in range(0, 16): - for x in range(0, 16): - self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F) - self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[y*width*4 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) - - else: - raise Exception() + grain.component_data[1][x, y] = (y*16 + x) & 0x3F + 0x40 + grain.component_data[2][x, y] = (y*16 + x) & 0x3F + 0x50 + + if not 
COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width + x], (y*16 + x) & 0x3F) + + for y in range(0, 16 >> vs): + for x in range(0, 16 >> hs): + self.assertEqual(grain.data[width*height + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[width*height + (width >> hs)*(height >> vs) + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x50) + + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + 2*x + 0) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) + self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + 2*x + 1) & 0x3F) + + elif fmt == CogFrameFormat.YUYV: + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + 2*x + 0) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + 2*x + 1) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) + + elif fmt == CogFrameFormat.RGB: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*3 + 3*x + 0], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*3 + 3*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*3 + 3*x + 2], (y*16 + x) & 0x3F + 0x50) + + elif fmt in [CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx]: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*4 + 4*x + 0], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) + + elif fmt in 
[CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*4 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) + + else: + raise Exception() return __inner @@ -272,7 +293,10 @@ def test_video_grain_create(self): ots = Timestamp.from_tai_sec_nsec("417798915:5") sts = Timestamp.from_tai_sec_nsec("417798915:10") - for fmt in [CogFrameFormat.S16_444_10BIT, + for fmt in [CogFrameFormat.S32_444, + CogFrameFormat.S32_422, + CogFrameFormat.S32_420, + CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_444_12BIT, @@ -299,7 +323,8 @@ def test_video_grain_create(self): CogFrameFormat.xRGB, CogFrameFormat.ABGR, CogFrameFormat.xBGR, - CogFrameFormat.v216]: + CogFrameFormat.v216, + CogFrameFormat.v210]: with self.subTest(fmt=fmt): with mock.patch.object(Timestamp, "get_time", return_value=cts): grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, From 3ede498e4a56222e145f556c2e8715d475c7b316 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 14 Oct 2019 09:54:03 +0100 Subject: [PATCH 42/76] numpy: Overall simplifications of test code --- mediagrains/cogenums.py | 9 +- mediagrains/grain.py | 3 + tests/test36_numpy_videograin.py | 173 ++++++++++++++++++++++++++----- 3 files changed, 156 insertions(+), 29 deletions(-) diff --git a/mediagrains/cogenums.py b/mediagrains/cogenums.py index d5949c5..43e6570 100644 --- a/mediagrains/cogenums.py +++ b/mediagrains/cogenums.py @@ -33,7 +33,8 @@ 'COG_FRAME_IS_COMPRESSED', 'COG_FRAME_FORMAT_BYTES_PER_VALUE', 'COG_FRAME_FORMAT_H_SHIFT', - 'COG_FRAME_FORMAT_V_SHIFT'] + 'COG_FRAME_FORMAT_V_SHIFT', + 'COG_FRAME_FORMAT_ACTIVE_BITS'] class CogFrameFormat(IntEnum): @@ -152,4 +153,8 @@ def 
COG_FRAME_FORMAT_H_SHIFT(fmt): def COG_FRAME_FORMAT_V_SHIFT(fmt): - return ((fmt >> 1) & 0x1) \ No newline at end of file + return ((fmt >> 1) & 0x1) + + +def COG_FRAME_FORMAT_ACTIVE_BITS(fmt): + return (((int(fmt)) >> 10) & 0x3F) \ No newline at end of file diff --git a/mediagrains/grain.py b/mediagrains/grain.py index 0288448..9fce517 100644 --- a/mediagrains/grain.py +++ b/mediagrains/grain.py @@ -209,6 +209,9 @@ def __deepcopy__(self, memo): from .grain_constructors import Grain return Grain(deepcopy(self.meta), deepcopy(self.data)) + def __bytes__(self): + return bytes(self._data) + @property def data(self): return self._data diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 6ea4727..ee541a2 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -17,8 +17,6 @@ from unittest import TestCase -from pdb import set_trace - import uuid from mediagrains.numpy import VideoGrain from mediagrains.cogenums import ( @@ -28,7 +26,9 @@ COG_FRAME_FORMAT_H_SHIFT, COG_FRAME_FORMAT_V_SHIFT, COG_FRAME_IS_PACKED, - COG_FRAME_IS_COMPRESSED) + COG_FRAME_IS_COMPRESSED, + COG_FRAME_IS_RGB, + COG_FRAME_FORMAT_ACTIVE_BITS) from mediatimestamp.immutable import Timestamp, TimeRange import mock from fractions import Fraction @@ -38,6 +38,144 @@ class TestGrain (TestCase): + def _get_bitdepth(self, fmt): + if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + return COG_FRAME_FORMAT_ACTIVE_BITS(fmt) + elif fmt in [CogFrameFormat.UYVY, + CogFrameFormat.YUYV, + CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + return 8 + elif fmt == CogFrameFormat.v216: + return 16 + elif fmt == CogFrameFormat.v210: + return 10 + else: + raise Exception() + + def _get_hs_vs_and_bps(self, fmt): + if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + 
return (COG_FRAME_FORMAT_H_SHIFT(fmt), COG_FRAME_FORMAT_V_SHIFT(fmt), COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt)) + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV]: + return (1, 0, 1) + elif fmt in [CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + return (0, 0, 1) + elif fmt == CogFrameFormat.v216: + return (1, 0, 2) + elif fmt == CogFrameFormat.v210: + return (1, 0, 4) + else: + raise Exception() + + def _is_rgb(self, fmt): + if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + return COG_FRAME_IS_RGB(fmt) + elif fmt in [CogFrameFormat.UYVY, + CogFrameFormat.YUYV, + CogFrameFormat.v216, + CogFrameFormat.v210]: + return False + elif fmt in [CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + return True + else: + raise Exception() + + def assertComponentsAreModifiable(self, grain): + width = grain.width + height = grain.height + fmt = grain.format + + (hs, vs, _) = self._get_hs_vs_and_bps(fmt) + + # Test that changes to the component arrays are reflected in the main data array + for y in range(0, 16): + for x in range(0, 16): + grain.component_data[0][x, y] = (y*16 + x) & 0x3F + + for y in range(0, 16 >> vs): + for x in range(0, 16 >> hs): + grain.component_data[1][x, y] = (y*16 + x) & 0x3F + 0x40 + grain.component_data[2][x, y] = (y*16 + x) & 0x3F + 0x50 + + if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width + x], (y*16 + x) & 0x3F) + + for y in range(0, 16 >> vs): + for x in range(0, 16 >> hs): + self.assertEqual(grain.data[width*height + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[width*height + 
(width >> hs)*(height >> vs) + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x50) + + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + 2*x + 0) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) + self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + 2*x + 1) & 0x3F) + + elif fmt == CogFrameFormat.YUYV: + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + 2*x + 0) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + 2*x + 1) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) + + elif fmt == CogFrameFormat.RGB: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*3 + 3*x + 0], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*3 + 3*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*3 + 3*x + 2], (y*16 + x) & 0x3F + 0x50) + + elif fmt in [CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx]: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*4 + 4*x + 0], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) + + elif fmt in [CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*4 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) + + else: + raise 
Exception() + def assertIsVideoGrain(self, fmt, src_id=uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429"), @@ -68,11 +206,9 @@ def __inner(grain): self.assertIsNone(grain.source_aspect_ratio) self.assertIsNone(grain.pixel_aspect_ratio) - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): - bps = COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) - hs = COG_FRAME_FORMAT_H_SHIFT(fmt) - vs = COG_FRAME_FORMAT_V_SHIFT(fmt) + (hs, vs, bps) = self._get_hs_vs_and_bps(fmt) + if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): self.assertEqual(len(grain.components), 3) self.assertEqual(grain.components[0].stride, width*bps) self.assertEqual(grain.components[0].width, width) @@ -97,10 +233,6 @@ def __inner(grain): self.assertEqual(grain.expected_length, (width*height + 2*(width >> hs)*(height >> vs))*bps) elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV]: - bps = 1 - hs = 1 - vs = 0 - self.assertEqual(len(grain.components), 1) self.assertEqual(grain.components[0].stride, width*bps + 2*(width >> hs)*bps) self.assertEqual(grain.components[0].width, width) @@ -111,10 +243,6 @@ def __inner(grain): self.assertEqual(grain.expected_length, width*height*bps*2) elif fmt in [CogFrameFormat.RGB]: - bps = 1 - hs = 0 - vs = 0 - self.assertEqual(len(grain.components), 1) self.assertEqual(grain.components[0].stride, 3*width*bps) self.assertEqual(grain.components[0].width, width) @@ -130,10 +258,6 @@ def __inner(grain): CogFrameFormat.xRGB, CogFrameFormat.ABGR, CogFrameFormat.xBGR]: - bps = 1 - hs = 0 - vs = 0 - self.assertEqual(len(grain.components), 1) self.assertEqual(grain.components[0].stride, 4*width*bps) self.assertEqual(grain.components[0].width, width) @@ -143,10 +267,6 @@ def __inner(grain): self.assertEqual(len(grain.components[0]), 5) elif fmt == CogFrameFormat.v216: - bps = 2 - hs = 1 - vs = 0 - self.assertEqual(len(grain.components), 1) self.assertEqual(grain.components[0].stride, 2*width*bps) self.assertEqual(grain.components[0].width, width) @@ 
-156,10 +276,6 @@ def __inner(grain): self.assertEqual(len(grain.components[0]), 5) elif fmt == CogFrameFormat.v210: - bps = 4 - hs = 1 - vs = 0 - self.assertEqual(len(grain.components), 1) self.assertEqual(grain.components[0].stride, (((width + 47) // 48) * 128)) self.assertEqual(grain.components[0].width, width) @@ -333,6 +449,9 @@ def test_video_grain_create(self): self.assertIsVideoGrain(fmt)(grain) + if fmt is not CogFrameFormat.v210: + self.assertComponentsAreModifiable(grain) + def test_video_grain_create_discontiguous(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") From 89027dada6f14634313bd97205da893336941be5 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 14 Oct 2019 10:57:50 +0100 Subject: [PATCH 43/76] numpy: Added named access to component data arrays as a convenience --- mediagrains/cogenums.py | 1 + mediagrains_py36/numpy/__init__.py | 47 ++++++++++++++++++++++++++++-- 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/mediagrains/cogenums.py b/mediagrains/cogenums.py index 43e6570..5ed18ed 100644 --- a/mediagrains/cogenums.py +++ b/mediagrains/cogenums.py @@ -31,6 +31,7 @@ 'CogAudioFormat', 'COG_FRAME_IS_PACKED', 'COG_FRAME_IS_COMPRESSED', + 'COG_FRAME_IS_RGB', 'COG_FRAME_FORMAT_BYTES_PER_VALUE', 'COG_FRAME_FORMAT_H_SHIFT', 'COG_FRAME_FORMAT_V_SHIFT', diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index 95180aa..8dc3410 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -23,7 +23,8 @@ CogFrameFormat, COG_FRAME_IS_PACKED, COG_FRAME_IS_COMPRESSED, - COG_FRAME_FORMAT_BYTES_PER_VALUE) + COG_FRAME_FORMAT_BYTES_PER_VALUE, + COG_FRAME_IS_RGB) from mediagrains import grain as bytesgrain from mediagrains import grain_constructors as bytesgrain_constructors from copy import copy, deepcopy @@ -31,6 +32,7 @@ import numpy as np from numpy.lib.stride_tricks import as_strided +from enum 
import Enum, auto __all__ = ['VideoGrain', 'VIDEOGRAIN'] @@ -63,6 +65,45 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") +class ComponentDataList(list): + class ComponentOrder (Enum): + YUV = auto() + RGB = auto() + BGR = auto() + X = auto() + + def __init__(self, data: list, arrangement: ComponentOrder=ComponentOrder.X): + super().__init__(data) + if arrangement == ComponentDataList.ComponentOrder.YUV: + self.Y = self[0] + self.U = self[1] + self.V = self[2] + elif arrangement == ComponentDataList.ComponentOrder.RGB: + self.R = self[0] + self.G = self[1] + self.B = self[2] + elif arrangement == ComponentDataList.ComponentOrder.BGR: + self.B = self[0] + self.G = self[1] + self.R = self[2] + + +def _component_arrangement_from_format(fmt: CogFrameFormat): + if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + if COG_FRAME_IS_RGB(fmt): + return ComponentDataList.ComponentOrder.RGB + else: + return ComponentDataList.ComponentOrder.YUV + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, CogFrameFormat.v216]: + return ComponentDataList.ComponentOrder.YUV + elif fmt in [CogFrameFormat.RGB, CogFrameFormat.RGBA, CogFrameFormat.RGBx, CogFrameFormat.ARGB, CogFrameFormat.xRGB]: + return ComponentDataList.ComponentOrder.RGB + elif fmt in [CogFrameFormat.BGRA, CogFrameFormat.BGRx]: + return ComponentDataList.ComponentOrder.BGR + else: + return ComponentDataList.ComponentOrder.X + + def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, components: bytesgrain.VIDEOGRAIN.COMPONENT_LIST): if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): arrays = [] @@ -143,7 +184,9 @@ class VIDEOGRAIN (bytesgrain.VIDEOGRAIN): def __init__(self, meta, data): super().__init__(meta, data) self._data = np.frombuffer(self._data, dtype=_dtype_from_cogframeformat(self.format)) - self.component_data = 
_component_arrays_for_data_and_type(self._data, self.format, self.components) + self.component_data = ComponentDataList( + _component_arrays_for_data_and_type(self._data, self.format, self.components), + arrangement=_component_arrangement_from_format(self.format)) def __array__(self): return np.array(self.data) From a9392e5af833a28fc4d1cca443cf33fda1bad0f6 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 14 Oct 2019 16:47:17 +0100 Subject: [PATCH 44/76] grain: remove unneeded changes to VIDEOGRAIN.COMPONENT --- mediagrains/grain.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/mediagrains/grain.py b/mediagrains/grain.py index 9fce517..f3363dc 100644 --- a/mediagrains/grain.py +++ b/mediagrains/grain.py @@ -796,9 +796,7 @@ class COMPONENT(Mapping): length The total length of the data for this component in bytes """ - def __init__(self, parent, index, meta): - self.parent = parent - self.index = index + def __init__(self, meta): self.meta = meta def __getitem__(self, key): @@ -867,16 +865,16 @@ def __init__(self, parent): self.parent = parent def __getitem__(self, key): - return type(self.parent).COMPONENT(self, key, self.parent.meta['grain']['cog_frame']['components'][key]) + return type(self.parent).COMPONENT(self.parent.meta['grain']['cog_frame']['components'][key]) def __setitem__(self, key, value): - self.parent.meta['grain']['cog_frame']['components'][key] = type(self.parent).COMPONENT(self, key, value) + self.parent.meta['grain']['cog_frame']['components'][key] = type(self.parent).COMPONENT(value) def __delitem__(self, key): del self.parent.meta['grain']['cog_frame']['components'][key] def insert(self, key, value): - self.parent.meta['grain']['cog_frame']['components'].insert(key, type(self.parent).COMPONENT(self, key, value)) + self.parent.meta['grain']['cog_frame']['components'].insert(key, type(self.parent).COMPONENT(value)) def __len__(self): return len(self.parent.meta['grain']['cog_frame']['components']) From 
f979b982ce7597c501defe0946f6587e5d78bc83 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 14 Oct 2019 17:17:37 +0100 Subject: [PATCH 45/76] numpy: More comments and explanation --- mediagrains/cogenums.py | 11 +++++-- mediagrains_py36/numpy/__init__.py | 48 +++++++++++++++++++++++++----- tests/test36_numpy_videograin.py | 17 ++++++----- 3 files changed, 57 insertions(+), 19 deletions(-) diff --git a/mediagrains/cogenums.py b/mediagrains/cogenums.py index 5ed18ed..a51cb7d 100644 --- a/mediagrains/cogenums.py +++ b/mediagrains/cogenums.py @@ -31,7 +31,8 @@ 'CogAudioFormat', 'COG_FRAME_IS_PACKED', 'COG_FRAME_IS_COMPRESSED', - 'COG_FRAME_IS_RGB', + 'COG_FRAME_IS_PLANAR', + 'COG_FRAME_IS_PLANAR_RGB', 'COG_FRAME_FORMAT_BYTES_PER_VALUE', 'COG_FRAME_FORMAT_H_SHIFT', 'COG_FRAME_FORMAT_V_SHIFT', @@ -132,12 +133,16 @@ def COG_FRAME_IS_COMPRESSED(fmt): return ((fmt >> 9) & 0x1) != 0 +def COG_FRAME_IS_PLANAR(fmt): + return ((fmt >> 8) & 0x3) == 0 + + def COG_FRAME_IS_ALPHA(fmt): return ((fmt >> 7) & 0x1) != 0 -def COG_FRAME_IS_RGB(fmt): - return ((fmt >> 4) & 0x1) != 0 +def COG_FRAME_IS_PLANAR_RGB(fmt): + return ((fmt >> 4) & 0x31) == 1 def COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt): diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index 8dc3410..055fee4 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -23,8 +23,9 @@ CogFrameFormat, COG_FRAME_IS_PACKED, COG_FRAME_IS_COMPRESSED, + COG_FRAME_IS_PLANAR, COG_FRAME_FORMAT_BYTES_PER_VALUE, - COG_FRAME_IS_RGB) + COG_FRAME_IS_PLANAR_RGB) from mediagrains import grain as bytesgrain from mediagrains import grain_constructors as bytesgrain_constructors from copy import copy, deepcopy @@ -38,7 +39,13 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + """This method returns the numpy "data type" for a particular video format. 
+ + For planar and padded formats this is the size of the native integer type that is used to handle the samples (eg. 8bit, 16bit, etc ...) + For weird packed formats like v210 (10-bit samples packed so that there are 3 10-bit samples in every 32-bit word) this is not possible. + Instead for v210 we return int32, since that is the most useful native data type that always corresponds to an integral number of samples. + """ + if COG_FRAME_IS_PLANAR(fmt): if COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 1: return np.dtype(np.uint8) elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 2: @@ -47,6 +54,7 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: return np.dtype(np.int32) elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, + CogFrameFormat.AYUV, CogFrameFormat.RGB, CogFrameFormat.RGBx, CogFrameFormat.RGBA, @@ -89,23 +97,39 @@ def __init__(self, data: list, arrangement: ComponentOrder=ComponentOrder.X): def _component_arrangement_from_format(fmt: CogFrameFormat): - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): - if COG_FRAME_IS_RGB(fmt): + """This method returns the ordering of the components in the component data arrays that are used to represent a particular format. + + Note that for the likes of UYVY this will return YUV since the planes are represented in that order by the interface even though they + are interleved in the data. + + For formats where no meaningful component access can be provided (v210, compressed formats, etc ...) the value X is returned. 
+ """ + if COG_FRAME_IS_PLANAR(fmt): + if COG_FRAME_IS_PLANAR_RGB(fmt): return ComponentDataList.ComponentOrder.RGB else: return ComponentDataList.ComponentOrder.YUV - elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, CogFrameFormat.v216]: + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, CogFrameFormat.v216, CogFrameFormat.AYUV]: return ComponentDataList.ComponentOrder.YUV elif fmt in [CogFrameFormat.RGB, CogFrameFormat.RGBA, CogFrameFormat.RGBx, CogFrameFormat.ARGB, CogFrameFormat.xRGB]: return ComponentDataList.ComponentOrder.RGB - elif fmt in [CogFrameFormat.BGRA, CogFrameFormat.BGRx]: + elif fmt in [CogFrameFormat.BGRA, CogFrameFormat.BGRx, CogFrameFormat.xBGR, CogFrameFormat.ABGR]: return ComponentDataList.ComponentOrder.BGR else: return ComponentDataList.ComponentOrder.X def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, components: bytesgrain.VIDEOGRAIN.COMPONENT_LIST): - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + """This method returns a list of numpy array views which can be used to directly access the components of the video frame + without any need for conversion or copying. This is not possible for all formats. + + For planar formats this simply returns a list of array views of the planes. + + For interleaved formats this returns a list of array views that use stride tricks to access alternate elements in the source data array. + + For weird packed formats like v210 nothing can be done, an empty list is returned since no individual component access is possible. 
+ """ + if COG_FRAME_IS_PLANAR(fmt): arrays = [] for component in components: component_data = data[component.offset//data.itemsize:(component.offset + component.length)//data.itemsize] @@ -113,6 +137,7 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c arrays.append(component_data.transpose()) return arrays elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: + # Either 8 or 16 bits 4:2:2 interleavedd in UYVY order return [ as_strided(data[1:], shape=(components[0].height, components[0].width), @@ -124,6 +149,7 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c shape=(components[0].height, components[0].width//2), strides=(components[0].stride, data.itemsize*4)).transpose()] elif fmt == CogFrameFormat.YUYV: + # 8 bit 4:2:2 interleaved in YUYV order return [ as_strided(data, shape=(components[0].height, components[0].width), @@ -135,6 +161,7 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c shape=(components[0].height, components[0].width//2), strides=(components[0].stride, data.itemsize*4)).transpose()] elif fmt == CogFrameFormat.RGB: + # 8 bit 4:4:4 three components interleaved in RGB order return [ as_strided(data, shape=(components[0].height, components[0].width), @@ -149,6 +176,7 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c CogFrameFormat.RGBA, CogFrameFormat.BGRx, CogFrameFormat.BGRx]: + # 8 bit 4:4:4:4 four components interleave dropping the fourth component return [ as_strided(data, shape=(components[0].height, components[0].width), @@ -162,7 +190,9 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c elif fmt in [CogFrameFormat.ARGB, CogFrameFormat.xRGB, CogFrameFormat.ABGR, - CogFrameFormat.xBGR]: + CogFrameFormat.xBGR, + CogFrameFormat.AYUV]: + # 8 bit 4:4:4:4 four components interleave dropping the first component return [ as_strided(data[1:], shape=(components[0].height, 
components[0].width), @@ -175,6 +205,8 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c strides=(components[0].stride, data.itemsize*4)).transpose()] elif fmt == CogFrameFormat.v210: # v210 is barely supported. Convert it to something else to actually use it! + # This method returns an empty list because component access isn't supported, but + # the more basic access to the underlying data is. return [] raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index ee541a2..c0c229a 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -27,7 +27,8 @@ COG_FRAME_FORMAT_V_SHIFT, COG_FRAME_IS_PACKED, COG_FRAME_IS_COMPRESSED, - COG_FRAME_IS_RGB, + COG_FRAME_IS_PLANAR, + COG_FRAME_IS_PLANAR_RGB, COG_FRAME_FORMAT_ACTIVE_BITS) from mediatimestamp.immutable import Timestamp, TimeRange import mock @@ -39,7 +40,7 @@ class TestGrain (TestCase): def _get_bitdepth(self, fmt): - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + if COG_FRAME_IS_PLANAR(fmt): return COG_FRAME_FORMAT_ACTIVE_BITS(fmt) elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, @@ -61,7 +62,7 @@ def _get_bitdepth(self, fmt): raise Exception() def _get_hs_vs_and_bps(self, fmt): - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + if COG_FRAME_IS_PLANAR(fmt): return (COG_FRAME_FORMAT_H_SHIFT(fmt), COG_FRAME_FORMAT_V_SHIFT(fmt), COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt)) elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV]: return (1, 0, 1) @@ -83,8 +84,8 @@ def _get_hs_vs_and_bps(self, fmt): raise Exception() def _is_rgb(self, fmt): - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): - return COG_FRAME_IS_RGB(fmt) + if COG_FRAME_IS_PLANAR(fmt): + return COG_FRAME_IS_PLANAR_RGB(fmt) elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, CogFrameFormat.v216, 
@@ -120,7 +121,7 @@ def assertComponentsAreModifiable(self, grain): grain.component_data[1][x, y] = (y*16 + x) & 0x3F + 0x40 grain.component_data[2][x, y] = (y*16 + x) & 0x3F + 0x50 - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + if COG_FRAME_IS_PLANAR(fmt): for y in range(0, 16): for x in range(0, 16): self.assertEqual(grain.data[y*width + x], (y*16 + x) & 0x3F) @@ -208,7 +209,7 @@ def __inner(grain): (hs, vs, bps) = self._get_hs_vs_and_bps(fmt) - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + if COG_FRAME_IS_PLANAR(fmt): self.assertEqual(len(grain.components), 3) self.assertEqual(grain.components[0].stride, width*bps) self.assertEqual(grain.components[0].width, width) @@ -344,7 +345,7 @@ def __inner(grain): grain.component_data[1][x, y] = (y*16 + x) & 0x3F + 0x40 grain.component_data[2][x, y] = (y*16 + x) & 0x3F + 0x50 - if not COG_FRAME_IS_PACKED(fmt) and not COG_FRAME_IS_COMPRESSED(fmt): + if COG_FRAME_IS_PLANAR(fmt): for y in range(0, 16): for x in range(0, 16): self.assertEqual(grain.data[y*width + x], (y*16 + x) & 0x3F) From 7bd0b65f5b3f9a3fc82cdd78eee86831082a68a8 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 14 Oct 2019 17:44:12 +0100 Subject: [PATCH 46/76] numpy: Some code refactoring to make logic clearer in places --- mediagrains_py36/numpy/__init__.py | 92 ++++++++++++------------------ 1 file changed, 36 insertions(+), 56 deletions(-) diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index 055fee4..dd83fb3 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -119,6 +119,32 @@ def _component_arrangement_from_format(fmt: CogFrameFormat): return ComponentDataList.ComponentOrder.X +def _component_arrays_for_interleaved_422(data0: np.ndarray, data1: np.ndarray, data2: np.ndarray, width: int, height: int, stride: int, itemsize: int): + return [ + as_strided(data0, + shape=(height, width), + strides=(stride, 
itemsize*2)).transpose(), + as_strided(data1, + shape=(height, width//2), + strides=(stride, itemsize*4)).transpose(), + as_strided(data2, + shape=(height, width//2), + strides=(stride, itemsize*4)).transpose()] + + +def _component_arrays_for_interleaved_444_take_three(data0: np.ndarray, data1: np.ndarray, data2: np.ndarray, width: int, height: int, stride: int, itemsize: int, num_components: int = 3): + return [ + as_strided(data0, + shape=(height, width), + strides=(stride, itemsize*num_components)).transpose(), + as_strided(data1, + shape=(height, width), + strides=(stride, itemsize*num_components)).transpose(), + as_strided(data2, + shape=(height, width), + strides=(stride, itemsize*num_components)).transpose()] + + def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, components: bytesgrain.VIDEOGRAIN.COMPONENT_LIST): """This method returns a list of numpy array views which can be used to directly access the components of the video frame without any need for conversion or copying. This is not possible for all formats. @@ -130,79 +156,33 @@ def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, c For weird packed formats like v210 nothing can be done, an empty list is returned since no individual component access is possible. 
""" if COG_FRAME_IS_PLANAR(fmt): - arrays = [] - for component in components: - component_data = data[component.offset//data.itemsize:(component.offset + component.length)//data.itemsize] - component_data = as_strided(component_data, shape=(component.height, component.width), strides=(component.stride, component_data.itemsize)) - arrays.append(component_data.transpose()) - return arrays + return [ + as_strided(data[component.offset//data.itemsize:(component.offset + component.length)//data.itemsize], + shape=(component.height, component.width), + strides=(component.stride, data.itemsize)).transpose() + for component in components] elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: # Either 8 or 16 bits 4:2:2 interleavedd in UYVY order - return [ - as_strided(data[1:], - shape=(components[0].height, components[0].width), - strides=(components[0].stride, data.itemsize*2)).transpose(), - as_strided(data, - shape=(components[0].height, components[0].width//2), - strides=(components[0].stride, data.itemsize*4)).transpose(), - as_strided(data[2:], - shape=(components[0].height, components[0].width//2), - strides=(components[0].stride, data.itemsize*4)).transpose()] + return _component_arrays_for_interleaved_422(data[1:], data, data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize) elif fmt == CogFrameFormat.YUYV: # 8 bit 4:2:2 interleaved in YUYV order - return [ - as_strided(data, - shape=(components[0].height, components[0].width), - strides=(components[0].stride, data.itemsize*2)).transpose(), - as_strided(data[1:], - shape=(components[0].height, components[0].width//2), - strides=(components[0].stride, data.itemsize*4)).transpose(), - as_strided(data[3:], - shape=(components[0].height, components[0].width//2), - strides=(components[0].stride, data.itemsize*4)).transpose()] + return _component_arrays_for_interleaved_422(data, data[1:], data[3:], components[0].width, components[0].height, components[0].stride, data.itemsize) elif 
fmt == CogFrameFormat.RGB: # 8 bit 4:4:4 three components interleaved in RGB order - return [ - as_strided(data, - shape=(components[0].height, components[0].width), - strides=(components[0].stride, data.itemsize*3)).transpose(), - as_strided(data[1:], - shape=(components[0].height, components[0].width), - strides=(components[0].stride, data.itemsize*3)).transpose(), - as_strided(data[2:], - shape=(components[0].height, components[0].width), - strides=(components[0].stride, data.itemsize*3)).transpose()] + return _component_arrays_for_interleaved_444_take_three(data, data[1:], data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize) elif fmt in [CogFrameFormat.RGBx, CogFrameFormat.RGBA, CogFrameFormat.BGRx, CogFrameFormat.BGRx]: # 8 bit 4:4:4:4 four components interleave dropping the fourth component - return [ - as_strided(data, - shape=(components[0].height, components[0].width), - strides=(components[0].stride, data.itemsize*4)).transpose(), - as_strided(data[1:], - shape=(components[0].height, components[0].width), - strides=(components[0].stride, data.itemsize*4)).transpose(), - as_strided(data[2:], - shape=(components[0].height, components[0].width), - strides=(components[0].stride, data.itemsize*4)).transpose()] + return _component_arrays_for_interleaved_444_take_three(data, data[1:], data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize, num_components=4) elif fmt in [CogFrameFormat.ARGB, CogFrameFormat.xRGB, CogFrameFormat.ABGR, CogFrameFormat.xBGR, CogFrameFormat.AYUV]: # 8 bit 4:4:4:4 four components interleave dropping the first component - return [ - as_strided(data[1:], - shape=(components[0].height, components[0].width), - strides=(components[0].stride, data.itemsize*4)).transpose(), - as_strided(data[2:], - shape=(components[0].height, components[0].width), - strides=(components[0].stride, data.itemsize*4)).transpose(), - as_strided(data[3:], - shape=(components[0].height, 
components[0].width), - strides=(components[0].stride, data.itemsize*4)).transpose()] + return _component_arrays_for_interleaved_444_take_three(data[1:], data[2:], data[3:], components[0].width, components[0].height, components[0].stride, data.itemsize, num_components=4) elif fmt == CogFrameFormat.v210: # v210 is barely supported. Convert it to something else to actually use it! # This method returns an empty list because component access isn't supported, but From 36315c93ce1fb03f0f756d1e0fda727c855f84a7 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 14 Oct 2019 17:59:10 +0100 Subject: [PATCH 47/76] numpy: Add numpy to README --- README.md | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 824a032..938a372 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ documentation for more details. ### Requirements -* A working Python 2.7 or Python 3.x installation +* A working Python 2.7 or Python 3.6+ installation * BBC R&D's internal deb repository set up as a source for apt (if installing via apt-get) * The tool [tox](https://tox.readthedocs.io/en/latest/) is needed to run the unittests, but not required to use the library. @@ -85,6 +85,8 @@ it with colour-bars: ... i += 1 ``` +(In python3.6+ a more natural interface for accessing data exists in the form of numpy arrays. See later.) + The object grain can then be freely used for whatever video processing is desired, or it can be serialised into a GSF file as follows: @@ -159,6 +161,30 @@ between two grains, both as a printed string (as seen above) and also in a data-centric fashion as a tree structure which can be interrogated in code. +### Numpy arrays (Python 3.6+) + +In python 3.6 or higher an additional feature is provided in the form of numpy array access to the data in a grain. 
As such the above example of creating colourbars can be done more easily: + +```Python console +>>> from mediagrains.numpy import VideoGrain +>>> from uuid import uuid1 +>>> from mediagrains.cogenums import CogFrameFormat, CogFrameLayout +>>> src_id = uuid1() +>>> flow_id = uuid1() +>>> grain = VideoGrain(src_id, flow_id, cog_frame_format=CogFrameFormat.S16_422_10BIT, width=1920, height=1080) +>>> colours = [ +... (0x3FF, 0x000, 0x3FF), +... (0x3FF, 0x3FF, 0x000), +... (0x3FF, 0x000, 0x000), +... (0x3FF, 0x3FF, 0x3FF), +... (0x3FF, 0x200, 0x3FF), +... (0x3FF, 0x3FF, 0x200) ] +>>> for c in range(0, 3): +... for x in range(0, grain.components[c].width): +... for y in range(0, grain.components[c].height): +... grain.component_data[c][x, y] = colours[x*len(colours)//grain.components[c].width][c] +``` + ## Documentation The API is well documented in the docstrings of the module mediagrains, to view: From 112cb3ce86ed1c946fa3721f588eec30ef6537ee Mon Sep 17 00:00:00 2001 From: Philip de Nier Date: Tue, 15 Oct 2019 15:28:04 +0100 Subject: [PATCH 48/76] support planar video psnr in grain comparisons --- CHANGELOG.md | 1 + mediagrains/cogenums.py | 2 +- mediagrains/comparison/__init__.py | 3 +- mediagrains/comparison/_internal.py | 85 +++++++++++++++++------ mediagrains/comparison/options.py | 62 ++++++++++++++++- mediagrains/comparison/psnr.py | 103 ++++++++++++++++++++++++++++ setup.py | 1 + 7 files changed, 233 insertions(+), 24 deletions(-) create mode 100644 mediagrains/comparison/psnr.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d06829..7e1f81f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ - Added `Colourbars` test signal generator - Added `MovingBarOverlay` for test signal generators - Added `mediagrains.numpy` sublibrary for handling video grains as numpy arrays, in python 3.6+ +- Added `PSNR` option to grain compare. 
## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does diff --git a/mediagrains/cogenums.py b/mediagrains/cogenums.py index a51cb7d..b5c5aa3 100644 --- a/mediagrains/cogenums.py +++ b/mediagrains/cogenums.py @@ -163,4 +163,4 @@ def COG_FRAME_FORMAT_V_SHIFT(fmt): def COG_FRAME_FORMAT_ACTIVE_BITS(fmt): - return (((int(fmt)) >> 10) & 0x3F) \ No newline at end of file + return (((int(fmt)) >> 10) & 0x3F) diff --git a/mediagrains/comparison/__init__.py b/mediagrains/comparison/__init__.py index cd81829..ac3f15e 100644 --- a/mediagrains/comparison/__init__.py +++ b/mediagrains/comparison/__init__.py @@ -29,8 +29,9 @@ from __future__ import absolute_import from ._internal import GrainComparisonResult, GrainIteratorComparisonResult +from .psnr import compute_psnr -__all__ = ["compare_grain"] +__all__ = ["compare_grain", "compute_psnr"] # diff --git a/mediagrains/comparison/_internal.py b/mediagrains/comparison/_internal.py index 86b41b2..018ba38 100644 --- a/mediagrains/comparison/_internal.py +++ b/mediagrains/comparison/_internal.py @@ -27,7 +27,8 @@ from ..cogenums import CogAudioFormat, CogFrameFormat, COG_FRAME_IS_PACKED, COG_FRAME_IS_COMPRESSED, COG_FRAME_FORMAT_BYTES_PER_VALUE -from .options import Exclude, Include, ComparisonExclude, ComparisonExpectDifferenceMatches +from .options import Exclude, Include, ComparisonExclude, ComparisonExpectDifferenceMatches, ComparisonPSNR +from .psnr import compute_psnr # @@ -353,6 +354,40 @@ def __init__(self, identifier, a, b, expected_difference=TimeOffset(0), **kwargs super(TimestampDifferanceComparisonResult, self).__init__(identifier, a, b, expected_difference=expected_difference, **kwargs) +class PSNRComparisonResult(ComparisonResult): + def __init__(self, identifier, a, b, **kwargs): + """Compute the PSNR for two grains and compare the result with the expected values and comparison operator. 
+ + :param identifier: The path in the grain structure + :param a: A GRAIN + :param b: Another GRAIN + :param kwargs: Other named arguments + """ + super(PSNRComparisonResult, self).__init__(identifier, a, b, **kwargs) + + def compare(self, a, b): + opts = [option for option in self._options if isinstance(option, ComparisonPSNR) and self.identifier == option.path] + + if self.excluded(): + return (False, "For speed reasons not comparing {} and {} when this would be excluded".format(self._identifier.format('a'), + self._identifier.format('b')), []) + + try: + psnr = compute_psnr(a, b) + except NotImplementedError: + return (False, "Grain is not supported for PSNR comparison of {} and {}".format(self._identifier.format('a'), + self._identifier.format('b')), []) + + if all(opt.matcher(psnr) for opt in opts): + return (True, "PSNR({}, {}) == {!r}, meets requirements set in options".format(self._identifier.format('a'), + self._identifier.format('b'), + psnr), []) + else: + return (False, "PSNR({}, {}) == {!r}, does not meet requirements set in options".format(self._identifier.format('a'), + self._identifier.format('b'), + psnr), []) + + class AOnlyComparisonResult(ComparisonResult): def __init__(self, identifier, a, **kwargs): super(AOnlyComparisonResult, self).__init__(identifier, a, None, **kwargs) @@ -594,29 +629,37 @@ def compare(self, a, b): children[key] = EqualityComparisonResult(path, getattr(a, key), getattr(b, key), options=self._options, attr=key) if a.format == b.format: - if COG_FRAME_IS_COMPRESSED(a.format): - wc = 'B' - elif a.format == CogFrameFormat.v210: - wc = 'I' - elif a.format == CogFrameFormat.v216: - wc = 'H' - elif COG_FRAME_IS_PACKED(a.format): - wc = 'B' + path = self._identifier + '.data' + compare_psnr = len([option for option in self._options if isinstance(option, ComparisonPSNR) and path == option.path]) != 0 + if compare_psnr: + children['data'] = PSNRComparisonResult(path, + a, + b, + options=self._options) else: - if 
COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 1: + if COG_FRAME_IS_COMPRESSED(a.format): wc = 'B' - elif COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 2: - wc = 'H' - elif COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 4: + elif a.format == CogFrameFormat.v210: wc = 'I' - - children['data'] = DataEqualityComparisonResult(self._identifier + ".data", - a.data, - b.data, - options=self._options, - attr="data", - alignment="@", - word_code=wc) + elif a.format == CogFrameFormat.v216: + wc = 'H' + elif COG_FRAME_IS_PACKED(a.format): + wc = 'B' + else: + if COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 1: + wc = 'B' + elif COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 2: + wc = 'H' + elif COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 4: + wc = 'I' + + children['data'] = DataEqualityComparisonResult(self._identifier + ".data", + a.data, + b.data, + options=self._options, + attr="data", + alignment="@", + word_code=wc) else: self._options.append(Exclude.data) children['data'] = FailingComparisonResult(self._identifier + ".data", diff --git a/mediagrains/comparison/options.py b/mediagrains/comparison/options.py index 1213b5a..c0ff9be 100644 --- a/mediagrains/comparison/options.py +++ b/mediagrains/comparison/options.py @@ -44,7 +44,7 @@ from __future__ import absolute_import -__all__ = ["Exclude", "Include", "ExpectedDifference", "CompareOnlyMetadata"] +__all__ = ["Exclude", "Include", "ExpectedDifference", "CompareOnlyMetadata", "PSNR"] # @@ -108,6 +108,53 @@ def __getattr__(self, attr): return _ExpectedDifference(self.path + "." 
+ attr) +class _PSNR(object): + def __init__(self, path): + self.path = path + + def __repr__(self): + return self.path.format("PSNR") + + def __lt__(self, other): + def _compare_psnr(x, other): + for comp_x, comp_other in zip(x, other): + if comp_other is not None and not comp_x < comp_other: + return False + return True + + return ComparisonPSNR(self.path, lambda x: _compare_psnr(x, other), "{} < {!r}".format('PSNR', other)) + + def __le__(self, other): + def _compare_psnr(x, other): + for comp_x, comp_other in zip(x, other): + if comp_other is not None and not comp_x <= comp_other: + return False + return True + + return ComparisonPSNR(self.path, lambda x: _compare_psnr(x, other), "{} <= {!r}".format('PSNR', other)) + + def __gt__(self, other): + def _compare_psnr(x, other): + for comp_x, comp_other in zip(x, other): + if comp_other is not None and not comp_x > comp_other: + return False + return True + + return ComparisonPSNR(self.path, lambda x: _compare_psnr(x, other), "{} > {!r}".format('PSNR', other)) + + def __ge__(self, other): + def _compare_psnr(x, other): + for comp_x, comp_other in zip(x, other): + if comp_other is not None and not comp_x >= comp_other: + return False + return True + + return ComparisonPSNR(self.path, lambda x: _compare_psnr(x, other), "{} >= {!r}".format('PSNR', other)) + + def __getattr__(self, attr): + return _PSNR(self.path + "." 
+ attr) + + class ComparisonOption(object): def __init__(self, path): self.path = path @@ -142,6 +189,16 @@ def __repr__(self): return self._repr +class ComparisonPSNR(ComparisonOption): + def __init__(self, path, matcher, _repr): + self.matcher = matcher + self._repr = _repr + super(ComparisonPSNR, self).__init__(path) + + def __repr__(self): + return self._repr + + Exclude = _Exclude() @@ -152,3 +209,6 @@ def __repr__(self): CompareOnlyMetadata = Exclude.data + + +PSNR = _PSNR("{}") diff --git a/mediagrains/comparison/psnr.py b/mediagrains/comparison/psnr.py new file mode 100644 index 0000000..8a27c3c --- /dev/null +++ b/mediagrains/comparison/psnr.py @@ -0,0 +1,103 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from __future__ import print_function +from __future__ import absolute_import + +import math +import numpy as np + +from ..cogenums import COG_FRAME_FORMAT_BYTES_PER_VALUE, COG_FRAME_FORMAT_ACTIVE_BITS +from ..cogenums import COG_FRAME_IS_COMPRESSED, COG_FRAME_IS_PACKED + +__all__ = ["compute_psnr"] + + +def _compute_comp_mse(format, data_a, comp_a, data_b, comp_b): + """Compute MSE (Mean Squared Error) for video component. + + Currently supports planar components only. 
+ + :param format: The COG format + :param data_a: Data bytes for GRAIN component a + :param comp_a: COMPONENT for GRAIN a + :param data_b: Data bytes for GRAIN component b + :param comp_b: COMPONENT for GRAIN b + :returns: The MSE value + """ + bpp = COG_FRAME_FORMAT_BYTES_PER_VALUE(format) + if bpp == 1: + dtype = np.uint8 + elif bpp == 2: + dtype = np.uint16 + elif bpp == 4: + dtype = np.uint32 + + total = 0 + for y in range(0, comp_a.height): + line_a = data_a[y*comp_a.stride + comp_a.offset:y*comp_a.stride + comp_a.offset + comp_a.width*bpp] + line_b = data_b[y*comp_b.stride + comp_b.offset:y*comp_b.stride + comp_b.offset + comp_b.width*bpp] + np_line_a = np.frombuffer(line_a, dtype=dtype) + np_line_b = np.frombuffer(line_b, dtype=dtype) + total += np.sum(np.square(np.subtract(np_line_a, np_line_b))) + + return total / (comp_a.width*comp_a.height) + + +def _compute_comp_psnr(format, data_a, comp_a, data_b, comp_b, max_val): + """Compute PSNR for video component. + + Currently supports planar components only. + + :param format: The COG format + :param data_a: Data bytes for GRAIN component a + :param comp_a: COMPONENT for GRAIN a + :param data_b: Data bytes for GRAIN component b + :param comp_b: COMPONENT for GRAIN b + :param max_val: Maximum value for a component pixel + :returns: The PSNR + """ + mse = _compute_comp_mse(format, data_a, comp_a, data_b, comp_b) + if mse == 0: + return float('Inf') + else: + return 10.0 * math.log10((max_val**2)/mse) + + +def compute_psnr(grain_a, grain_b): + """Compute PSNR for video grains. 
+ + :param grain_a: A video GRAIN + :param grain_b: A video GRAIN + :returns: A list of PSNR value for each video component + """ + if grain_a.grain_type != grain_b.grain_type or grain_a.grain_type != "video": + raise AttributeError("Invalid grain types") + if grain_a.width != grain_b.width or grain_a.height != grain_b.height: + raise AttributeError("Frame dimensions differ") + + if COG_FRAME_IS_COMPRESSED(grain_a.format) or COG_FRAME_IS_PACKED(grain_a.format): + raise NotImplementedError("Grain format not supported") + + psnr = [] + data_a = bytes(grain_a.data) + data_b = bytes(grain_b.data) + max_val = (1 << COG_FRAME_FORMAT_ACTIVE_BITS(grain_a.format)) - 1 + for comp_a, comp_b in zip(grain_a.components, grain_b.components): + psnr.append(_compute_comp_psnr(grain_a.format, data_a, comp_a, data_b, comp_b, max_val)) + + return psnr diff --git a/setup.py b/setup.py index 1b9aac0..e31c6b8 100644 --- a/setup.py +++ b/setup.py @@ -33,6 +33,7 @@ "six >= 1.10.0", "frozendict >= 1.2", 'numpy >= 1.17.2;python_version>="3.6"', + 'numpy;python_version<"3.6"' ] deps_required = [] From 0c9259d1f1b64b48b097eb059b219fef98e94019 Mon Sep 17 00:00:00 2001 From: Philip de Nier Date: Tue, 15 Oct 2019 17:38:48 +0100 Subject: [PATCH 49/76] add a psnr test --- tests/test_psnr.py | 152 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100644 tests/test_psnr.py diff --git a/tests/test_psnr.py b/tests/test_psnr.py new file mode 100644 index 0000000..42a7161 --- /dev/null +++ b/tests/test_psnr.py @@ -0,0 +1,152 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from __future__ import print_function +from __future__ import absolute_import + +from unittest import TestCase +import uuid + +from mediagrains import VideoGrain +from mediagrains.cogenums import CogFrameFormat +from mediagrains.comparison import compute_psnr + +SRC_ID = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") +FLOW_ID = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + + +pixel_ranges = { + CogFrameFormat.U8_444: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.U8_422: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.U8_420: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.S16_444_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_422_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_420_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_444_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_422_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_420_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_444: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), + CogFrameFormat.S16_422: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), + CogFrameFormat.S16_420: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), +} + + +def set_colour_bars(vg, noise_mask=0xffff): + """The code, except for the noise_mask, was copied from testsignalgenerator. It was duplicated here to keep + the unit tests isolated. 
+ + :params vg: A video GRAIN + :params noise_mask: A mask applied to the colour bar line pixels + """ + cog_frame_format = vg.format + intensity = 0.75 + + _bpp = pixel_ranges[cog_frame_format][0] + _steps = 8 + bs = 16 - pixel_ranges[cog_frame_format][4] + + values = [ + (int((0xFFFF >> bs) * intensity), 0x8000 >> bs, 0x8000 >> bs), + (int((0xE1FF >> bs) * intensity), 0x0000 >> bs, 0x9400 >> bs), + (int((0xB200 >> bs) * intensity), 0xABFF >> bs, 0x0000 >> bs), + (int((0x95FF >> bs) * intensity), 0x2BFF >> bs, 0x15FF >> bs), + (int((0x69FF >> bs) * intensity), 0xD400 >> bs, 0xEA00 >> bs), + (int((0x4C00 >> bs) * intensity), 0x5400 >> bs, 0xFFFF >> bs), + (int((0x1DFF >> bs) * intensity), 0xFFFF >> bs, 0x6BFF >> bs), + (int((0x0000 >> bs) * intensity), 0x8000 >> bs, 0x8000 >> bs)] + + lines = [bytearray(vg.components[0].width*_bpp), bytearray(vg.components[1].width*_bpp), bytearray(vg.components[2].width*_bpp)] + for c in range(0, 3): + for x in range(0, vg.components[c].width): + pos = x//(vg.components[c].width//_steps) + if _bpp == 1: + lines[c][x] = values[pos][c] & noise_mask + elif _bpp == 2: + lines[c][2*x + 0] = ((values[pos][c] & noise_mask) & 0xFF) + lines[c][2*x + 1] = ((values[pos][c] & noise_mask) >> 8) & 0xFF + + for c in range(0, 3): + for y in range(0, vg.components[c].height): + offset = vg.components[c].offset + y*vg.components[c].stride + vg.data[offset:offset + vg.components[c].width*_bpp] = lines[c] + + +class TestPSNR(TestCase): + def _check_psnr_range(self, computed, ranges, max_diff): + for psnr, psnr_range in zip(computed, ranges): + if psnr < psnr_range - max_diff or psnr > psnr_range + max_diff: + return False + return True + + def _create_grain(self, cog_frame_format): + return VideoGrain(SRC_ID, FLOW_ID, + cog_frame_format=cog_frame_format, + width=480, height=270) + + def test_identical_data(self): + grain = self._create_grain(CogFrameFormat.U8_422) + set_colour_bars(grain, noise_mask=0xfa) + + self.assertEqual(compute_psnr(grain, 
grain), [float('Inf'), float('Inf'), float('Inf')]) + + def test_planar_8bit(self): + grain_a = self._create_grain(CogFrameFormat.U8_422) + set_colour_bars(grain_a) + grain_b = self._create_grain(CogFrameFormat.U8_422) + set_colour_bars(grain_b, noise_mask=0xfffa) + + psnr = compute_psnr(grain_a, grain_b) + self.assertTrue(self._check_psnr_range(psnr, [36.47984486113692, 39.45318336217709, 38.90095545159027], 0.1)) + + def test_planar_10bit(self): + grain_a = self._create_grain(CogFrameFormat.S16_422_10BIT) + set_colour_bars(grain_a) + grain_b = self._create_grain(CogFrameFormat.S16_422_10BIT) + set_colour_bars(grain_b, noise_mask=0xfffa) + + psnr = compute_psnr(grain_a, grain_b) + self.assertTrue(self._check_psnr_range(psnr, [48.8541475647564, 50.477799910245636, 50.477799910245636], 0.1)) + + def test_planar_12bit(self): + grain_a = self._create_grain(CogFrameFormat.S16_422_12BIT) + set_colour_bars(grain_a) + grain_b = self._create_grain(CogFrameFormat.S16_422_12BIT) + set_colour_bars(grain_b, noise_mask=0xfffa) + + psnr = compute_psnr(grain_a, grain_b) + self.assertTrue(self._check_psnr_range(psnr, [60.30687786176762, 62.525365357931186, 62.525365357931186], 0.1)) + + def test_planar_16bit(self): + grain_a = self._create_grain(CogFrameFormat.S16_422) + set_colour_bars(grain_a) + grain_b = self._create_grain(CogFrameFormat.S16_422) + set_colour_bars(grain_b, noise_mask=0xfffa) + + psnr = compute_psnr(grain_a, grain_b) + self.assertTrue(self._check_psnr_range(psnr, [84.39126581514387, 86.60975331130743, 86.60975331130743], 0.1)) + + def test_compressed_unsupported(self): + grain = self._create_grain(CogFrameFormat.H264) + + with self.assertRaises(NotImplementedError): + compute_psnr(grain, grain) + + def test_packed_unsupported(self): + grain = self._create_grain(CogFrameFormat.UYVY) + + with self.assertRaises(NotImplementedError): + compute_psnr(grain, grain) From 4fe1317e2fc8a9551683db986186c51cad7f4454 Mon Sep 17 00:00:00 2001 From: Philip de Nier Date: Wed, 16 
Oct 2019 13:21:53 +0100 Subject: [PATCH 50/76] simplify psnr test --- tests/test_psnr.py | 41 +++++++++++++---------------------------- 1 file changed, 13 insertions(+), 28 deletions(-) diff --git a/tests/test_psnr.py b/tests/test_psnr.py index 42a7161..f77cd9a 100644 --- a/tests/test_psnr.py +++ b/tests/test_psnr.py @@ -97,6 +97,15 @@ def _create_grain(self, cog_frame_format): cog_frame_format=cog_frame_format, width=480, height=270) + def _test_format(self, cog_frame_format, expected): + grain_a = self._create_grain(cog_frame_format) + set_colour_bars(grain_a) + grain_b = self._create_grain(cog_frame_format) + set_colour_bars(grain_b, noise_mask=0xfffa) + + psnr = compute_psnr(grain_a, grain_b) + self.assertTrue(self._check_psnr_range(psnr, expected, 0.1)) + def test_identical_data(self): grain = self._create_grain(CogFrameFormat.U8_422) set_colour_bars(grain, noise_mask=0xfa) @@ -104,40 +113,16 @@ def test_identical_data(self): self.assertEqual(compute_psnr(grain, grain), [float('Inf'), float('Inf'), float('Inf')]) def test_planar_8bit(self): - grain_a = self._create_grain(CogFrameFormat.U8_422) - set_colour_bars(grain_a) - grain_b = self._create_grain(CogFrameFormat.U8_422) - set_colour_bars(grain_b, noise_mask=0xfffa) - - psnr = compute_psnr(grain_a, grain_b) - self.assertTrue(self._check_psnr_range(psnr, [36.47984486113692, 39.45318336217709, 38.90095545159027], 0.1)) + self._test_format(CogFrameFormat.U8_422, [36.47984486113692, 39.45318336217709, 38.90095545159027]) def test_planar_10bit(self): - grain_a = self._create_grain(CogFrameFormat.S16_422_10BIT) - set_colour_bars(grain_a) - grain_b = self._create_grain(CogFrameFormat.S16_422_10BIT) - set_colour_bars(grain_b, noise_mask=0xfffa) - - psnr = compute_psnr(grain_a, grain_b) - self.assertTrue(self._check_psnr_range(psnr, [48.8541475647564, 50.477799910245636, 50.477799910245636], 0.1)) + self._test_format(CogFrameFormat.S16_422_10BIT, [48.8541475647564, 50.477799910245636, 50.477799910245636]) def 
test_planar_12bit(self): - grain_a = self._create_grain(CogFrameFormat.S16_422_12BIT) - set_colour_bars(grain_a) - grain_b = self._create_grain(CogFrameFormat.S16_422_12BIT) - set_colour_bars(grain_b, noise_mask=0xfffa) - - psnr = compute_psnr(grain_a, grain_b) - self.assertTrue(self._check_psnr_range(psnr, [60.30687786176762, 62.525365357931186, 62.525365357931186], 0.1)) + self._test_format(CogFrameFormat.S16_422_12BIT, [60.30687786176762, 62.525365357931186, 62.525365357931186]) def test_planar_16bit(self): - grain_a = self._create_grain(CogFrameFormat.S16_422) - set_colour_bars(grain_a) - grain_b = self._create_grain(CogFrameFormat.S16_422) - set_colour_bars(grain_b, noise_mask=0xfffa) - - psnr = compute_psnr(grain_a, grain_b) - self.assertTrue(self._check_psnr_range(psnr, [84.39126581514387, 86.60975331130743, 86.60975331130743], 0.1)) + self._test_format(CogFrameFormat.S16_422, [84.39126581514387, 86.60975331130743, 86.60975331130743]) def test_compressed_unsupported(self): grain = self._create_grain(CogFrameFormat.H264) From f6042d319a88eb255b87c7a52e61466ebaa34be2 Mon Sep 17 00:00:00 2001 From: Philip de Nier Date: Wed, 16 Oct 2019 15:12:42 +0100 Subject: [PATCH 51/76] support psnr on numpy grains including packed --- mediagrains/comparison/psnr.py | 177 +++++++++++++++++---------------- mediagrains_py36/psnr.py | 75 ++++++++++++++ tests/test_psnr.py | 88 +++++++++++----- 3 files changed, 232 insertions(+), 108 deletions(-) create mode 100644 mediagrains_py36/psnr.py diff --git a/mediagrains/comparison/psnr.py b/mediagrains/comparison/psnr.py index 8a27c3c..2ffba46 100644 --- a/mediagrains/comparison/psnr.py +++ b/mediagrains/comparison/psnr.py @@ -18,86 +18,97 @@ from __future__ import print_function from __future__ import absolute_import -import math -import numpy as np - -from ..cogenums import COG_FRAME_FORMAT_BYTES_PER_VALUE, COG_FRAME_FORMAT_ACTIVE_BITS -from ..cogenums import COG_FRAME_IS_COMPRESSED, COG_FRAME_IS_PACKED - -__all__ = ["compute_psnr"] 
- - -def _compute_comp_mse(format, data_a, comp_a, data_b, comp_b): - """Compute MSE (Mean Squared Error) for video component. - - Currently supports planar components only. - - :param format: The COG format - :param data_a: Data bytes for GRAIN component a - :param comp_a: COMPONENT for GRAIN a - :param data_b: Data bytes for GRAIN component b - :param comp_b: COMPONENT for GRAIN b - :returns: The MSE value - """ - bpp = COG_FRAME_FORMAT_BYTES_PER_VALUE(format) - if bpp == 1: - dtype = np.uint8 - elif bpp == 2: - dtype = np.uint16 - elif bpp == 4: - dtype = np.uint32 - - total = 0 - for y in range(0, comp_a.height): - line_a = data_a[y*comp_a.stride + comp_a.offset:y*comp_a.stride + comp_a.offset + comp_a.width*bpp] - line_b = data_b[y*comp_b.stride + comp_b.offset:y*comp_b.stride + comp_b.offset + comp_b.width*bpp] - np_line_a = np.frombuffer(line_a, dtype=dtype) - np_line_b = np.frombuffer(line_b, dtype=dtype) - total += np.sum(np.square(np.subtract(np_line_a, np_line_b))) - - return total / (comp_a.width*comp_a.height) - - -def _compute_comp_psnr(format, data_a, comp_a, data_b, comp_b, max_val): - """Compute PSNR for video component. - - Currently supports planar components only. - - :param format: The COG format - :param data_a: Data bytes for GRAIN component a - :param comp_a: COMPONENT for GRAIN a - :param data_b: Data bytes for GRAIN component b - :param comp_b: COMPONENT for GRAIN b - :param max_val: Maximum value for a component pixel - :returns: The PSNR - """ - mse = _compute_comp_mse(format, data_a, comp_a, data_b, comp_b) - if mse == 0: - return float('Inf') - else: - return 10.0 * math.log10((max_val**2)/mse) - - -def compute_psnr(grain_a, grain_b): - """Compute PSNR for video grains. 
- - :param grain_a: A video GRAIN - :param grain_b: A video GRAIN - :returns: A list of PSNR value for each video component - """ - if grain_a.grain_type != grain_b.grain_type or grain_a.grain_type != "video": - raise AttributeError("Invalid grain types") - if grain_a.width != grain_b.width or grain_a.height != grain_b.height: - raise AttributeError("Frame dimensions differ") - - if COG_FRAME_IS_COMPRESSED(grain_a.format) or COG_FRAME_IS_PACKED(grain_a.format): - raise NotImplementedError("Grain format not supported") - - psnr = [] - data_a = bytes(grain_a.data) - data_b = bytes(grain_b.data) - max_val = (1 << COG_FRAME_FORMAT_ACTIVE_BITS(grain_a.format)) - 1 - for comp_a, comp_b in zip(grain_a.components, grain_b.components): - psnr.append(_compute_comp_psnr(grain_a.format, data_a, comp_a, data_b, comp_b, max_val)) - - return psnr +from sys import version_info + +if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + from mediagrains_py36.psnr import compute_psnr + + __all__ = ["compute_psnr"] + +else: + import math + import numpy as np + + from ..cogenums import COG_FRAME_FORMAT_BYTES_PER_VALUE, COG_FRAME_FORMAT_ACTIVE_BITS + from ..cogenums import COG_FRAME_IS_COMPRESSED, COG_FRAME_IS_PACKED + + __all__ = ["compute_psnr"] + + + def _compute_comp_mse(format, data_a, comp_a, data_b, comp_b): + """Compute MSE (Mean Squared Error) for video component. + + Currently supports planar components only. 
+ + :param format: The COG format + :param data_a: Data bytes for GRAIN component a + :param comp_a: COMPONENT for GRAIN a + :param data_b: Data bytes for GRAIN component b + :param comp_b: COMPONENT for GRAIN b + :returns: The MSE value + """ + if COG_FRAME_IS_PACKED(format): + raise NotImplementedError("Packed video format is not supported in this version of python") + + bpp = COG_FRAME_FORMAT_BYTES_PER_VALUE(format) + if bpp == 1: + dtype = np.uint8 + elif bpp == 2: + dtype = np.uint16 + elif bpp == 4: + dtype = np.uint32 + + total = 0 + for y in range(0, comp_a.height): + line_a = data_a[y*comp_a.stride + comp_a.offset:y*comp_a.stride + comp_a.offset + comp_a.width*bpp] + line_b = data_b[y*comp_b.stride + comp_b.offset:y*comp_b.stride + comp_b.offset + comp_b.width*bpp] + np_line_a = np.frombuffer(line_a, dtype=dtype) + np_line_b = np.frombuffer(line_b, dtype=dtype) + total += np.sum(np.square(np.subtract(np_line_a, np_line_b))) + + return total / (comp_a.width*comp_a.height) + + + def _compute_comp_psnr(format, data_a, comp_a, data_b, comp_b, max_val): + """Compute PSNR for video component. + + Currently supports planar components only. + + :param format: The COG format + :param data_a: Data bytes for GRAIN component a + :param comp_a: COMPONENT for GRAIN a + :param data_b: Data bytes for GRAIN component b + :param comp_b: COMPONENT for GRAIN b + :param max_val: Maximum value for a component pixel + :returns: The PSNR + """ + mse = _compute_comp_mse(format, data_a, comp_a, data_b, comp_b) + if mse == 0: + return float('Inf') + else: + return 10.0 * math.log10((max_val**2)/mse) + + + def compute_psnr(grain_a, grain_b): + """Compute PSNR for video grains. 
+ + :param grain_a: A video GRAIN + :param grain_b: A video GRAIN + :returns: A list of PSNR value for each video component + """ + if grain_a.grain_type != grain_b.grain_type or grain_a.grain_type != "video": + raise AttributeError("Invalid grain types") + if grain_a.width != grain_b.width or grain_a.height != grain_b.height: + raise AttributeError("Frame dimensions differ") + + if COG_FRAME_IS_COMPRESSED(grain_a.format): + raise NotImplementedError("Compressed video is not supported") + + psnr = [] + data_a = bytes(grain_a.data) + data_b = bytes(grain_b.data) + max_val = (1 << COG_FRAME_FORMAT_ACTIVE_BITS(grain_a.format)) - 1 + for comp_a, comp_b in zip(grain_a.components, grain_b.components): + psnr.append(_compute_comp_psnr(grain_a.format, data_a, comp_a, data_b, comp_b, max_val)) + + return psnr diff --git a/mediagrains_py36/psnr.py b/mediagrains_py36/psnr.py new file mode 100644 index 0000000..e3f53ec --- /dev/null +++ b/mediagrains_py36/psnr.py @@ -0,0 +1,75 @@ +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import math +import numpy as np + +from mediagrains.cogenums import COG_FRAME_IS_COMPRESSED, COG_FRAME_FORMAT_ACTIVE_BITS +from mediagrains.numpy import VideoGrain as numpy_VideoGrain, VIDEOGRAIN as numpy_VIDEOGRAIN + +__all__ = ["compute_psnr"] + + +def _compute_comp_mse(data_a, data_b): + """Compute MSE (Mean Squared Error) for video component. 
+ + :param data_a: Data for component a + :param data_b: Data for component b + :returns: The MSE value + """ + return np.mean(np.square(np.subtract(data_a, data_b))) + + +def _compute_comp_psnr(data_a, data_b, max_val): + """Compute PSNR for video component. + + :param data_a: Data for component a + :param data_b: Data for component b + :param max_val: Maximum value for a component pixel + :returns: The PSNR + """ + mse = _compute_comp_mse(data_a, data_b) + if mse == 0: + return float('Inf') + else: + return 10.0 * math.log10((max_val**2)/mse) + + +def compute_psnr(grain_a, grain_b): + """Compute PSNR for video grains. + + :param grain_a: A VIDEOGRAIN + :param grain_b: A VIDEOGRAIN + :returns: A list of PSNR value for each video component + """ + if grain_a.grain_type != grain_b.grain_type or grain_a.grain_type != "video": + raise AttributeError("Invalid grain types") + if grain_a.width != grain_b.width or grain_a.height != grain_b.height: + raise AttributeError("Frame dimensions differ") + + if COG_FRAME_IS_COMPRESSED(grain_a.format): + raise NotImplementedError("Compressed video is not supported") + + if not isinstance(grain_a, numpy_VIDEOGRAIN): + grain_a = numpy_VideoGrain(grain_a) + if not isinstance(grain_b, numpy_VIDEOGRAIN): + grain_b = numpy_VideoGrain(grain_b) + + psnr = [] + max_val = (1 << COG_FRAME_FORMAT_ACTIVE_BITS(grain_a.format)) - 1 + for comp_data_a, comp_data_b in zip(grain_a.component_data, grain_b.component_data): + psnr.append(_compute_comp_psnr(comp_data_a, comp_data_b, max_val)) + + return psnr diff --git a/tests/test_psnr.py b/tests/test_psnr.py index f77cd9a..e73db1f 100644 --- a/tests/test_psnr.py +++ b/tests/test_psnr.py @@ -19,6 +19,7 @@ from __future__ import absolute_import from unittest import TestCase +from sys import version_info import uuid from mediagrains import VideoGrain @@ -45,7 +46,13 @@ } -def set_colour_bars(vg, noise_mask=0xffff): +def _create_grain(cog_frame_format): + return VideoGrain(SRC_ID, FLOW_ID, + 
cog_frame_format=cog_frame_format, + width=480, height=270) + + +def _set_colour_bars(vg, noise_mask=0xffff): """The code, except for the noise_mask, was copied from testsignalgenerator. It was duplicated here to keep the unit tests isolated. @@ -85,6 +92,30 @@ def set_colour_bars(vg, noise_mask=0xffff): vg.data[offset:offset + vg.components[c].width*_bpp] = lines[c] +def _convert_u8_uyvy(grain_u8): + grain_uyvy = _create_grain(CogFrameFormat.UYVY) + for y in range(0, grain_u8.height): + for x in range(0, grain_u8.width//2): + # U + grain_uyvy.data[y*grain_uyvy.components[0].stride + 4*x + 0] = grain_u8.data[grain_u8.components[1].offset + + y*grain_u8.components[1].stride + + x] + # Y + grain_uyvy.data[y*grain_uyvy.components[0].stride + 4*x + 1] = grain_u8.data[grain_u8.components[0].offset + + y*grain_u8.components[0].stride + + 2*x + 0] + # V + grain_uyvy.data[y*grain_uyvy.components[0].stride + 4*x + 2] = grain_u8.data[grain_u8.components[2].offset + + y*grain_u8.components[2].stride + + x] + # Y + grain_uyvy.data[y*grain_uyvy.components[0].stride + 4*x + 3] = grain_u8.data[grain_u8.components[0].offset + + y*grain_u8.components[0].stride + + 2*x + 1] + + return grain_uyvy + + class TestPSNR(TestCase): def _check_psnr_range(self, computed, ranges, max_diff): for psnr, psnr_range in zip(computed, ranges): @@ -92,46 +123,53 @@ def _check_psnr_range(self, computed, ranges, max_diff): return False return True - def _create_grain(self, cog_frame_format): - return VideoGrain(SRC_ID, FLOW_ID, - cog_frame_format=cog_frame_format, - width=480, height=270) - - def _test_format(self, cog_frame_format, expected): - grain_a = self._create_grain(cog_frame_format) - set_colour_bars(grain_a) - grain_b = self._create_grain(cog_frame_format) - set_colour_bars(grain_b, noise_mask=0xfffa) + def _test_planar_format(self, cog_frame_format, expected): + grain_a = _create_grain(cog_frame_format) + _set_colour_bars(grain_a) + grain_b = _create_grain(cog_frame_format) + 
_set_colour_bars(grain_b, noise_mask=0xfffa) psnr = compute_psnr(grain_a, grain_b) - self.assertTrue(self._check_psnr_range(psnr, expected, 0.1)) + self.assertTrue(self._check_psnr_range(psnr, expected, 0.1), "{} != {}".format(psnr, expected)) def test_identical_data(self): - grain = self._create_grain(CogFrameFormat.U8_422) - set_colour_bars(grain, noise_mask=0xfa) + grain = _create_grain(CogFrameFormat.U8_422) + _set_colour_bars(grain, noise_mask=0xfa) self.assertEqual(compute_psnr(grain, grain), [float('Inf'), float('Inf'), float('Inf')]) def test_planar_8bit(self): - self._test_format(CogFrameFormat.U8_422, [36.47984486113692, 39.45318336217709, 38.90095545159027]) + self._test_planar_format(CogFrameFormat.U8_422, [36.47984486113692, 39.45318336217709, 38.90095545159027]) def test_planar_10bit(self): - self._test_format(CogFrameFormat.S16_422_10BIT, [48.8541475647564, 50.477799910245636, 50.477799910245636]) + self._test_planar_format(CogFrameFormat.S16_422_10BIT, [48.8541475647564, 50.477799910245636, 50.477799910245636]) def test_planar_12bit(self): - self._test_format(CogFrameFormat.S16_422_12BIT, [60.30687786176762, 62.525365357931186, 62.525365357931186]) + self._test_planar_format(CogFrameFormat.S16_422_12BIT, [60.30687786176762, 62.525365357931186, 62.525365357931186]) def test_planar_16bit(self): - self._test_format(CogFrameFormat.S16_422, [84.39126581514387, 86.60975331130743, 86.60975331130743]) + self._test_planar_format(CogFrameFormat.S16_422, [84.39126581514387, 86.60975331130743, 86.60975331130743]) + + def test_uyvy_format(self): + planar_grain_a = _create_grain(CogFrameFormat.U8_422) + _set_colour_bars(planar_grain_a) + grain_a = _convert_u8_uyvy(planar_grain_a) + + planar_grain_b = _create_grain(CogFrameFormat.U8_422) + _set_colour_bars(planar_grain_b, noise_mask=0xfffa) + grain_b = _convert_u8_uyvy(planar_grain_b) + + if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + psnr = compute_psnr(grain_a, grain_b) + expected = 
[36.47984486113692, 39.45318336217709, 38.90095545159027] + self.assertTrue(self._check_psnr_range(psnr, expected, 0.1), + "{} != {}".format(psnr, expected)) + else: + with self.assertRaises(NotImplementedError): + compute_psnr(grain_a, grain_b) def test_compressed_unsupported(self): - grain = self._create_grain(CogFrameFormat.H264) - - with self.assertRaises(NotImplementedError): - compute_psnr(grain, grain) - - def test_packed_unsupported(self): - grain = self._create_grain(CogFrameFormat.UYVY) + grain = _create_grain(CogFrameFormat.H264) with self.assertRaises(NotImplementedError): compute_psnr(grain, grain) From 8d0ade9d08f7f635909b23dd7a8e5d6fcd512e3f Mon Sep 17 00:00:00 2001 From: Philip de Nier Date: Wed, 16 Oct 2019 15:21:35 +0100 Subject: [PATCH 52/76] test mixed format psnr for py36 --- mediagrains/comparison/psnr.py | 2 ++ tests/test_psnr.py | 17 +++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/mediagrains/comparison/psnr.py b/mediagrains/comparison/psnr.py index 2ffba46..ae633e8 100644 --- a/mediagrains/comparison/psnr.py +++ b/mediagrains/comparison/psnr.py @@ -101,6 +101,8 @@ def compute_psnr(grain_a, grain_b): if grain_a.width != grain_b.width or grain_a.height != grain_b.height: raise AttributeError("Frame dimensions differ") + if grain_a.format != grain_b.format: + raise NotImplementedError("Different grain formats not supported") if COG_FRAME_IS_COMPRESSED(grain_a.format): raise NotImplementedError("Compressed video is not supported") diff --git a/tests/test_psnr.py b/tests/test_psnr.py index e73db1f..56f7981 100644 --- a/tests/test_psnr.py +++ b/tests/test_psnr.py @@ -168,6 +168,23 @@ def test_uyvy_format(self): with self.assertRaises(NotImplementedError): compute_psnr(grain_a, grain_b) + def test_mixed_format(self): + grain_a = _create_grain(CogFrameFormat.U8_422) + _set_colour_bars(grain_a) + + planar_grain_b = _create_grain(CogFrameFormat.U8_422) + _set_colour_bars(planar_grain_b, noise_mask=0xfffa) + grain_b = 
_convert_u8_uyvy(planar_grain_b) + + if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + psnr = compute_psnr(grain_a, grain_b) + expected = [36.47984486113692, 39.45318336217709, 38.90095545159027] + self.assertTrue(self._check_psnr_range(psnr, expected, 0.1), + "{} != {}".format(psnr, expected)) + else: + with self.assertRaises(NotImplementedError): + compute_psnr(grain_a, grain_b) + def test_compressed_unsupported(self): grain = _create_grain(CogFrameFormat.H264) From fa94fd4a03be49f36d6fabc3bc18b2c3883beeab Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 14 Oct 2019 16:11:21 +0100 Subject: [PATCH 53/76] numpy.convert: working test pattern generation for conversion tests --- mediagrains_py36/numpy/__init__.py | 9 ++ mediagrains_py36/numpy/convert.py | 0 tests/test36_numpy_videograin.py | 129 ++++++++++++++--------------- 3 files changed, 72 insertions(+), 66 deletions(-) create mode 100644 mediagrains_py36/numpy/convert.py diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index dd83fb3..fe36831 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -218,6 +218,15 @@ def __repr__(self): else: return "{}({!r},< numpy data of length {} >)".format(self._factory, self.meta, len(self.data)) + def convert(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": + """Used to convert this grain to a different cog format. + + :param fmt: The format to convert to + :returns: A new grain of the specified format. Notably converting to the same format is the same as a deepcopy + :raises: NotImplementedError if the requested conversion is not possible + """ + return deepcopy(self) + def VideoGrain(*args, **kwargs) -> VIDEOGRAIN: """If the first argument is a mediagrains.VIDEOGRAIN then return a mediagrains.numpy.VIDEOGRAIN representing the same data. 
diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index c0c229a..a299502 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -37,6 +37,8 @@ import numpy as np +from pdb import set_trace + class TestGrain (TestCase): def _get_bitdepth(self, fmt): @@ -335,74 +337,44 @@ def __inner(grain): self.assertEqual(grain.component_data[2].ndim, 2) self.assertEqual(grain.component_data[2].shape, (width >> hs, height >> vs)) - # Test that changes to the component arrays are reflected in the main data array - for y in range(0, 16): - for x in range(0, 16): - grain.component_data[0][x, y] = (y*16 + x) & 0x3F - - for y in range(0, 16 >> vs): - for x in range(0, 16 >> hs): - grain.component_data[1][x, y] = (y*16 + x) & 0x3F + 0x40 - grain.component_data[2][x, y] = (y*16 + x) & 0x3F + 0x50 - - if COG_FRAME_IS_PLANAR(fmt): - for y in range(0, 16): - for x in range(0, 16): - self.assertEqual(grain.data[y*width + x], (y*16 + x) & 0x3F) - - for y in range(0, 16 >> vs): - for x in range(0, 16 >> hs): - self.assertEqual(grain.data[width*height + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[width*height + (width >> hs)*(height >> vs) + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x50) - - elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: - for y in range(0, 16): - for x in range(0, 8): - self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + 2*x + 0) & 0x3F) - self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) - self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + 2*x + 1) & 0x3F) - - elif fmt == CogFrameFormat.YUYV: - for y in range(0, 16): - for x in range(0, 8): - self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + 2*x + 0) & 0x3F) - 
self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + 2*x + 1) & 0x3F) - self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) - - elif fmt == CogFrameFormat.RGB: - for y in range(0, 16): - for x in range(0, 16): - self.assertEqual(grain.data[y*width*3 + 3*x + 0], (y*16 + x) & 0x3F) - self.assertEqual(grain.data[y*width*3 + 3*x + 1], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[y*width*3 + 3*x + 2], (y*16 + x) & 0x3F + 0x50) - - elif fmt in [CogFrameFormat.RGBx, - CogFrameFormat.RGBA, - CogFrameFormat.BGRx, - CogFrameFormat.BGRx]: - for y in range(0, 16): - for x in range(0, 16): - self.assertEqual(grain.data[y*width*4 + 4*x + 0], (y*16 + x) & 0x3F) - self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) - - elif fmt in [CogFrameFormat.ARGB, - CogFrameFormat.xRGB, - CogFrameFormat.ABGR, - CogFrameFormat.xBGR]: - for y in range(0, 16): - for x in range(0, 16): - self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F) - self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x40) - self.assertEqual(grain.data[y*width*4 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) - - else: - raise Exception() - return __inner + def write_test_pattern(self, grain): + fmt = grain.format + (hs, vs, _) = self._get_hs_vs_and_bps(fmt) + bd = self._get_bitdepth(fmt) + + v = (1 << (bd - 2))*3 + R = np.array([[v, v, v, v, 0, 0, 0, 0, v, v, v, v, 0, 0, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose() + G = np.array([[v, v, v, v, v, v, v, v, 0, 0, 0, 0, 0, 0, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose() + B = np.array([[v, v, 0, 0, v, v, 0, 0, v, v, 0, 0, v, v, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose() + + if self._is_rgb(fmt): + grain.component_data.R[0:16, 0:16] = R + 
grain.component_data.G[0:16, 0:16] = G + grain.component_data.B[0:16, 0:16] = B + else: + Y = R*0.2126 + G*0.7152 + B*0.0722 + U = R*-0.114572 - G*0.385428 + B*0.5 + (1 << bd - 1) + V = R*0.5 - G*0.454153 - B*0.045847 + (1 << bd - 1) + + grain.component_data.Y[0:16, 0:16] = np.around(Y) + + if hs == 0 and vs == 0: + grain.component_data.U[0:16, 0:16] = np.around(U) + grain.component_data.V[0:16, 0:16] = np.around(V) + elif hs == 1 and vs == 0: + grain.component_data.U[0:16, 0:16] = np.around((U[0::2, :] + U[1::2, :])/2) + grain.component_data.V[0:16, 0:16] = np.around((V[0::2, :] + U[1::2, :])/2) + elif hs == 1 and vs == 1: + u = (U[:, 0::2] + U[:, 1::2])/2 + grain.component_data.U[0:16, 0:16] = np.around((u[0::2, :] + u[1::2, :])/2) + v = (V[:, 0::2] + U[:, 1::2])/2 + grain.component_data.V[0:16, 0:16] = np.around((v[0::2, :] + v[1::2, :])/2) + + def assertMatchesTestPattern(self, grain): + self.fail() + def test_video_grain_create(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") @@ -453,6 +425,31 @@ def test_video_grain_create(self): if fmt is not CogFrameFormat.v210: self.assertComponentsAreModifiable(grain) + def test_video_grain_convert(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + for (fmt_in, fmt_out) in [(CogFrameFormat.UYVY, CogFrameFormat.U8_444), + (CogFrameFormat.U8_444, CogFrameFormat.UYVY), + (CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB), + (CogFrameFormat.U8_444_RGB, CogFrameFormat.RGB)]: + with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain_in = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=fmt_in, + width=16, 
height=16, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) + self.write_test_pattern(grain_in) + + grain_out = grain_in.convert(fmt_out) + + self.assertIsVideoGrain(fmt_out, width=16, height=16)(grain_out) + self.assertMatchesTestPattern(grain_out) + def test_video_grain_create_discontiguous(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") From 0d49f0a782b919dcea6b6692df977fed8a9bece3 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 15 Oct 2019 10:48:30 +0100 Subject: [PATCH 54/76] numpy.convert: Tests and surrounding infrastructure --- mediagrains_py36/numpy/__init__.py | 7 +- mediagrains_py36/numpy/convert.py | 42 +++++++++ tests/test36_numpy_videograin.py | 140 +++++++++++++++++------------ 3 files changed, 129 insertions(+), 60 deletions(-) diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index fe36831..0c01a3e 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -30,6 +30,8 @@ from mediagrains import grain_constructors as bytesgrain_constructors from copy import copy, deepcopy +from .convert import get_grain_conversion_function + import numpy as np from numpy.lib.stride_tricks import as_strided @@ -225,7 +227,10 @@ def convert(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": :returns: A new grain of the specified format. 
Notably converting to the same format is the same as a deepcopy :raises: NotImplementedError if the requested conversion is not possible """ - return deepcopy(self) + if self.format == fmt: + return deepcopy(self) + else: + return get_grain_conversion_function(self.format, fmt)(self) def VideoGrain(*args, **kwargs) -> VIDEOGRAIN: diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index e69de29..2cb88ff 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -0,0 +1,42 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""\ +Library for converting video grain formats represented as numpy arrays. 
+""" + +from mediagrains.cogenums import CogFrameFormat +from typing import Callable + +__all__ = ["get_grain_conversion_function"] + + +grain_conversions = {} + + +def grain_conversion(fmt_in: CogFrameFormat, fmt_out: CogFrameFormat): + def _inner(f: Callable[['VideoGrain'], 'VideoGrain']) -> None: + global grain_conversions + grain_conversions[(fmt_in, fmt_out)] = f + return _inner + + +def get_grain_conversion_function(fmt_in: CogFrameFormat, fmt_out: CogFrameFormat) -> Callable[["VideoGrain"], "VideoGrain"]: + if (fmt_in, fmt_out) in grain_conversions: + return grain_conversions[(fmt_in, fmt_out)] + + raise NotImplementedError("This conversion has not yet been implemented") \ No newline at end of file diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index a299502..1881820 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -34,6 +34,7 @@ import mock from fractions import Fraction from copy import copy, deepcopy +from typing import Tuple import numpy as np @@ -314,66 +315,89 @@ def __inner(grain): self.assertEqual(len(grain.component_data), 0) else: self.assertIsInstance(grain.component_data[0], np.ndarray) - self.assertEqual(grain.component_data[0].nbytes, width*height*bps) - self.assertEqual(grain.component_data[0].dtype, dtype) - self.assertEqual(grain.component_data[0].size, width*height) - self.assertEqual(grain.component_data[0].itemsize, bps) - self.assertEqual(grain.component_data[0].ndim, 2) - self.assertEqual(grain.component_data[0].shape, (width, height)) + self.assertTrue(np.array_equal(grain.component_data[0].nbytes, width*height*bps)) + self.assertTrue(np.array_equal(grain.component_data[0].dtype, dtype)) + self.assertTrue(np.array_equal(grain.component_data[0].size, width*height)) + self.assertTrue(np.array_equal(grain.component_data[0].itemsize, bps)) + self.assertTrue(np.array_equal(grain.component_data[0].ndim, 2)) + self.assertTrue(np.array_equal(grain.component_data[0].shape, 
(width, height))) self.assertIsInstance(grain.component_data[1], np.ndarray) - self.assertEqual(grain.component_data[1].nbytes, width*height*bps >> (hs + vs)) - self.assertEqual(grain.component_data[1].dtype, dtype) - self.assertEqual(grain.component_data[1].size, width*height >> (hs + vs)) - self.assertEqual(grain.component_data[1].itemsize, bps) - self.assertEqual(grain.component_data[1].ndim, 2) - self.assertEqual(grain.component_data[1].shape, (width >> hs, height >> vs)) + self.assertTrue(np.array_equal(grain.component_data[1].nbytes, width*height*bps >> (hs + vs))) + self.assertTrue(np.array_equal(grain.component_data[1].dtype, dtype)) + self.assertTrue(np.array_equal(grain.component_data[1].size, width*height >> (hs + vs))) + self.assertTrue(np.array_equal(grain.component_data[1].itemsize, bps)) + self.assertTrue(np.array_equal(grain.component_data[1].ndim, 2)) + self.assertTrue(np.array_equal(grain.component_data[1].shape, (width >> hs, height >> vs))) self.assertIsInstance(grain.component_data[2], np.ndarray) - self.assertEqual(grain.component_data[2].nbytes, width*height*bps >> (hs + vs)) - self.assertEqual(grain.component_data[2].dtype, dtype) - self.assertEqual(grain.component_data[2].size, width*height >> (hs + vs)) - self.assertEqual(grain.component_data[2].itemsize, bps) - self.assertEqual(grain.component_data[2].ndim, 2) - self.assertEqual(grain.component_data[2].shape, (width >> hs, height >> vs)) + self.assertTrue(np.array_equal(grain.component_data[2].nbytes, width*height*bps >> (hs + vs))) + self.assertTrue(np.array_equal(grain.component_data[2].dtype, dtype)) + self.assertTrue(np.array_equal(grain.component_data[2].size, width*height >> (hs + vs))) + self.assertTrue(np.array_equal(grain.component_data[2].itemsize, bps)) + self.assertTrue(np.array_equal(grain.component_data[2].ndim, 2)) + self.assertTrue(np.array_equal(grain.component_data[2].shape, (width >> hs, height >> vs))) return __inner - def write_test_pattern(self, grain): - fmt = 
grain.format - (hs, vs, _) = self._get_hs_vs_and_bps(fmt) + def _test_pattern_rgb(self, fmt: CogFrameFormat) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Return a 16x16 pixel RGB test pattern""" bd = self._get_bitdepth(fmt) v = (1 << (bd - 2))*3 - R = np.array([[v, v, v, v, 0, 0, 0, 0, v, v, v, v, 0, 0, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose() - G = np.array([[v, v, v, v, v, v, v, v, 0, 0, 0, 0, 0, 0, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose() - B = np.array([[v, v, 0, 0, v, v, 0, 0, v, v, 0, 0, v, v, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose() + return (np.array([[v, v, v, v, 0, 0, 0, 0, v, v, v, v, 0, 0, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose(), + np.array([[v, v, v, v, v, v, v, v, 0, 0, 0, 0, 0, 0, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose(), + np.array([[v, v, 0, 0, v, v, 0, 0, v, v, 0, 0, v, v, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose()) + + def _test_pattern_yuv(self, fmt: CogFrameFormat) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + (R, G, B) = self._test_pattern_rgb(fmt) + bd = self._get_bitdepth(fmt) + (hs, vs, _) = self._get_hs_vs_and_bps(fmt) + + Y = (R*0.2126 + G*0.7152 + B*0.0722) + U = (R*-0.114572 - G*0.385428 + B*0.5 + (1 << bd - 1)) + V = (R*0.5 - G*0.454153 - B*0.045847 + (1 << bd - 1)) + + if hs == 1: + U = (U[0::2, :] + U[1::2, :])/2 + V = (V[0::2, :] + V[1::2, :])/2 + if vs == 1: + U = (U[:, 0::2] + U[:, 1::2])/2 + V = (V[:, 0::2] + V[:, 1::2])/2 + + return (np.around(Y), np.around(U), np.around(V)) + + def write_test_pattern(self, grain): + fmt = grain.format if self._is_rgb(fmt): - grain.component_data.R[0:16, 0:16] = R - grain.component_data.G[0:16, 0:16] = G - grain.component_data.B[0:16, 0:16] = B + (R, G, B) = self._test_pattern_rgb(fmt) + + grain.component_data.R[:, :] = R + grain.component_data.G[:, :] = G + grain.component_data.B[:, :] = B else: - Y = R*0.2126 + G*0.7152 + 
B*0.0722 - U = R*-0.114572 - G*0.385428 + B*0.5 + (1 << bd - 1) - V = R*0.5 - G*0.454153 - B*0.045847 + (1 << bd - 1) - - grain.component_data.Y[0:16, 0:16] = np.around(Y) - - if hs == 0 and vs == 0: - grain.component_data.U[0:16, 0:16] = np.around(U) - grain.component_data.V[0:16, 0:16] = np.around(V) - elif hs == 1 and vs == 0: - grain.component_data.U[0:16, 0:16] = np.around((U[0::2, :] + U[1::2, :])/2) - grain.component_data.V[0:16, 0:16] = np.around((V[0::2, :] + U[1::2, :])/2) - elif hs == 1 and vs == 1: - u = (U[:, 0::2] + U[:, 1::2])/2 - grain.component_data.U[0:16, 0:16] = np.around((u[0::2, :] + u[1::2, :])/2) - v = (V[:, 0::2] + U[:, 1::2])/2 - grain.component_data.V[0:16, 0:16] = np.around((v[0::2, :] + v[1::2, :])/2) + (Y, U, V) = self._test_pattern_yuv(fmt) + + grain.component_data.Y[:, :] = Y + grain.component_data.U[:, :] = U + grain.component_data.V[:, :] = V def assertMatchesTestPattern(self, grain): - self.fail() + fmt = grain.format + + if self._is_rgb(fmt): + (R, G, B) = self._test_pattern_rgb(fmt) + + self.assertTrue(np.array_equal(grain.component_data.R[:, :], R)) + self.assertTrue(np.array_equal(grain.component_data.G[:, :], G)) + self.assertTrue(np.array_equal(grain.component_data.B[:, :], B)) + else: + (Y, U, V) = self._test_pattern_yuv(fmt) + + self.assertTrue(np.array_equal(grain.component_data.Y[:, :], Y)) + self.assertTrue(np.array_equal(grain.component_data.U[:, :], U)) + self.assertTrue(np.array_equal(grain.component_data.V[:, :], V)) def test_video_grain_create(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") @@ -432,23 +456,21 @@ def test_video_grain_convert(self): ots = Timestamp.from_tai_sec_nsec("417798915:5") sts = Timestamp.from_tai_sec_nsec("417798915:10") - for (fmt_in, fmt_out) in [(CogFrameFormat.UYVY, CogFrameFormat.U8_444), - (CogFrameFormat.U8_444, CogFrameFormat.UYVY), - (CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB), - (CogFrameFormat.U8_444_RGB, CogFrameFormat.RGB)]: - with 
self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): - with mock.patch.object(Timestamp, "get_time", return_value=cts): - grain_in = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, - cog_frame_format=fmt_in, - width=16, height=16, cog_frame_layout=CogFrameLayout.FULL_FRAME) + for fmt_in in [CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB]: + for fmt_out in [CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB]: + with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain_in = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=fmt_in, + width=16, height=16, cog_frame_layout=CogFrameLayout.FULL_FRAME) - self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) - self.write_test_pattern(grain_in) + self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) + self.write_test_pattern(grain_in) - grain_out = grain_in.convert(fmt_out) + grain_out = grain_in.convert(fmt_out) - self.assertIsVideoGrain(fmt_out, width=16, height=16)(grain_out) - self.assertMatchesTestPattern(grain_out) + self.assertIsVideoGrain(fmt_out, width=16, height=16)(grain_out) + self.assertMatchesTestPattern(grain_out) def test_video_grain_create_discontiguous(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") From b9262d801fa06253fdf173b44eef910328eee284 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 15 Oct 2019 15:33:38 +0100 Subject: [PATCH 55/76] numpy.convert: conversion between RGB formats, and between YUV formats, but not between the two. 
No v210 --- mediagrains/numpy.py | 4 +- mediagrains_py36/numpy/__init__.py | 227 +---------------------- mediagrains_py36/numpy/convert.py | 167 +++++++++++++++-- mediagrains_py36/numpy/videograin.py | 265 +++++++++++++++++++++++++++ tests/test36_numpy_videograin.py | 42 +++-- 5 files changed, 456 insertions(+), 249 deletions(-) create mode 100644 mediagrains_py36/numpy/videograin.py diff --git a/mediagrains/numpy.py b/mediagrains/numpy.py index 496b9d6..1e1e6e3 100644 --- a/mediagrains/numpy.py +++ b/mediagrains/numpy.py @@ -22,8 +22,8 @@ from sys import version_info if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): - from mediagrains_py36.numpy import VideoGrain, VIDEOGRAIN # noqa: F401 + from mediagrains_py36.numpy import VideoGrain, VIDEOGRAIN, flow_id_for_converted_flow # noqa: F401 - __all__ = ['VideoGrain', 'VIDEOGRAIN'] + __all__ = ['VideoGrain', 'VIDEOGRAIN', "flow_id_for_converted_flow"] else: __all__ = [] diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index 0c01a3e..d4256dd 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -19,228 +19,7 @@ Library for handling mediagrains in numpy arrays """ -from mediagrains.cogenums import ( - CogFrameFormat, - COG_FRAME_IS_PACKED, - COG_FRAME_IS_COMPRESSED, - COG_FRAME_IS_PLANAR, - COG_FRAME_FORMAT_BYTES_PER_VALUE, - COG_FRAME_IS_PLANAR_RGB) -from mediagrains import grain as bytesgrain -from mediagrains import grain_constructors as bytesgrain_constructors -from copy import copy, deepcopy +from .videograin import VIDEOGRAIN, VideoGrain +from . 
convert import flow_id_for_converted_flow -from .convert import get_grain_conversion_function - -import numpy as np -from numpy.lib.stride_tricks import as_strided - -from enum import Enum, auto - -__all__ = ['VideoGrain', 'VIDEOGRAIN'] - - -def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: - """This method returns the numpy "data type" for a particular video format. - - For planar and padded formats this is the size of the native integer type that is used to handle the samples (eg. 8bit, 16bit, etc ...) - For weird packed formats like v210 (10-bit samples packed so that there are 3 10-bit samples in every 32-bit word) this is not possible. - Instead for v210 we return int32, since that is the most useful native data type that always corresponds to an integral number of samples. - """ - if COG_FRAME_IS_PLANAR(fmt): - if COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 1: - return np.dtype(np.uint8) - elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 2: - return np.dtype(np.int16) - elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 4: - return np.dtype(np.int32) - elif fmt in [CogFrameFormat.UYVY, - CogFrameFormat.YUYV, - CogFrameFormat.AYUV, - CogFrameFormat.RGB, - CogFrameFormat.RGBx, - CogFrameFormat.RGBA, - CogFrameFormat.BGRx, - CogFrameFormat.BGRx, - CogFrameFormat.ARGB, - CogFrameFormat.xRGB, - CogFrameFormat.ABGR, - CogFrameFormat.xBGR]: - return np.dtype(np.uint8) - elif fmt == CogFrameFormat.v216: - return np.dtype(np.int16) - elif fmt == CogFrameFormat.v210: - return np.dtype(np.int32) - - raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") - - -class ComponentDataList(list): - class ComponentOrder (Enum): - YUV = auto() - RGB = auto() - BGR = auto() - X = auto() - - def __init__(self, data: list, arrangement: ComponentOrder=ComponentOrder.X): - super().__init__(data) - if arrangement == ComponentDataList.ComponentOrder.YUV: - self.Y = self[0] - self.U = self[1] - self.V = self[2] - elif arrangement == 
ComponentDataList.ComponentOrder.RGB: - self.R = self[0] - self.G = self[1] - self.B = self[2] - elif arrangement == ComponentDataList.ComponentOrder.BGR: - self.B = self[0] - self.G = self[1] - self.R = self[2] - - -def _component_arrangement_from_format(fmt: CogFrameFormat): - """This method returns the ordering of the components in the component data arrays that are used to represent a particular format. - - Note that for the likes of UYVY this will return YUV since the planes are represented in that order by the interface even though they - are interleved in the data. - - For formats where no meaningful component access can be provided (v210, compressed formats, etc ...) the value X is returned. - """ - if COG_FRAME_IS_PLANAR(fmt): - if COG_FRAME_IS_PLANAR_RGB(fmt): - return ComponentDataList.ComponentOrder.RGB - else: - return ComponentDataList.ComponentOrder.YUV - elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, CogFrameFormat.v216, CogFrameFormat.AYUV]: - return ComponentDataList.ComponentOrder.YUV - elif fmt in [CogFrameFormat.RGB, CogFrameFormat.RGBA, CogFrameFormat.RGBx, CogFrameFormat.ARGB, CogFrameFormat.xRGB]: - return ComponentDataList.ComponentOrder.RGB - elif fmt in [CogFrameFormat.BGRA, CogFrameFormat.BGRx, CogFrameFormat.xBGR, CogFrameFormat.ABGR]: - return ComponentDataList.ComponentOrder.BGR - else: - return ComponentDataList.ComponentOrder.X - - -def _component_arrays_for_interleaved_422(data0: np.ndarray, data1: np.ndarray, data2: np.ndarray, width: int, height: int, stride: int, itemsize: int): - return [ - as_strided(data0, - shape=(height, width), - strides=(stride, itemsize*2)).transpose(), - as_strided(data1, - shape=(height, width//2), - strides=(stride, itemsize*4)).transpose(), - as_strided(data2, - shape=(height, width//2), - strides=(stride, itemsize*4)).transpose()] - - -def _component_arrays_for_interleaved_444_take_three(data0: np.ndarray, data1: np.ndarray, data2: np.ndarray, width: int, height: int, stride: int, itemsize: 
int, num_components: int = 3): - return [ - as_strided(data0, - shape=(height, width), - strides=(stride, itemsize*num_components)).transpose(), - as_strided(data1, - shape=(height, width), - strides=(stride, itemsize*num_components)).transpose(), - as_strided(data2, - shape=(height, width), - strides=(stride, itemsize*num_components)).transpose()] - - -def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, components: bytesgrain.VIDEOGRAIN.COMPONENT_LIST): - """This method returns a list of numpy array views which can be used to directly access the components of the video frame - without any need for conversion or copying. This is not possible for all formats. - - For planar formats this simply returns a list of array views of the planes. - - For interleaved formats this returns a list of array views that use stride tricks to access alternate elements in the source data array. - - For weird packed formats like v210 nothing can be done, an empty list is returned since no individual component access is possible. 
- """ - if COG_FRAME_IS_PLANAR(fmt): - return [ - as_strided(data[component.offset//data.itemsize:(component.offset + component.length)//data.itemsize], - shape=(component.height, component.width), - strides=(component.stride, data.itemsize)).transpose() - for component in components] - elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: - # Either 8 or 16 bits 4:2:2 interleavedd in UYVY order - return _component_arrays_for_interleaved_422(data[1:], data, data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize) - elif fmt == CogFrameFormat.YUYV: - # 8 bit 4:2:2 interleaved in YUYV order - return _component_arrays_for_interleaved_422(data, data[1:], data[3:], components[0].width, components[0].height, components[0].stride, data.itemsize) - elif fmt == CogFrameFormat.RGB: - # 8 bit 4:4:4 three components interleaved in RGB order - return _component_arrays_for_interleaved_444_take_three(data, data[1:], data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize) - elif fmt in [CogFrameFormat.RGBx, - CogFrameFormat.RGBA, - CogFrameFormat.BGRx, - CogFrameFormat.BGRx]: - # 8 bit 4:4:4:4 four components interleave dropping the fourth component - return _component_arrays_for_interleaved_444_take_three(data, data[1:], data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize, num_components=4) - elif fmt in [CogFrameFormat.ARGB, - CogFrameFormat.xRGB, - CogFrameFormat.ABGR, - CogFrameFormat.xBGR, - CogFrameFormat.AYUV]: - # 8 bit 4:4:4:4 four components interleave dropping the first component - return _component_arrays_for_interleaved_444_take_three(data[1:], data[2:], data[3:], components[0].width, components[0].height, components[0].stride, data.itemsize, num_components=4) - elif fmt == CogFrameFormat.v210: - # v210 is barely supported. Convert it to something else to actually use it! 
- # This method returns an empty list because component access isn't supported, but - # the more basic access to the underlying data is. - return [] - - raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") - - -class VIDEOGRAIN (bytesgrain.VIDEOGRAIN): - def __init__(self, meta, data): - super().__init__(meta, data) - self._data = np.frombuffer(self._data, dtype=_dtype_from_cogframeformat(self.format)) - self.component_data = ComponentDataList( - _component_arrays_for_data_and_type(self._data, self.format, self.components), - arrangement=_component_arrangement_from_format(self.format)) - - def __array__(self): - return np.array(self.data) - - def __bytes__(self): - return bytes(self.data) - - def __copy__(self): - return VideoGrain(copy(self.meta), self.data) - - def __deepcopy__(self, memo): - return VideoGrain(deepcopy(self.meta), self.data.copy()) - - def __repr__(self): - if self.data is None: - return "{}({!r})".format(self._factory, self.meta) - else: - return "{}({!r},< numpy data of length {} >)".format(self._factory, self.meta, len(self.data)) - - def convert(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": - """Used to convert this grain to a different cog format. - - :param fmt: The format to convert to - :returns: A new grain of the specified format. Notably converting to the same format is the same as a deepcopy - :raises: NotImplementedError if the requested conversion is not possible - """ - if self.format == fmt: - return deepcopy(self) - else: - return get_grain_conversion_function(self.format, fmt)(self) - - -def VideoGrain(*args, **kwargs) -> VIDEOGRAIN: - """If the first argument is a mediagrains.VIDEOGRAIN then return a mediagrains.numpy.VIDEOGRAIN representing the same data. 
- - Otherwise takes the same parameters as mediagrains.VideoGrain and returns the same grain converted into a mediagrains.numpy.VIDEOGRAIN - """ - if len(args) == 1 and isinstance(args[0], bytesgrain.VIDEOGRAIN): - rawgrain = args[0] - else: - rawgrain = bytesgrain_constructors.VideoGrain(*args, **kwargs) - - return VIDEOGRAIN(rawgrain.meta, rawgrain.data) +__all__ = ['VideoGrain', 'VIDEOGRAIN', "flow_id_for_converted_flow"] diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index 2cb88ff..f00338d 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -19,24 +19,167 @@ Library for converting video grain formats represented as numpy arrays. """ -from mediagrains.cogenums import CogFrameFormat -from typing import Callable +from mediagrains.cogenums import CogFrameFormat, CogFrameLayout +from typing import Callable, List +from uuid import uuid5, UUID +import numpy as np -__all__ = ["get_grain_conversion_function"] +from .videograin import VideoGrain, VIDEOGRAIN +__all__ = ["flow_id_for_converted_flow"] -grain_conversions = {} +def flow_id_for_converted_flow(source_id: UUID, fmt: CogFrameFormat) -> UUID: + return uuid5(source_id, "FORMAT_CONVERSION: {!r}".format(fmt)) -def grain_conversion(fmt_in: CogFrameFormat, fmt_out: CogFrameFormat): - def _inner(f: Callable[['VideoGrain'], 'VideoGrain']) -> None: - global grain_conversions - grain_conversions[(fmt_in, fmt_out)] = f + +def new_grain(grain: VIDEOGRAIN, fmt: CogFrameFormat): + return VideoGrain(grain.source_id, + flow_id_for_converted_flow(grain.source_id, fmt), + origin_timestamp=grain.origin_timestamp, + sync_timestamp=grain.sync_timestamp, + cog_frame_format=fmt, + width=grain.width, + height=grain.height, + rate=grain.rate, + duration=grain.duration, + cog_frame_layout=grain.layout) + + +# Some simple conversions can be acheived by just copying the data from one grain to the other with no +# clever work at all. 
All the cleverness is already present in the code that creates the component array views +# in the mediagrains +def _simple_copy_convert_yuv(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: + grain_out = new_grain(grain_in, fmt) + + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + grain_out.component_data.U[:,:] = grain_in.component_data.U + grain_out.component_data.V[:,:] = grain_in.component_data.V + + return grain_out + return _inner + + +def _simple_copy_convert_rgb(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: + grain_out = new_grain(grain_in, fmt) + + grain_out.component_data.R[:,:] = grain_in.component_data.R + grain_out.component_data.G[:,:] = grain_in.component_data.G + grain_out.component_data.B[:,:] = grain_in.component_data.B + + return grain_out + return _inner + + +def _int_array_mean(a: np.ndarray, b: np.ndarray) -> np.ndarray: + """This takes the mean of two arrays of integers without risking overflowing intermediate values.""" + return (a//2 + b//2) + ((a&0x1) | (b&0x1)) + +# Some conversions between YUV colour subsampling systems require a simple mean +def _simple_mean_convert_yuv444__yuv422(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: + grain_out = new_grain(grain_in, fmt) + + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + + grain_out.component_data.U[:,:] = _int_array_mean(grain_in.component_data.U[0::2, :], grain_in.component_data.U[1::2, :]) + grain_out.component_data.V[:,:] = _int_array_mean(grain_in.component_data.V[0::2, :], grain_in.component_data.V[1::2, :]) + + return grain_out return _inner -def get_grain_conversion_function(fmt_in: CogFrameFormat, fmt_out: CogFrameFormat) -> Callable[["VideoGrain"], "VideoGrain"]: - if (fmt_in, fmt_out) in grain_conversions: - return grain_conversions[(fmt_in, fmt_out)] +def 
_simple_mean_convert_yuv422__yuv420(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: + grain_out = new_grain(grain_in, fmt) + + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + + grain_out.component_data.U[:,:] = _int_array_mean(grain_in.component_data.U[:, 0::2], grain_in.component_data.U[:, 1::2]) + grain_out.component_data.V[:,:] = _int_array_mean(grain_in.component_data.V[:, 0::2], grain_in.component_data.V[:, 1::2]) + + return grain_out + return _inner + +# Other conversions require duplicating samples +def _simple_duplicate_convert_yuv422__yuv444(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: + grain_out = new_grain(grain_in, fmt) + + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + + grain_out.component_data.U[0::2, :] = grain_in.component_data.U + grain_out.component_data.U[1::2, :] = grain_in.component_data.U + grain_out.component_data.V[0::2, :] = grain_in.component_data.V + grain_out.component_data.V[1::2, :] = grain_in.component_data.V + + return grain_out + return _inner + +def _simple_duplicate_convert_yuv420__yuv422(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: + grain_out = new_grain(grain_in, fmt) + + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + + grain_out.component_data.U[:, 0::2] = grain_in.component_data.U + grain_out.component_data.U[:, 1::2] = grain_in.component_data.U + grain_out.component_data.V[:, 0::2] = grain_in.component_data.V + grain_out.component_data.V[:, 1::2] = grain_in.component_data.V + + return grain_out + return _inner + + + +def _register_simple_copy_conversions_for_formats_yuv(fmts: List[CogFrameFormat]): + for i in range(0, len(fmts)): + for j in range(i+1, len(fmts)): + VIDEOGRAIN.grain_conversion(fmts[i], fmts[j])(_simple_copy_convert_yuv(fmts[j])) + VIDEOGRAIN.grain_conversion(fmts[j], 
fmts[i])(_simple_copy_convert_yuv(fmts[i])) + +def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat]): + for i in range(0, len(fmts)): + for j in range(i+1, len(fmts)): + VIDEOGRAIN.grain_conversion(fmts[i], fmts[j])(_simple_copy_convert_rgb(fmts[j])) + VIDEOGRAIN.grain_conversion(fmts[j], fmts[i])(_simple_copy_convert_rgb(fmts[i])) + + +# 8bit 4:2:2 YUV formats +_register_simple_copy_conversions_for_formats_yuv([ + CogFrameFormat.YUYV, + CogFrameFormat.UYVY, + CogFrameFormat.U8_422]) + +# 8 bit RGB formats +_register_simple_copy_conversions_for_formats_rgb([ + CogFrameFormat.RGB, + CogFrameFormat.U8_444_RGB, + CogFrameFormat.RGBx, + CogFrameFormat.xRGB, + CogFrameFormat.BGRx, + CogFrameFormat.xBGR]) + +# 8 bit 4:4:4 YUV to 8 bit 4:2:2 YUV +for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: + VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_444, fmt)(_simple_mean_convert_yuv444__yuv422(fmt)) + +# 8 bit 4:2:2 YUV to 8 bit 4:2:0 YUV +for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: + VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_420)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.U8_420)) + +# 8 bit 4:4:4 YUV to 8 bit 4:2:0 YUV +VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_444, CogFrameFormat.U8_420)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.U8_420)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.U8_422)(grain))) + +# 8 bit 4:2:0 YUV to 8 bit 4:2:2 YUV +for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: + VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_420, fmt)(_simple_duplicate_convert_yuv420__yuv422(fmt)) + +# 8 bit 4:2:0 YUV to 8 bit 4:4:4 YUV +VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_420, CogFrameFormat.U8_444)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.U8_422)(grain))) - raise NotImplementedError("This conversion has not yet 
been implemented") \ No newline at end of file +# 8 bit 4:2:2 YUV to 8 bit 4:4:4 YUV +for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: + VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.U8_444)) \ No newline at end of file diff --git a/mediagrains_py36/numpy/videograin.py b/mediagrains_py36/numpy/videograin.py new file mode 100644 index 0000000..987253e --- /dev/null +++ b/mediagrains_py36/numpy/videograin.py @@ -0,0 +1,265 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""\ +Library for handling mediagrains in numpy arrays +""" + +from mediagrains.cogenums import ( + CogFrameFormat, + COG_FRAME_IS_PACKED, + COG_FRAME_IS_COMPRESSED, + COG_FRAME_IS_PLANAR, + COG_FRAME_FORMAT_BYTES_PER_VALUE, + COG_FRAME_IS_PLANAR_RGB) +from mediagrains import grain as bytesgrain +from mediagrains import grain_constructors as bytesgrain_constructors +from copy import copy, deepcopy + +import numpy as np +from numpy.lib.stride_tricks import as_strided + +from typing import Callable + +from enum import Enum, auto + + +__all__ = ['VideoGrain', 'VIDEOGRAIN'] + + +def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: + """This method returns the numpy "data type" for a particular video format. + + For planar and padded formats this is the size of the native integer type that is used to handle the samples (eg. 
8bit, 16bit, etc ...) + For weird packed formats like v210 (10-bit samples packed so that there are 3 10-bit samples in every 32-bit word) this is not possible. + Instead for v210 we return int32, since that is the most useful native data type that always corresponds to an integral number of samples. + """ + if COG_FRAME_IS_PLANAR(fmt): + if COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 1: + return np.dtype(np.uint8) + elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 2: + return np.dtype(np.int16) + elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 4: + return np.dtype(np.int32) + elif fmt in [CogFrameFormat.UYVY, + CogFrameFormat.YUYV, + CogFrameFormat.AYUV, + CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + return np.dtype(np.uint8) + elif fmt == CogFrameFormat.v216: + return np.dtype(np.int16) + elif fmt == CogFrameFormat.v210: + return np.dtype(np.int32) + + raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") + + +class ComponentDataList(list): + class ComponentOrder (Enum): + YUV = auto() + RGB = auto() + BGR = auto() + X = auto() + + def __init__(self, data: list, arrangement: ComponentOrder=ComponentOrder.X): + super().__init__(data) + if arrangement == ComponentDataList.ComponentOrder.YUV: + self.Y = self[0] + self.U = self[1] + self.V = self[2] + elif arrangement == ComponentDataList.ComponentOrder.RGB: + self.R = self[0] + self.G = self[1] + self.B = self[2] + elif arrangement == ComponentDataList.ComponentOrder.BGR: + self.B = self[0] + self.G = self[1] + self.R = self[2] + + +def _component_arrangement_from_format(fmt: CogFrameFormat): + """This method returns the ordering of the components in the component data arrays that are used to represent a particular format. 
+ + Note that for the likes of UYVY this will return YUV since the planes are represented in that order by the interface even though they + are interleved in the data. + + For formats where no meaningful component access can be provided (v210, compressed formats, etc ...) the value X is returned. + """ + if COG_FRAME_IS_PLANAR(fmt): + if COG_FRAME_IS_PLANAR_RGB(fmt): + return ComponentDataList.ComponentOrder.RGB + else: + return ComponentDataList.ComponentOrder.YUV + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, CogFrameFormat.v216, CogFrameFormat.AYUV]: + return ComponentDataList.ComponentOrder.YUV + elif fmt in [CogFrameFormat.RGB, CogFrameFormat.RGBA, CogFrameFormat.RGBx, CogFrameFormat.ARGB, CogFrameFormat.xRGB]: + return ComponentDataList.ComponentOrder.RGB + elif fmt in [CogFrameFormat.BGRA, CogFrameFormat.BGRx, CogFrameFormat.xBGR, CogFrameFormat.ABGR]: + return ComponentDataList.ComponentOrder.BGR + else: + return ComponentDataList.ComponentOrder.X + + +def _component_arrays_for_interleaved_422(data0: np.ndarray, data1: np.ndarray, data2: np.ndarray, width: int, height: int, stride: int, itemsize: int): + return [ + as_strided(data0, + shape=(height, width), + strides=(stride, itemsize*2)).transpose(), + as_strided(data1, + shape=(height, width//2), + strides=(stride, itemsize*4)).transpose(), + as_strided(data2, + shape=(height, width//2), + strides=(stride, itemsize*4)).transpose()] + + +def _component_arrays_for_interleaved_444_take_three(data0: np.ndarray, data1: np.ndarray, data2: np.ndarray, width: int, height: int, stride: int, itemsize: int, num_components: int = 3): + return [ + as_strided(data0, + shape=(height, width), + strides=(stride, itemsize*num_components)).transpose(), + as_strided(data1, + shape=(height, width), + strides=(stride, itemsize*num_components)).transpose(), + as_strided(data2, + shape=(height, width), + strides=(stride, itemsize*num_components)).transpose()] + + +def _component_arrays_for_data_and_type(data: 
np.ndarray, fmt: CogFrameFormat, components: bytesgrain.VIDEOGRAIN.COMPONENT_LIST): + """This method returns a list of numpy array views which can be used to directly access the components of the video frame + without any need for conversion or copying. This is not possible for all formats. + + For planar formats this simply returns a list of array views of the planes. + + For interleaved formats this returns a list of array views that use stride tricks to access alternate elements in the source data array. + + For weird packed formats like v210 nothing can be done, an empty list is returned since no individual component access is possible. + """ + if COG_FRAME_IS_PLANAR(fmt): + return [ + as_strided(data[component.offset//data.itemsize:(component.offset + component.length)//data.itemsize], + shape=(component.height, component.width), + strides=(component.stride, data.itemsize)).transpose() + for component in components] + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: + # Either 8 or 16 bits 4:2:2 interleavedd in UYVY order + return _component_arrays_for_interleaved_422(data[1:], data, data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize) + elif fmt == CogFrameFormat.YUYV: + # 8 bit 4:2:2 interleaved in YUYV order + return _component_arrays_for_interleaved_422(data, data[1:], data[3:], components[0].width, components[0].height, components[0].stride, data.itemsize) + elif fmt == CogFrameFormat.RGB: + # 8 bit 4:4:4 three components interleaved in RGB order + return _component_arrays_for_interleaved_444_take_three(data, data[1:], data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize) + elif fmt in [CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx]: + # 8 bit 4:4:4:4 four components interleave dropping the fourth component + return _component_arrays_for_interleaved_444_take_three(data, data[1:], data[2:], components[0].width, components[0].height, 
components[0].stride, data.itemsize, num_components=4) + elif fmt in [CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR, + CogFrameFormat.AYUV]: + # 8 bit 4:4:4:4 four components interleave dropping the first component + return _component_arrays_for_interleaved_444_take_three(data[1:], data[2:], data[3:], components[0].width, components[0].height, components[0].stride, data.itemsize, num_components=4) + elif fmt == CogFrameFormat.v210: + # v210 is barely supported. Convert it to something else to actually use it! + # This method returns an empty list because component access isn't supported, but + # the more basic access to the underlying data is. + return [] + + raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") + + +class VIDEOGRAIN (bytesgrain.VIDEOGRAIN): + _grain_conversions = {} + + def __init__(self, meta, data): + super().__init__(meta, data) + self._data = np.frombuffer(self._data, dtype=_dtype_from_cogframeformat(self.format)) + self.component_data = ComponentDataList( + _component_arrays_for_data_and_type(self._data, self.format, self.components), + arrangement=_component_arrangement_from_format(self.format)) + + def __array__(self): + return np.array(self.data) + + def __bytes__(self): + return bytes(self.data) + + def __copy__(self): + return VideoGrain(copy(self.meta), self.data) + + def __deepcopy__(self, memo): + return VideoGrain(deepcopy(self.meta), self.data.copy()) + + def __repr__(self): + if self.data is None: + return "{}({!r})".format(self._factory, self.meta) + else: + return "{}({!r},< numpy data of length {} >)".format(self._factory, self.meta, len(self.data)) + + @classmethod + def grain_conversion(cls, fmt_in: CogFrameFormat, fmt_out: CogFrameFormat): + """Decorator to apply to all grain conversion functions""" + def _inner(f: Callable[[cls], cls]) -> None: + cls._grain_conversions[(fmt_in, fmt_out)] = f + return f + return _inner + + 
@classmethod + def _get_grain_conversion_function(cls, fmt_in: CogFrameFormat, fmt_out: CogFrameFormat) -> Callable[["VIDEOGRAIN"], "VIDEOGRAIN"]: + """Return the registered grain conversion function for a specified type conversion, or raise NotImplementedError""" + if (fmt_in, fmt_out) in cls._grain_conversions: + return cls._grain_conversions[(fmt_in, fmt_out)] + + raise NotImplementedError("This conversion has not yet been implemented") + + def convert(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": + """Used to convert this grain to a different cog format. + + :param fmt: The format to convert to + :returns: A new grain of the specified format. Notably converting to the same format is the same as a deepcopy + :raises: NotImplementedError if the requested conversion is not possible + """ + if self.format == fmt: + return deepcopy(self) + else: + return self.__class__._get_grain_conversion_function(self.format, fmt)(self) + + +def VideoGrain(*args, **kwargs) -> VIDEOGRAIN: + """If the first argument is a mediagrains.VIDEOGRAIN then return a mediagrains.numpy.VIDEOGRAIN representing the same data. 
+ + Otherwise takes the same parameters as mediagrains.VideoGrain and returns the same grain converted into a mediagrains.numpy.VIDEOGRAIN + """ + if len(args) == 1 and isinstance(args[0], bytesgrain.VIDEOGRAIN): + rawgrain = args[0] + else: + rawgrain = bytesgrain_constructors.VideoGrain(*args, **kwargs) + + return VIDEOGRAIN(rawgrain.meta, rawgrain.data) diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 1881820..6109e11 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -19,6 +19,7 @@ import uuid from mediagrains.numpy import VideoGrain +from mediagrains.numpy import flow_id_for_converted_flow from mediagrains.cogenums import ( CogFrameFormat, CogFrameLayout, @@ -189,7 +190,8 @@ def assertIsVideoGrain(self, cts=Timestamp.from_tai_sec_nsec("417798915:0"), rate=Fraction(25, 1), width=1920, - height=1080): + height=1080, + ignore_cts=False): def __inner(grain): self.assertEqual(grain.grain_type, "video") self.assertEqual(grain.source_id, src_id) @@ -198,7 +200,8 @@ def __inner(grain): self.assertEqual(grain.final_origin_timestamp(), ots) self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) - self.assertEqual(grain.creation_timestamp, cts) + if not ignore_cts: + self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, rate) self.assertEqual(grain.duration, 1/rate) self.assertEqual(grain.timelabels, []) @@ -383,21 +386,24 @@ def write_test_pattern(self, grain): grain.component_data.U[:, :] = U grain.component_data.V[:, :] = V + def assertArrayEqual(self, a: np.ndarray, b: np.ndarray): + self.assertTrue(np.array_equal(a, b), msg="{} != {}".format(a, b)) + def assertMatchesTestPattern(self, grain): fmt = grain.format if self._is_rgb(fmt): (R, G, B) = self._test_pattern_rgb(fmt) - self.assertTrue(np.array_equal(grain.component_data.R[:, :], R)) - self.assertTrue(np.array_equal(grain.component_data.G[:, :], 
G)) - self.assertTrue(np.array_equal(grain.component_data.B[:, :], B)) + self.assertArrayEqual(grain.component_data.R[:, :], R) + self.assertArrayEqual(grain.component_data.G[:, :], G) + self.assertArrayEqual(grain.component_data.B[:, :], B) else: (Y, U, V) = self._test_pattern_yuv(fmt) - self.assertTrue(np.array_equal(grain.component_data.Y[:, :], Y)) - self.assertTrue(np.array_equal(grain.component_data.U[:, :], U)) - self.assertTrue(np.array_equal(grain.component_data.V[:, :], V)) + self.assertArrayEqual(grain.component_data.Y[:, :], Y) + self.assertArrayEqual(grain.component_data.U[:, :], U) + self.assertArrayEqual(grain.component_data.V[:, :], V) def test_video_grain_create(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") @@ -456,8 +462,18 @@ def test_video_grain_convert(self): ots = Timestamp.from_tai_sec_nsec("417798915:5") sts = Timestamp.from_tai_sec_nsec("417798915:10") - for fmt_in in [CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB]: - for fmt_out in [CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB]: + + def pairs_from(fmts): + for fmt_in in fmts: + for fmt_out in fmts: + yield (fmt_in, fmt_out) + + # This checks conversions within YUV and RGB space, but not conversions between the two + for fmts in [ + [CogFrameFormat.YUYV, CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.U8_422, CogFrameFormat.U8_420], # All YUV 8bit formats + [CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR] # All 8-bit 3 component RGB formats + ]: + for (fmt_in, fmt_out) in pairs_from(fmts): with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): with mock.patch.object(Timestamp, "get_time", return_value=cts): grain_in = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, @@ -469,7 +485,11 @@ def test_video_grain_convert(self): grain_out = grain_in.convert(fmt_out) - 
self.assertIsVideoGrain(fmt_out, width=16, height=16)(grain_out) + if fmt_in != fmt_out: + flow_id_out = flow_id_for_converted_flow(src_id, fmt_out) + else: + flow_id_out = flow_id + self.assertIsVideoGrain(fmt_out, flow_id=flow_id_out, width=16, height=16, ignore_cts=True)(grain_out) self.assertMatchesTestPattern(grain_out) def test_video_grain_create_discontiguous(self): From c219f0a3893944d14a8f221129c7d92919538e06 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 15 Oct 2019 15:54:06 +0100 Subject: [PATCH 56/76] numpy.convert: Added 16-bit YUV format conversions --- mediagrains_py36/numpy/convert.py | 30 ++++++++++++++++++++++++++++-- tests/test36_numpy_videograin.py | 12 +++++++----- 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index f00338d..651d40f 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -147,11 +147,14 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] VIDEOGRAIN.grain_conversion(fmts[j], fmts[i])(_simple_copy_convert_rgb(fmts[i])) -# 8bit 4:2:2 YUV formats +# 4:2:2 YUV formats _register_simple_copy_conversions_for_formats_yuv([ CogFrameFormat.YUYV, CogFrameFormat.UYVY, CogFrameFormat.U8_422]) +_register_simple_copy_conversions_for_formats_yuv([ + CogFrameFormat.v216, + CogFrameFormat.S16_422]) # 8 bit RGB formats _register_simple_copy_conversions_for_formats_rgb([ @@ -182,4 +185,27 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] # 8 bit 4:2:2 YUV to 8 bit 4:4:4 YUV for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: - VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.U8_444)) \ No newline at end of file + VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.U8_444)) + + +# 16 bit 4:4:4 YUV to 16 bit 
4:2:2 YUV +for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: + VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, fmt)(_simple_mean_convert_yuv444__yuv422(fmt)) + +# 16 bit 4:2:2 YUV to 16 bit 4:2:0 YUV +for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: + VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_420)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420)) + +# 16 bit 4:4:4 YUV to 16 bit 4:2:0 YUV +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, CogFrameFormat.S16_420)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422)(grain))) + +# 16 bit 4:2:0 YUV to 16 bit 4:2:2 YUV +for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: + VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420, fmt)(_simple_duplicate_convert_yuv420__yuv422(fmt)) + +# 16 bit 4:2:0 YUV to 16 bit 4:4:4 YUV +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420, CogFrameFormat.S16_444)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422)(grain))) + +# 16 bit 4:2:2 YUV to 16 bit 4:4:4 YUV +for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: + VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444)) \ No newline at end of file diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 6109e11..e411176 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -20,6 +20,7 @@ import uuid from mediagrains.numpy import VideoGrain from mediagrains.numpy import flow_id_for_converted_flow +from mediagrains_py36.numpy.videograin import _dtype_from_cogframeformat from mediagrains.cogenums import ( CogFrameFormat, CogFrameLayout, @@ -348,9 +349,9 @@ def _test_pattern_rgb(self, fmt: CogFrameFormat) -> Tuple[np.ndarray, np.ndarray bd = self._get_bitdepth(fmt) v = (1 << (bd - 2))*3 - 
return (np.array([[v, v, v, v, 0, 0, 0, 0, v, v, v, v, 0, 0, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose(), - np.array([[v, v, v, v, v, v, v, v, 0, 0, 0, 0, 0, 0, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose(), - np.array([[v, v, 0, 0, v, v, 0, 0, v, v, 0, 0, v, v, 0, 0] for _ in range(0, 16)], dtype=np.dtype(np.int32)).transpose()) + return (np.array([[v, v, v, v, 0, 0, 0, 0, v, v, v, v, 0, 0, 0, 0] for _ in range(0, 16)], dtype=_dtype_from_cogframeformat(fmt)).transpose(), + np.array([[v, v, v, v, v, v, v, v, 0, 0, 0, 0, 0, 0, 0, 0] for _ in range(0, 16)], dtype=_dtype_from_cogframeformat(fmt)).transpose(), + np.array([[v, v, 0, 0, v, v, 0, 0, v, v, 0, 0, v, v, 0, 0] for _ in range(0, 16)], dtype=_dtype_from_cogframeformat(fmt)).transpose()) def _test_pattern_yuv(self, fmt: CogFrameFormat) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: (R, G, B) = self._test_pattern_rgb(fmt) @@ -368,7 +369,7 @@ def _test_pattern_yuv(self, fmt: CogFrameFormat) -> Tuple[np.ndarray, np.ndarray U = (U[:, 0::2] + U[:, 1::2])/2 V = (V[:, 0::2] + V[:, 1::2])/2 - return (np.around(Y), np.around(U), np.around(V)) + return (np.around(Y).astype(_dtype_from_cogframeformat(fmt)), np.around(U).astype(_dtype_from_cogframeformat(fmt)), np.around(V).astype(_dtype_from_cogframeformat(fmt))) def write_test_pattern(self, grain): fmt = grain.format @@ -471,7 +472,8 @@ def pairs_from(fmts): # This checks conversions within YUV and RGB space, but not conversions between the two for fmts in [ [CogFrameFormat.YUYV, CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.U8_422, CogFrameFormat.U8_420], # All YUV 8bit formats - [CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR] # All 8-bit 3 component RGB formats + [CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR], # All 8-bit 3 component RGB formats + 
[CogFrameFormat.v216, CogFrameFormat.S16_444, CogFrameFormat.S16_422, CogFrameFormat.S16_420], # All YUV 16bit formats ]: for (fmt_in, fmt_out) in pairs_from(fmts): with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): From 749ad6f30af9e26f466268f5dd0a10093cceb27b Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 15 Oct 2019 16:00:51 +0100 Subject: [PATCH 57/76] numpy.convert: Added 10-bit YUV format conversions --- mediagrains_py36/numpy/convert.py | 21 ++++++++++++++++----- tests/test36_numpy_videograin.py | 1 + 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index 651d40f..4eb7c2a 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -147,14 +147,11 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] VIDEOGRAIN.grain_conversion(fmts[j], fmts[i])(_simple_copy_convert_rgb(fmts[i])) -# 4:2:2 YUV formats +# 4:2:2 8 bit YUV formats _register_simple_copy_conversions_for_formats_yuv([ CogFrameFormat.YUYV, CogFrameFormat.UYVY, CogFrameFormat.U8_422]) -_register_simple_copy_conversions_for_formats_yuv([ - CogFrameFormat.v216, - CogFrameFormat.S16_422]) # 8 bit RGB formats _register_simple_copy_conversions_for_formats_rgb([ @@ -188,6 +185,11 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.U8_444)) +# 4:2:2 16 bit YUV formats +_register_simple_copy_conversions_for_formats_yuv([ + CogFrameFormat.v216, + CogFrameFormat.S16_422]) + # 16 bit 4:4:4 YUV to 16 bit 4:2:2 YUV for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, fmt)(_simple_mean_convert_yuv444__yuv422(fmt)) @@ -208,4 +210,13 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] # 16 bit 4:2:2 YUV to 16 bit 4:4:4 YUV for fmt in 
[CogFrameFormat.S16_422, CogFrameFormat.v216]: - VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444)) \ No newline at end of file + VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444)) + + +# 10 bit conversions +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422_10BIT)) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420_10BIT)) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_420_10BIT)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420_10BIT)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422_10BIT)(grain))) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_422_10BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_10BIT)) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_444_10BIT)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_10BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_10BIT)(grain))) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_444_10BIT)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_10BIT)) \ No newline at end of file diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index e411176..e37f5e6 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -474,6 +474,7 @@ def pairs_from(fmts): [CogFrameFormat.YUYV, CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.U8_422, CogFrameFormat.U8_420], # All YUV 8bit formats [CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, 
CogFrameFormat.xBGR], # All 8-bit 3 component RGB formats [CogFrameFormat.v216, CogFrameFormat.S16_444, CogFrameFormat.S16_422, CogFrameFormat.S16_420], # All YUV 16bit formats + [CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT], # All YUV 10bit formats except for v210 ]: for (fmt_in, fmt_out) in pairs_from(fmts): with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): From 4ecba8edf3209ab3a1ff64f7ec65a0e0db9d9db9 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 15 Oct 2019 16:04:18 +0100 Subject: [PATCH 58/76] numpy.convert: Added 12-bit YUV format conversions --- mediagrains_py36/numpy/convert.py | 10 +++++++++- tests/test36_numpy_videograin.py | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index 4eb7c2a..b884aef 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -219,4 +219,12 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_420_10BIT)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420_10BIT)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422_10BIT)(grain))) VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_422_10BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_10BIT)) VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_444_10BIT)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_10BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_10BIT)(grain))) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_444_10BIT)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_10BIT)) \ No newline at end of file +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_10BIT, 
CogFrameFormat.S16_444_10BIT)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_10BIT)) + +# 12 bit conversions +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422_12BIT)) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420_12BIT)) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_420_12BIT)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420_12BIT)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422_12BIT)(grain))) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_12BIT, CogFrameFormat.S16_422_12BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_12BIT)) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_12BIT, CogFrameFormat.S16_444_12BIT)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_12BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_12BIT)(grain))) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_444_12BIT)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_12BIT)) \ No newline at end of file diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index e37f5e6..7052d05 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -475,6 +475,7 @@ def pairs_from(fmts): [CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR], # All 8-bit 3 component RGB formats [CogFrameFormat.v216, CogFrameFormat.S16_444, CogFrameFormat.S16_422, CogFrameFormat.S16_420], # All YUV 16bit formats [CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT], # All YUV 10bit formats except for v210 + [CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT, 
CogFrameFormat.S16_420_12BIT], # All YUV 12bit formats ]: for (fmt_in, fmt_out) in pairs_from(fmts): with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): From 97be7115bfc0a214208dfc4ab99d48d9f3e2286c Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 15 Oct 2019 16:09:02 +0100 Subject: [PATCH 59/76] numpy.convert: Added 32-bit YUV format conversions --- mediagrains_py36/numpy/convert.py | 10 +++++++++- tests/test36_numpy_videograin.py | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index b884aef..d9249a2 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -227,4 +227,12 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_420_12BIT)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420_12BIT)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422_12BIT)(grain))) VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_12BIT, CogFrameFormat.S16_422_12BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_12BIT)) VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_12BIT, CogFrameFormat.S16_444_12BIT)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_12BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_12BIT)(grain))) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_444_12BIT)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_12BIT)) \ No newline at end of file +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_444_12BIT)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_12BIT)) + +# 32 bit conversions +VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_444, CogFrameFormat.S32_422)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S32_422)) 
+VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_422, CogFrameFormat.S32_420)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.S32_420)) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_444, CogFrameFormat.S32_420)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S32_420)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S32_422)(grain))) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_420, CogFrameFormat.S32_422)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S32_422)) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_420, CogFrameFormat.S32_444)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S32_444)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S32_422)(grain))) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_422, CogFrameFormat.S32_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S32_444)) \ No newline at end of file diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 7052d05..aa122d9 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -476,6 +476,7 @@ def pairs_from(fmts): [CogFrameFormat.v216, CogFrameFormat.S16_444, CogFrameFormat.S16_422, CogFrameFormat.S16_420], # All YUV 16bit formats [CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT], # All YUV 10bit formats except for v210 [CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT], # All YUV 12bit formats + [CogFrameFormat.S32_444, CogFrameFormat.S32_422, CogFrameFormat.S32_420], # All YUV 32bit formats ]: for (fmt_in, fmt_out) in pairs_from(fmts): with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): From 329a37bcceccfc4f2bdd624b4d118c16e7989605 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 15 Oct 2019 18:05:19 +0100 Subject: [PATCH 60/76] numpy.convert: conversion between bitdepths (as long as no other conversion occuring at the same time) --- mediagrains/cogenums.py | 18 ++++++++- 
mediagrains_py36/numpy/convert.py | 55 +++++++++++++++++++++++++++- mediagrains_py36/numpy/videograin.py | 8 ++-- tests/test36_numpy_videograin.py | 45 ++++++++++++++++------- 4 files changed, 105 insertions(+), 21 deletions(-) diff --git a/mediagrains/cogenums.py b/mediagrains/cogenums.py index b5c5aa3..7b52cec 100644 --- a/mediagrains/cogenums.py +++ b/mediagrains/cogenums.py @@ -23,7 +23,7 @@ # that python code using it is compatible with this library when specifying # video and audio formats. -from enum import IntEnum +from enum import IntEnum, Enum __all__ = [ 'CogFrameFormat', @@ -125,6 +125,22 @@ class CogAudioFormat(IntEnum): INVALID = 0xffffffff +class PlanarChromaFormat(IntEnum): + YUV_444 = 0x00 + YUV_422 = 0x01 + YUV_420 = 0x03 + RGB = 0x10 + + +def COG_PLANAR_FORMAT(chroma, depth): + if depth <= 8: + return CogFrameFormat(0 + chroma + (depth << 10)) + elif depth > 16: + return CogFrameFormat(8 + chroma + (depth << 10)) + else: + return CogFrameFormat(4 + chroma + (depth << 10)) + + def COG_FRAME_IS_PACKED(fmt): return ((fmt >> 8) & 0x1) != 0 diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index d9249a2..6d0f4dd 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -19,10 +19,13 @@ Library for converting video grain formats represented as numpy arrays. 
""" -from mediagrains.cogenums import CogFrameFormat, CogFrameLayout +from mediagrains.cogenums import CogFrameFormat, CogFrameLayout, COG_FRAME_FORMAT_ACTIVE_BITS, COG_PLANAR_FORMAT, PlanarChromaFormat from typing import Callable, List from uuid import uuid5, UUID import numpy as np +import numpy.random as npr + +from pdb import set_trace from .videograin import VideoGrain, VIDEOGRAIN @@ -133,6 +136,42 @@ def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: return _inner +# Bit depth conversions +def _unbiased_right_shift(a: np.ndarray, n: int) -> np.ndarray: + return (a >> n) + ((a >> (n - 1))&0x1) + +def _bitdepth_down_convert(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: + grain_out = new_grain(grain_in, fmt) + + bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) - COG_FRAME_FORMAT_ACTIVE_BITS(fmt) + + grain_out.component_data[0][:] = _unbiased_right_shift(grain_in.component_data[0][:], bitshift) + grain_out.component_data[1][:] = _unbiased_right_shift(grain_in.component_data[1][:], bitshift) + grain_out.component_data[2][:] = _unbiased_right_shift(grain_in.component_data[2][:], bitshift) + + return grain_out + return _inner + +def _noisy_left_shift(a: np.ndarray, n: int) -> np.ndarray: + rando = ((npr.random_sample(a.shape) * (1 << n)).astype(a.dtype)) & ((1 << n) - 1) + return (a << n) + rando + +def _bitdepth_up_convert(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: + grain_out = new_grain(grain_in, fmt) + + bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(fmt) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) + + dt = grain_out.component_data[0].dtype + + grain_out.component_data[0][:] = _noisy_left_shift(grain_in.component_data[0][:].astype(dt), bitshift) + grain_out.component_data[1][:] = _noisy_left_shift(grain_in.component_data[1][:].astype(dt), bitshift) + grain_out.component_data[2][:] = 
_noisy_left_shift(grain_in.component_data[2][:].astype(dt), bitshift) + + return grain_out + return _inner + def _register_simple_copy_conversions_for_formats_yuv(fmts: List[CogFrameFormat]): for i in range(0, len(fmts)): @@ -235,4 +274,16 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_444, CogFrameFormat.S32_420)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S32_420)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S32_422)(grain))) VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_420, CogFrameFormat.S32_422)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S32_422)) VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_420, CogFrameFormat.S32_444)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S32_444)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S32_422)(grain))) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_422, CogFrameFormat.S32_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S32_444)) \ No newline at end of file +VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_422, CogFrameFormat.S32_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S32_444)) + + +# Bit depth conversions +def distinct_pairs_from(vals): + for i in range(0, len(vals)): + for j in range(i + 1, len(vals)): + yield (vals[i], vals[j]) + +for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444, PlanarChromaFormat.RGB]: + for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d2), COG_PLANAR_FORMAT(ss, d1))(_bitdepth_down_convert(COG_PLANAR_FORMAT(ss, d1))) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d1), COG_PLANAR_FORMAT(ss, d2))(_bitdepth_up_convert(COG_PLANAR_FORMAT(ss, d2))) \ No newline at end of file diff --git a/mediagrains_py36/numpy/videograin.py b/mediagrains_py36/numpy/videograin.py index 987253e..300055e 100644 --- 
a/mediagrains_py36/numpy/videograin.py +++ b/mediagrains_py36/numpy/videograin.py @@ -52,9 +52,9 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: if COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 1: return np.dtype(np.uint8) elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 2: - return np.dtype(np.int16) + return np.dtype(np.uint16) elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 4: - return np.dtype(np.int32) + return np.dtype(np.uint32) elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, CogFrameFormat.AYUV, @@ -69,9 +69,9 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: CogFrameFormat.xBGR]: return np.dtype(np.uint8) elif fmt == CogFrameFormat.v216: - return np.dtype(np.int16) + return np.dtype(np.uint16) elif fmt == CogFrameFormat.v210: - return np.dtype(np.int32) + return np.dtype(np.uint32) raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index aa122d9..44767e6 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -18,7 +18,7 @@ from unittest import TestCase import uuid -from mediagrains.numpy import VideoGrain +from mediagrains.numpy import VideoGrain, VIDEOGRAIN from mediagrains.numpy import flow_id_for_converted_flow from mediagrains_py36.numpy.videograin import _dtype_from_cogframeformat from mediagrains.cogenums import ( @@ -36,7 +36,7 @@ import mock from fractions import Fraction from copy import copy, deepcopy -from typing import Tuple +from typing import Tuple, Optional import numpy as np @@ -298,9 +298,9 @@ def __inner(grain): if bps == 1: dtype = np.dtype(np.uint8) elif bps == 2: - dtype = np.dtype(np.int16) + dtype = np.dtype(np.uint16) elif bps == 4: - dtype = np.dtype(np.int32) + dtype = np.dtype(np.uint32) else: raise Exception() @@ -387,24 +387,30 @@ def write_test_pattern(self, grain): grain.component_data.U[:, :] = U grain.component_data.V[:, :] 
= V - def assertArrayEqual(self, a: np.ndarray, b: np.ndarray): - self.assertTrue(np.array_equal(a, b), msg="{} != {}".format(a, b)) + def assertArrayEqual(self, a: np.ndarray, b: np.ndarray, max_diff: Optional[int] = None): + if max_diff is None: + self.assertTrue(np.array_equal(a, b), msg="{} != {}".format(a, b)) + else: + a = a.astype(np.dtype(np.int32)) + b = b.astype(np.dtype(np.int32)) + self.assertTrue(np.amax(np.absolute(a - b)) <= max_diff, + msg="{} - {} = {} (allowing up to {} difference)".format(a, b, a - b, max_diff)) - def assertMatchesTestPattern(self, grain): + def assertMatchesTestPattern(self, grain: VIDEOGRAIN, max_diff: Optional[int] = None): fmt = grain.format if self._is_rgb(fmt): (R, G, B) = self._test_pattern_rgb(fmt) - self.assertArrayEqual(grain.component_data.R[:, :], R) - self.assertArrayEqual(grain.component_data.G[:, :], G) - self.assertArrayEqual(grain.component_data.B[:, :], B) + self.assertArrayEqual(grain.component_data.R[:, :], R, max_diff=max_diff) + self.assertArrayEqual(grain.component_data.G[:, :], G, max_diff=max_diff) + self.assertArrayEqual(grain.component_data.B[:, :], B, max_diff=max_diff) else: (Y, U, V) = self._test_pattern_yuv(fmt) - self.assertArrayEqual(grain.component_data.Y[:, :], Y) - self.assertArrayEqual(grain.component_data.U[:, :], U) - self.assertArrayEqual(grain.component_data.V[:, :], V) + self.assertArrayEqual(grain.component_data.Y[:, :], Y, max_diff=max_diff) + self.assertArrayEqual(grain.component_data.U[:, :], U, max_diff=max_diff) + self.assertArrayEqual(grain.component_data.V[:, :], V, max_diff=max_diff) def test_video_grain_create(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") @@ -477,6 +483,10 @@ def pairs_from(fmts): [CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT], # All YUV 10bit formats except for v210 [CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT], # All YUV 12bit formats 
[CogFrameFormat.S32_444, CogFrameFormat.S32_422, CogFrameFormat.S32_420], # All YUV 32bit formats + [CogFrameFormat.S32_444, CogFrameFormat.S16_444, CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_444_12BIT, CogFrameFormat.U8_444], # Bitdepth conversion + [CogFrameFormat.S32_422, CogFrameFormat.S16_422, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.U8_422], # Bitdepth conversion + [CogFrameFormat.S32_420, CogFrameFormat.S16_420, CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_420_12BIT, CogFrameFormat.U8_420], # Bitdepth conversion + [CogFrameFormat.S32_444_RGB, CogFrameFormat.S16_444_RGB, CogFrameFormat.S16_444_10BIT_RGB, CogFrameFormat.S16_444_12BIT_RGB, CogFrameFormat.U8_444_RGB], # Bitdepth conversion ]: for (fmt_in, fmt_out) in pairs_from(fmts): with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): @@ -495,7 +505,14 @@ def pairs_from(fmts): else: flow_id_out = flow_id self.assertIsVideoGrain(fmt_out, flow_id=flow_id_out, width=16, height=16, ignore_cts=True)(grain_out) - self.assertMatchesTestPattern(grain_out) + + # If we've converted from a different bit-depth we need to ignore rounding errors + if self._get_bitdepth(fmt_out) > self._get_bitdepth(fmt_in): + self.assertMatchesTestPattern(grain_out, max_diff=1 << (self._get_bitdepth(fmt_out) + 1 - self._get_bitdepth(fmt_in))) + elif self._get_bitdepth(fmt_out) < self._get_bitdepth(fmt_in): + self.assertMatchesTestPattern(grain_out, max_diff=1) + else: + self.assertMatchesTestPattern(grain_out) def test_video_grain_create_discontiguous(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") From 2784d3b170ec15c31e6a11ca1653a21372f352ed Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 16 Oct 2019 14:06:40 +0100 Subject: [PATCH 61/76] numpy.convert: conversion between bitdepths and subsampling at the same time --- mediagrains_py36/numpy/convert.py | 28 +++++++++++++++++++++++++++- tests/test36_numpy_videograin.py | 6 +++--- 2 files changed, 30 insertions(+), 4 
deletions(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index 6d0f4dd..4ede812 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -283,7 +283,33 @@ def distinct_pairs_from(vals): for j in range(i + 1, len(vals)): yield (vals[i], vals[j]) +# Compose two conversion functions together +def compose(first: Callable[[VIDEOGRAIN], VIDEOGRAIN], second: Callable[[VIDEOGRAIN], VIDEOGRAIN]) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + return lambda grain: second(first(grain)) + for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444, PlanarChromaFormat.RGB]: for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d2), COG_PLANAR_FORMAT(ss, d1))(_bitdepth_down_convert(COG_PLANAR_FORMAT(ss, d1))) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d1), COG_PLANAR_FORMAT(ss, d2))(_bitdepth_up_convert(COG_PLANAR_FORMAT(ss, d2))) \ No newline at end of file + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d1), COG_PLANAR_FORMAT(ss, d2))(_bitdepth_up_convert(COG_PLANAR_FORMAT(ss, d2))) + + +# We have a number of transformations that aren't supported directly, but are via an intermediate format +# Bit depth and chroma combination conversions +for (ss1, ss2) in distinct_pairs_from([PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444]): + for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss1, d1), COG_PLANAR_FORMAT(ss2, d2))( + compose( + VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss1, d1), COG_PLANAR_FORMAT(ss2, d1)), + VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d1), COG_PLANAR_FORMAT(ss2, d2)))) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss1, d2), COG_PLANAR_FORMAT(ss2, d1))( + compose( + VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss1, d2), COG_PLANAR_FORMAT(ss2, d2)), + 
VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss2, d1)))) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss2, d1), COG_PLANAR_FORMAT(ss1, d2))( + compose( + VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d1), COG_PLANAR_FORMAT(ss2, d2)), + VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss1, d2)))) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss1, d1))( + compose( + VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss1, d2)), + VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss1, d2), COG_PLANAR_FORMAT(ss1, d1)))) \ No newline at end of file diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 44767e6..47a430f 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -483,9 +483,9 @@ def pairs_from(fmts): [CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT], # All YUV 10bit formats except for v210 [CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT], # All YUV 12bit formats [CogFrameFormat.S32_444, CogFrameFormat.S32_422, CogFrameFormat.S32_420], # All YUV 32bit formats - [CogFrameFormat.S32_444, CogFrameFormat.S16_444, CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_444_12BIT, CogFrameFormat.U8_444], # Bitdepth conversion - [CogFrameFormat.S32_422, CogFrameFormat.S16_422, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.U8_422], # Bitdepth conversion - [CogFrameFormat.S32_420, CogFrameFormat.S16_420, CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_420_12BIT, CogFrameFormat.U8_420], # Bitdepth conversion + [CogFrameFormat.S32_444, CogFrameFormat.S16_444, CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_444_12BIT, CogFrameFormat.U8_444, # Bitdepth conversion + CogFrameFormat.S32_422, CogFrameFormat.S16_422, 
CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.U8_422, # Bitdepth conversion + CogFrameFormat.S32_420, CogFrameFormat.S16_420, CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_420_12BIT, CogFrameFormat.U8_420], # Bitdepth conversion [CogFrameFormat.S32_444_RGB, CogFrameFormat.S16_444_RGB, CogFrameFormat.S16_444_10BIT_RGB, CogFrameFormat.S16_444_12BIT_RGB, CogFrameFormat.U8_444_RGB], # Bitdepth conversion ]: for (fmt_in, fmt_out) in pairs_from(fmts): From 394b41d2d5797b3376abde36d4dcf4bf5d65979b Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 16 Oct 2019 14:51:14 +0100 Subject: [PATCH 62/76] numpy.convert: colourspace conversion --- mediagrains_py36/numpy/convert.py | 73 ++++++++++++++++++++++++---- mediagrains_py36/numpy/videograin.py | 2 +- tests/test36_numpy_videograin.py | 21 +++++--- 3 files changed, 80 insertions(+), 16 deletions(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index 4ede812..c42f2d8 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -32,6 +32,17 @@ __all__ = ["flow_id_for_converted_flow"] +def distinct_pairs_from(vals): + for i in range(0, len(vals)): + for j in range(i + 1, len(vals)): + yield (vals[i], vals[j]) + + +def compose(first: Callable[[VIDEOGRAIN], VIDEOGRAIN], second: Callable[[VIDEOGRAIN], VIDEOGRAIN]) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + """Compose two conversion functions together""" + return lambda grain: second(first(grain)) + + def flow_id_for_converted_flow(source_id: UUID, fmt: CogFrameFormat) -> UUID: return uuid5(source_id, "FORMAT_CONVERSION: {!r}".format(fmt)) @@ -80,6 +91,7 @@ def _int_array_mean(a: np.ndarray, b: np.ndarray) -> np.ndarray: """This takes the mean of two arrays of integers without risking overflowing intermediate values.""" return (a//2 + b//2) + ((a&0x1) | (b&0x1)) + # Some conversions between YUV colour subsampling systems require a simple mean def 
_simple_mean_convert_yuv444__yuv422(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: @@ -173,6 +185,39 @@ def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: return _inner +# Colourspace conversions (based on rec.709) +def _convert_rgb_to_yuv444(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: + grain_out = new_grain(grain_in, fmt) + bd = COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) + (R, G, B) = (grain_in.component_data.R, + grain_in.component_data.G, + grain_in.component_data.B) + + grain_out.component_data.Y[:,:] = (R*0.2126 + G*0.7152 + B*0.0722) + grain_out.component_data.U[:,:] = (R*-0.114572 - G*0.385428 + B*0.5 + (1 << (bd - 1))) + grain_out.component_data.V[:,:] = (R*0.5 - G*0.454153 - B*0.045847 + (1 << (bd - 1))) + + return grain_out + return _inner + +def _convert_yuv444_to_rgb(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: + def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: + grain_out = new_grain(grain_in, fmt) + bd = COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) + (Y, U, V) = (grain_in.component_data.Y.astype(np.dtype(np.double)), + grain_in.component_data.U.astype(np.dtype(np.double)) - (1 << (bd - 1)), + grain_in.component_data.V.astype(np.dtype(np.double)) - (1 << (bd - 1))) + + grain_out.component_data.R[:,:] = (Y + V*1.5748) + grain_out.component_data.G[:,:] = (Y - U*0.187324 - V*0.468124) + grain_out.component_data.B[:,:] = (Y + U*1.8556) + + return grain_out + return _inner + + +# These methods automate the process of registering simple copy conversions def _register_simple_copy_conversions_for_formats_yuv(fmts: List[CogFrameFormat]): for i in range(0, len(fmts)): for j in range(i+1, len(fmts)): @@ -192,6 +237,7 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] CogFrameFormat.UYVY, CogFrameFormat.U8_422]) + # 8 bit RGB formats _register_simple_copy_conversions_for_formats_rgb([ 
CogFrameFormat.RGB, @@ -201,24 +247,30 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] CogFrameFormat.BGRx, CogFrameFormat.xBGR]) + # 8 bit 4:4:4 YUV to 8 bit 4:2:2 YUV for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_444, fmt)(_simple_mean_convert_yuv444__yuv422(fmt)) + # 8 bit 4:2:2 YUV to 8 bit 4:2:0 YUV for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_420)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.U8_420)) + # 8 bit 4:4:4 YUV to 8 bit 4:2:0 YUV VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_444, CogFrameFormat.U8_420)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.U8_420)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.U8_422)(grain))) + # 8 bit 4:2:0 YUV to 8 bit 4:2:2 YUV for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_420, fmt)(_simple_duplicate_convert_yuv420__yuv422(fmt)) + # 8 bit 4:2:0 YUV to 8 bit 4:4:4 YUV VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_420, CogFrameFormat.U8_444)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.U8_422)(grain))) + # 8 bit 4:2:2 YUV to 8 bit 4:4:4 YUV for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.U8_444)) @@ -229,24 +281,30 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] CogFrameFormat.v216, CogFrameFormat.S16_422]) + # 16 bit 4:4:4 YUV to 16 bit 4:2:2 YUV for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, fmt)(_simple_mean_convert_yuv444__yuv422(fmt)) + # 16 bit 4:2:2 YUV to 16 bit 4:2:0 YUV for fmt in 
[CogFrameFormat.S16_422, CogFrameFormat.v216]: VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_420)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420)) + # 16 bit 4:4:4 YUV to 16 bit 4:2:0 YUV VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, CogFrameFormat.S16_420)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422)(grain))) + # 16 bit 4:2:0 YUV to 16 bit 4:2:2 YUV for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420, fmt)(_simple_duplicate_convert_yuv420__yuv422(fmt)) + # 16 bit 4:2:0 YUV to 16 bit 4:4:4 YUV VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420, CogFrameFormat.S16_444)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422)(grain))) + # 16 bit 4:2:2 YUV to 16 bit 4:4:4 YUV for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444)) @@ -278,21 +336,18 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] # Bit depth conversions -def distinct_pairs_from(vals): - for i in range(0, len(vals)): - for j in range(i + 1, len(vals)): - yield (vals[i], vals[j]) - -# Compose two conversion functions together -def compose(first: Callable[[VIDEOGRAIN], VIDEOGRAIN], second: Callable[[VIDEOGRAIN], VIDEOGRAIN]) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - return lambda grain: second(first(grain)) - for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444, PlanarChromaFormat.RGB]: for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d2), COG_PLANAR_FORMAT(ss, d1))(_bitdepth_down_convert(COG_PLANAR_FORMAT(ss, d1))) VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d1), COG_PLANAR_FORMAT(ss, 
d2))(_bitdepth_up_convert(COG_PLANAR_FORMAT(ss, d2))) +# Colourspace conversion +for d in [8, 10, 12, 16, 32]: + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d))(_convert_yuv444_to_rgb(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d))) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))(_convert_rgb_to_yuv444(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))) + + # We have a number of transformations that aren't supported directly, but are via an intermediate format # Bit depth and chroma combination conversions for (ss1, ss2) in distinct_pairs_from([PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444]): diff --git a/mediagrains_py36/numpy/videograin.py b/mediagrains_py36/numpy/videograin.py index 300055e..fb1548f 100644 --- a/mediagrains_py36/numpy/videograin.py +++ b/mediagrains_py36/numpy/videograin.py @@ -46,7 +46,7 @@ def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: For planar and padded formats this is the size of the native integer type that is used to handle the samples (eg. 8bit, 16bit, etc ...) For weird packed formats like v210 (10-bit samples packed so that there are 3 10-bit samples in every 32-bit word) this is not possible. - Instead for v210 we return int32, since that is the most useful native data type that always corresponds to an integral number of samples. + Instead for v210 we return uint32, since that is the most useful native data type that always corresponds to an integral number of samples. 
""" if COG_FRAME_IS_PLANAR(fmt): if COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 1: diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 47a430f..752ef07 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -355,12 +355,15 @@ def _test_pattern_rgb(self, fmt: CogFrameFormat) -> Tuple[np.ndarray, np.ndarray def _test_pattern_yuv(self, fmt: CogFrameFormat) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: (R, G, B) = self._test_pattern_rgb(fmt) + (R, G, B) = (R.astype(np.dtype(np.double)), + G.astype(np.dtype(np.double)), + B.astype(np.dtype(np.double))) bd = self._get_bitdepth(fmt) (hs, vs, _) = self._get_hs_vs_and_bps(fmt) Y = (R*0.2126 + G*0.7152 + B*0.0722) - U = (R*-0.114572 - G*0.385428 + B*0.5 + (1 << bd - 1)) - V = (R*0.5 - G*0.454153 - B*0.045847 + (1 << bd - 1)) + U = (R*-0.114572 - G*0.385428 + B*0.5 + (1 << (bd - 1))) + V = (R*0.5 - G*0.454153 - B*0.045847 + (1 << (bd - 1))) if hs == 1: U = (U[0::2, :] + U[1::2, :])/2 @@ -391,8 +394,8 @@ def assertArrayEqual(self, a: np.ndarray, b: np.ndarray, max_diff: Optional[int] if max_diff is None: self.assertTrue(np.array_equal(a, b), msg="{} != {}".format(a, b)) else: - a = a.astype(np.dtype(np.int32)) - b = b.astype(np.dtype(np.int32)) + a = a.astype(np.dtype(np.int64)) + b = b.astype(np.dtype(np.int64)) self.assertTrue(np.amax(np.absolute(a - b)) <= max_diff, msg="{} - {} = {} (allowing up to {} difference)".format(a, b, a - b, max_diff)) @@ -475,7 +478,6 @@ def pairs_from(fmts): for fmt_out in fmts: yield (fmt_in, fmt_out) - # This checks conversions within YUV and RGB space, but not conversions between the two for fmts in [ [CogFrameFormat.YUYV, CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.U8_422, CogFrameFormat.U8_420], # All YUV 8bit formats [CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR], # All 8-bit 3 component RGB formats @@ -487,6 +489,10 @@ def 
pairs_from(fmts): CogFrameFormat.S32_422, CogFrameFormat.S16_422, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.U8_422, # Bitdepth conversion CogFrameFormat.S32_420, CogFrameFormat.S16_420, CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_420_12BIT, CogFrameFormat.U8_420], # Bitdepth conversion [CogFrameFormat.S32_444_RGB, CogFrameFormat.S16_444_RGB, CogFrameFormat.S16_444_10BIT_RGB, CogFrameFormat.S16_444_12BIT_RGB, CogFrameFormat.U8_444_RGB], # Bitdepth conversion + [CogFrameFormat.U8_444, CogFrameFormat.U8_444_RGB], # Colourspace conversion + [CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_444_10BIT_RGB], # Colourspace conversion + [CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_444_12BIT_RGB], # Colourspace conversion + [CogFrameFormat.S16_444, CogFrameFormat.S16_444_RGB], # Colourspace conversion ]: for (fmt_in, fmt_out) in pairs_from(fmts): with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): @@ -509,7 +515,10 @@ def pairs_from(fmts): # If we've converted from a different bit-depth we need to ignore rounding errors if self._get_bitdepth(fmt_out) > self._get_bitdepth(fmt_in): self.assertMatchesTestPattern(grain_out, max_diff=1 << (self._get_bitdepth(fmt_out) + 1 - self._get_bitdepth(fmt_in))) - elif self._get_bitdepth(fmt_out) < self._get_bitdepth(fmt_in): + elif fmt_in == CogFrameFormat.S16_444 and fmt_out == CogFrameFormat.S16_444_RGB: + self.assertMatchesTestPattern(grain_out, max_diff=2) + elif ((self._get_bitdepth(fmt_out) < self._get_bitdepth(fmt_in)) or + (self._is_rgb(fmt_in) != self._is_rgb(fmt_out))): self.assertMatchesTestPattern(grain_out, max_diff=1) else: self.assertMatchesTestPattern(grain_out) From 0ff4ee576550d0fb82f59e817bc722c17b65306d Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 16 Oct 2019 16:16:17 +0100 Subject: [PATCH 63/76] numpy.convert: refactor to make composition a bit easier --- mediagrains/numpy.py | 4 +- mediagrains_py36/numpy/__init__.py | 4 +- mediagrains_py36/numpy/convert.py | 273 
++++++++++----------------- mediagrains_py36/numpy/videograin.py | 25 ++- tests/test36_numpy_videograin.py | 3 +- 5 files changed, 123 insertions(+), 186 deletions(-) diff --git a/mediagrains/numpy.py b/mediagrains/numpy.py index 1e1e6e3..725983c 100644 --- a/mediagrains/numpy.py +++ b/mediagrains/numpy.py @@ -22,8 +22,8 @@ from sys import version_info if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): - from mediagrains_py36.numpy import VideoGrain, VIDEOGRAIN, flow_id_for_converted_flow # noqa: F401 + from mediagrains_py36.numpy import VideoGrain, VIDEOGRAIN # noqa: F401 - __all__ = ['VideoGrain', 'VIDEOGRAIN', "flow_id_for_converted_flow"] + __all__ = ['VideoGrain', 'VIDEOGRAIN'] else: __all__ = [] diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py index d4256dd..662a315 100644 --- a/mediagrains_py36/numpy/__init__.py +++ b/mediagrains_py36/numpy/__init__.py @@ -20,6 +20,6 @@ """ from .videograin import VIDEOGRAIN, VideoGrain -from . convert import flow_id_for_converted_flow +from . 
import convert -__all__ = ['VideoGrain', 'VIDEOGRAIN', "flow_id_for_converted_flow"] +__all__ = ['VideoGrain', 'VIDEOGRAIN'] diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index c42f2d8..a19ab30 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -29,8 +29,6 @@ from .videograin import VideoGrain, VIDEOGRAIN -__all__ = ["flow_id_for_converted_flow"] - def distinct_pairs_from(vals): for i in range(0, len(vals)): @@ -38,53 +36,29 @@ def distinct_pairs_from(vals): yield (vals[i], vals[j]) -def compose(first: Callable[[VIDEOGRAIN], VIDEOGRAIN], second: Callable[[VIDEOGRAIN], VIDEOGRAIN]) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: +def compose(first: Callable[[VIDEOGRAIN, VIDEOGRAIN], None], intermediate: CogFrameFormat, second: Callable[[VIDEOGRAIN, VIDEOGRAIN], None]) -> Callable[[VIDEOGRAIN, VIDEOGRAIN], None]: """Compose two conversion functions together""" - return lambda grain: second(first(grain)) - - -def flow_id_for_converted_flow(source_id: UUID, fmt: CogFrameFormat) -> UUID: - return uuid5(source_id, "FORMAT_CONVERSION: {!r}".format(fmt)) + def _inner(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_intermediate = grain_in._similar_grain(intermediate) - -def new_grain(grain: VIDEOGRAIN, fmt: CogFrameFormat): - return VideoGrain(grain.source_id, - flow_id_for_converted_flow(grain.source_id, fmt), - origin_timestamp=grain.origin_timestamp, - sync_timestamp=grain.sync_timestamp, - cog_frame_format=fmt, - width=grain.width, - height=grain.height, - rate=grain.rate, - duration=grain.duration, - cog_frame_layout=grain.layout) + first(grain_in, grain_intermediate) + second(grain_intermediate, grain_out) + return _inner # Some simple conversions can be acheived by just copying the data from one grain to the other with no # clever work at all. 
All the cleverness is already present in the code that creates the component array views # in the mediagrains -def _simple_copy_convert_yuv(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: - grain_out = new_grain(grain_in, fmt) +def _simple_copy_convert_yuv(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + grain_out.component_data.U[:,:] = grain_in.component_data.U + grain_out.component_data.V[:,:] = grain_in.component_data.V - grain_out.component_data.Y[:,:] = grain_in.component_data.Y - grain_out.component_data.U[:,:] = grain_in.component_data.U - grain_out.component_data.V[:,:] = grain_in.component_data.V - - return grain_out - return _inner - -def _simple_copy_convert_rgb(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: - grain_out = new_grain(grain_in, fmt) - - grain_out.component_data.R[:,:] = grain_in.component_data.R - grain_out.component_data.G[:,:] = grain_in.component_data.G - grain_out.component_data.B[:,:] = grain_in.component_data.B - - return grain_out - return _inner +def _simple_copy_convert_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.R[:,:] = grain_in.component_data.R + grain_out.component_data.G[:,:] = grain_in.component_data.G + grain_out.component_data.B[:,:] = grain_in.component_data.B def _int_array_mean(a: np.ndarray, b: np.ndarray) -> np.ndarray: @@ -93,142 +67,98 @@ def _int_array_mean(a: np.ndarray, b: np.ndarray) -> np.ndarray: # Some conversions between YUV colour subsampling systems require a simple mean -def _simple_mean_convert_yuv444__yuv422(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: - grain_out = new_grain(grain_in, fmt) - - grain_out.component_data.Y[:,:] = grain_in.component_data.Y - - grain_out.component_data.U[:,:] = 
_int_array_mean(grain_in.component_data.U[0::2, :], grain_in.component_data.U[1::2, :]) - grain_out.component_data.V[:,:] = _int_array_mean(grain_in.component_data.V[0::2, :], grain_in.component_data.V[1::2, :]) - - return grain_out - return _inner +def _simple_mean_convert_yuv444__yuv422(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + grain_out.component_data.U[:,:] = _int_array_mean(grain_in.component_data.U[0::2, :], grain_in.component_data.U[1::2, :]) + grain_out.component_data.V[:,:] = _int_array_mean(grain_in.component_data.V[0::2, :], grain_in.component_data.V[1::2, :]) -def _simple_mean_convert_yuv422__yuv420(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: - grain_out = new_grain(grain_in, fmt) +def _simple_mean_convert_yuv422__yuv420(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + grain_out.component_data.U[:,:] = _int_array_mean(grain_in.component_data.U[:, 0::2], grain_in.component_data.U[:, 1::2]) + grain_out.component_data.V[:,:] = _int_array_mean(grain_in.component_data.V[:, 0::2], grain_in.component_data.V[:, 1::2]) - grain_out.component_data.Y[:,:] = grain_in.component_data.Y - - grain_out.component_data.U[:,:] = _int_array_mean(grain_in.component_data.U[:, 0::2], grain_in.component_data.U[:, 1::2]) - grain_out.component_data.V[:,:] = _int_array_mean(grain_in.component_data.V[:, 0::2], grain_in.component_data.V[:, 1::2]) - - return grain_out - return _inner # Other conversions require duplicating samples -def _simple_duplicate_convert_yuv422__yuv444(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: - grain_out = new_grain(grain_in, fmt) - - grain_out.component_data.Y[:,:] = grain_in.component_data.Y - - grain_out.component_data.U[0::2, :] = grain_in.component_data.U - grain_out.component_data.U[1::2, :] = 
grain_in.component_data.U - grain_out.component_data.V[0::2, :] = grain_in.component_data.V - grain_out.component_data.V[1::2, :] = grain_in.component_data.V - - return grain_out - return _inner +def _simple_duplicate_convert_yuv422__yuv444(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.Y[:,:] = grain_in.component_data.Y -def _simple_duplicate_convert_yuv420__yuv422(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: - grain_out = new_grain(grain_in, fmt) + grain_out.component_data.U[0::2, :] = grain_in.component_data.U + grain_out.component_data.U[1::2, :] = grain_in.component_data.U + grain_out.component_data.V[0::2, :] = grain_in.component_data.V + grain_out.component_data.V[1::2, :] = grain_in.component_data.V - grain_out.component_data.Y[:,:] = grain_in.component_data.Y - grain_out.component_data.U[:, 0::2] = grain_in.component_data.U - grain_out.component_data.U[:, 1::2] = grain_in.component_data.U - grain_out.component_data.V[:, 0::2] = grain_in.component_data.V - grain_out.component_data.V[:, 1::2] = grain_in.component_data.V +def _simple_duplicate_convert_yuv420__yuv422(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.Y[:,:] = grain_in.component_data.Y - return grain_out - return _inner + grain_out.component_data.U[:, 0::2] = grain_in.component_data.U + grain_out.component_data.U[:, 1::2] = grain_in.component_data.U + grain_out.component_data.V[:, 0::2] = grain_in.component_data.V + grain_out.component_data.V[:, 1::2] = grain_in.component_data.V # Bit depth conversions def _unbiased_right_shift(a: np.ndarray, n: int) -> np.ndarray: return (a >> n) + ((a >> (n - 1))&0x1) -def _bitdepth_down_convert(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: - grain_out = new_grain(grain_in, fmt) +def _bitdepth_down_convert(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bitshift = 
COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) - bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) - COG_FRAME_FORMAT_ACTIVE_BITS(fmt) + grain_out.component_data[0][:] = _unbiased_right_shift(grain_in.component_data[0][:], bitshift) + grain_out.component_data[1][:] = _unbiased_right_shift(grain_in.component_data[1][:], bitshift) + grain_out.component_data[2][:] = _unbiased_right_shift(grain_in.component_data[2][:], bitshift) - grain_out.component_data[0][:] = _unbiased_right_shift(grain_in.component_data[0][:], bitshift) - grain_out.component_data[1][:] = _unbiased_right_shift(grain_in.component_data[1][:], bitshift) - grain_out.component_data[2][:] = _unbiased_right_shift(grain_in.component_data[2][:], bitshift) - - return grain_out - return _inner def _noisy_left_shift(a: np.ndarray, n: int) -> np.ndarray: rando = ((npr.random_sample(a.shape) * (1 << n)).astype(a.dtype)) & ((1 << n) - 1) return (a << n) + rando -def _bitdepth_up_convert(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: - grain_out = new_grain(grain_in, fmt) +def _bitdepth_up_convert(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) - bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(fmt) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) + dt = grain_out.component_data[0].dtype - dt = grain_out.component_data[0].dtype - - grain_out.component_data[0][:] = _noisy_left_shift(grain_in.component_data[0][:].astype(dt), bitshift) - grain_out.component_data[1][:] = _noisy_left_shift(grain_in.component_data[1][:].astype(dt), bitshift) - grain_out.component_data[2][:] = _noisy_left_shift(grain_in.component_data[2][:].astype(dt), bitshift) - - return grain_out - return _inner + grain_out.component_data[0][:] = _noisy_left_shift(grain_in.component_data[0][:].astype(dt), bitshift) + 
grain_out.component_data[1][:] = _noisy_left_shift(grain_in.component_data[1][:].astype(dt), bitshift) + grain_out.component_data[2][:] = _noisy_left_shift(grain_in.component_data[2][:].astype(dt), bitshift) # Colourspace conversions (based on rec.709) -def _convert_rgb_to_yuv444(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: - grain_out = new_grain(grain_in, fmt) - bd = COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) - (R, G, B) = (grain_in.component_data.R, - grain_in.component_data.G, - grain_in.component_data.B) - - grain_out.component_data.Y[:,:] = (R*0.2126 + G*0.7152 + B*0.0722) - grain_out.component_data.U[:,:] = (R*-0.114572 - G*0.385428 + B*0.5 + (1 << (bd - 1))) - grain_out.component_data.V[:,:] = (R*0.5 - G*0.454153 - B*0.045847 + (1 << (bd - 1))) - - return grain_out - return _inner +def _convert_rgb_to_yuv444(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bd = COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) + (R, G, B) = (grain_in.component_data.R, + grain_in.component_data.G, + grain_in.component_data.B) -def _convert_yuv444_to_rgb(fmt: CogFrameFormat) -> Callable[[VIDEOGRAIN], VIDEOGRAIN]: - def _inner(grain_in: VIDEOGRAIN) -> VIDEOGRAIN: - grain_out = new_grain(grain_in, fmt) - bd = COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) - (Y, U, V) = (grain_in.component_data.Y.astype(np.dtype(np.double)), - grain_in.component_data.U.astype(np.dtype(np.double)) - (1 << (bd - 1)), - grain_in.component_data.V.astype(np.dtype(np.double)) - (1 << (bd - 1))) + grain_out.component_data.Y[:,:] = (R*0.2126 + G*0.7152 + B*0.0722) + grain_out.component_data.U[:,:] = (R*-0.114572 - G*0.385428 + B*0.5 + (1 << (bd - 1))) + grain_out.component_data.V[:,:] = (R*0.5 - G*0.454153 - B*0.045847 + (1 << (bd - 1))) - grain_out.component_data.R[:,:] = (Y + V*1.5748) - grain_out.component_data.G[:,:] = (Y - U*0.187324 - V*0.468124) - grain_out.component_data.B[:,:] = (Y + U*1.8556) - return grain_out - return _inner 
+def _convert_yuv444_to_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bd = COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) + (Y, U, V) = (grain_in.component_data.Y.astype(np.dtype(np.double)), + grain_in.component_data.U.astype(np.dtype(np.double)) - (1 << (bd - 1)), + grain_in.component_data.V.astype(np.dtype(np.double)) - (1 << (bd - 1))) + + grain_out.component_data.R[:,:] = (Y + V*1.5748) + grain_out.component_data.G[:,:] = (Y - U*0.187324 - V*0.468124) + grain_out.component_data.B[:,:] = (Y + U*1.8556) # These methods automate the process of registering simple copy conversions def _register_simple_copy_conversions_for_formats_yuv(fmts: List[CogFrameFormat]): for i in range(0, len(fmts)): for j in range(i+1, len(fmts)): - VIDEOGRAIN.grain_conversion(fmts[i], fmts[j])(_simple_copy_convert_yuv(fmts[j])) - VIDEOGRAIN.grain_conversion(fmts[j], fmts[i])(_simple_copy_convert_yuv(fmts[i])) + VIDEOGRAIN.grain_conversion(fmts[i], fmts[j])(_simple_copy_convert_yuv) + VIDEOGRAIN.grain_conversion(fmts[j], fmts[i])(_simple_copy_convert_yuv) def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat]): for i in range(0, len(fmts)): for j in range(i+1, len(fmts)): - VIDEOGRAIN.grain_conversion(fmts[i], fmts[j])(_simple_copy_convert_rgb(fmts[j])) - VIDEOGRAIN.grain_conversion(fmts[j], fmts[i])(_simple_copy_convert_rgb(fmts[i])) + VIDEOGRAIN.grain_conversion(fmts[i], fmts[j])(_simple_copy_convert_rgb) + VIDEOGRAIN.grain_conversion(fmts[j], fmts[i])(_simple_copy_convert_rgb) # 4:2:2 8 bit YUV formats @@ -250,30 +180,30 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] # 8 bit 4:4:4 YUV to 8 bit 4:2:2 YUV for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: - VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_444, fmt)(_simple_mean_convert_yuv444__yuv422(fmt)) + VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_444, fmt)(_simple_mean_convert_yuv444__yuv422) # 8 bit 4:2:2 YUV to 8 bit 4:2:0 YUV for fmt in 
[CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: - VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_420)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.U8_420)) + VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_420)(_simple_mean_convert_yuv422__yuv420) # 8 bit 4:4:4 YUV to 8 bit 4:2:0 YUV -VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_444, CogFrameFormat.U8_420)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.U8_420)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.U8_422)(grain))) +VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_444, CogFrameFormat.U8_420)(compose(_simple_mean_convert_yuv444__yuv422, CogFrameFormat.U8_422, _simple_mean_convert_yuv422__yuv420)) # 8 bit 4:2:0 YUV to 8 bit 4:2:2 YUV for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: - VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_420, fmt)(_simple_duplicate_convert_yuv420__yuv422(fmt)) + VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_420, fmt)(_simple_duplicate_convert_yuv420__yuv422) # 8 bit 4:2:0 YUV to 8 bit 4:4:4 YUV -VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_420, CogFrameFormat.U8_444)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.U8_422)(grain))) +VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_420, CogFrameFormat.U8_444)(compose(_simple_duplicate_convert_yuv420__yuv422, CogFrameFormat.U8_422, _simple_duplicate_convert_yuv422__yuv444)) # 8 bit 4:2:2 YUV to 8 bit 4:4:4 YUV for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: - VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.U8_444)) + VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv422__yuv444) # 4:2:2 16 bit YUV formats @@ -284,68 +214,53 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] # 16 bit 4:4:4 YUV to 16 bit 4:2:2 YUV for 
fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: - VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, fmt)(_simple_mean_convert_yuv444__yuv422(fmt)) + VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, fmt)(_simple_mean_convert_yuv444__yuv422) # 16 bit 4:2:2 YUV to 16 bit 4:2:0 YUV for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: - VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_420)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420)) + VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_420)(_simple_mean_convert_yuv422__yuv420) # 16 bit 4:4:4 YUV to 16 bit 4:2:0 YUV -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, CogFrameFormat.S16_420)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422)(grain))) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, CogFrameFormat.S16_420)(compose(_simple_mean_convert_yuv444__yuv422, CogFrameFormat.S16_422, _simple_mean_convert_yuv422__yuv420)) # 16 bit 4:2:0 YUV to 16 bit 4:2:2 YUV for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: - VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420, fmt)(_simple_duplicate_convert_yuv420__yuv422(fmt)) + VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420, fmt)(_simple_duplicate_convert_yuv420__yuv422) # 16 bit 4:2:0 YUV to 16 bit 4:4:4 YUV -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420, CogFrameFormat.S16_444)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422)(grain))) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420, CogFrameFormat.S16_444)(compose(_simple_duplicate_convert_yuv420__yuv422, CogFrameFormat.S16_422, _simple_duplicate_convert_yuv422__yuv444)) # 16 bit 4:2:2 YUV to 16 bit 4:4:4 YUV for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: - VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444)) - - 
-# 10 bit conversions -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422_10BIT)) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420_10BIT)) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_420_10BIT)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420_10BIT)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422_10BIT)(grain))) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_422_10BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_10BIT)) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_444_10BIT)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_10BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_10BIT)(grain))) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_444_10BIT)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_10BIT)) - -# 12 bit conversions -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422_12BIT)) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420_12BIT)) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_420_12BIT)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S16_420_12BIT)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S16_422_12BIT)(grain))) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_12BIT, CogFrameFormat.S16_422_12BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_12BIT)) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420_12BIT, CogFrameFormat.S16_444_12BIT)(lambda grain: 
_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_12BIT)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S16_422_12BIT)(grain))) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_444_12BIT)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S16_444_12BIT)) - -# 32 bit conversions -VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_444, CogFrameFormat.S32_422)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S32_422)) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_422, CogFrameFormat.S32_420)(_simple_mean_convert_yuv422__yuv420(CogFrameFormat.S32_420)) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_444, CogFrameFormat.S32_420)(lambda grain: _simple_mean_convert_yuv422__yuv420(CogFrameFormat.S32_420)(_simple_mean_convert_yuv444__yuv422(CogFrameFormat.S32_422)(grain))) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_420, CogFrameFormat.S32_422)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S32_422)) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_420, CogFrameFormat.S32_444)(lambda grain: _simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S32_444)(_simple_duplicate_convert_yuv420__yuv422(CogFrameFormat.S32_422)(grain))) -VIDEOGRAIN.grain_conversion(CogFrameFormat.S32_422, CogFrameFormat.S32_444)(_simple_duplicate_convert_yuv422__yuv444(CogFrameFormat.S32_444)) + VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_444)(_simple_duplicate_convert_yuv422__yuv444) + + +# higher bit-depth conversions +for bd in [10, 12, 32]: + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd))(_simple_mean_convert_yuv444__yuv422) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd))(_simple_mean_convert_yuv422__yuv420) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, 
bd))(compose(_simple_mean_convert_yuv444__yuv422, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd), _simple_mean_convert_yuv422__yuv420)) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd))(_simple_duplicate_convert_yuv420__yuv422) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd))(compose(_simple_duplicate_convert_yuv420__yuv422, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd), _simple_duplicate_convert_yuv422__yuv444)) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd))(_simple_duplicate_convert_yuv422__yuv444) # Bit depth conversions for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444, PlanarChromaFormat.RGB]: for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d2), COG_PLANAR_FORMAT(ss, d1))(_bitdepth_down_convert(COG_PLANAR_FORMAT(ss, d1))) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d1), COG_PLANAR_FORMAT(ss, d2))(_bitdepth_up_convert(COG_PLANAR_FORMAT(ss, d2))) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d2), COG_PLANAR_FORMAT(ss, d1))(_bitdepth_down_convert) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d1), COG_PLANAR_FORMAT(ss, d2))(_bitdepth_up_convert) # Colourspace conversion for d in [8, 10, 12, 16, 32]: - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d))(_convert_yuv444_to_rgb(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d))) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))(_convert_rgb_to_yuv444(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), 
COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d))(_convert_yuv444_to_rgb) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))(_convert_rgb_to_yuv444) # We have a number of transformations that aren't supported directly, but are via an intermediate format @@ -355,16 +270,20 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss1, d1), COG_PLANAR_FORMAT(ss2, d2))( compose( VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss1, d1), COG_PLANAR_FORMAT(ss2, d1)), + COG_PLANAR_FORMAT(ss2, d1), VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d1), COG_PLANAR_FORMAT(ss2, d2)))) VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss1, d2), COG_PLANAR_FORMAT(ss2, d1))( compose( VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss1, d2), COG_PLANAR_FORMAT(ss2, d2)), + COG_PLANAR_FORMAT(ss2, d2), VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss2, d1)))) VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss2, d1), COG_PLANAR_FORMAT(ss1, d2))( compose( VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d1), COG_PLANAR_FORMAT(ss2, d2)), + COG_PLANAR_FORMAT(ss2, d2), VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss1, d2)))) VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss1, d1))( compose( VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss1, d2)), + COG_PLANAR_FORMAT(ss1, d2), VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss1, d2), COG_PLANAR_FORMAT(ss1, d1)))) \ No newline at end of file diff --git a/mediagrains_py36/numpy/videograin.py b/mediagrains_py36/numpy/videograin.py index fb1548f..394193b 100644 --- a/mediagrains_py36/numpy/videograin.py +++ b/mediagrains_py36/numpy/videograin.py @@ -29,6 +29,7 @@ from mediagrains import 
grain as bytesgrain from mediagrains import grain_constructors as bytesgrain_constructors from copy import copy, deepcopy +import uuid import numpy as np from numpy.lib.stride_tricks import as_strided @@ -226,19 +227,35 @@ def __repr__(self): @classmethod def grain_conversion(cls, fmt_in: CogFrameFormat, fmt_out: CogFrameFormat): """Decorator to apply to all grain conversion functions""" - def _inner(f: Callable[[cls], cls]) -> None: + def _inner(f: Callable[[cls, cls], None]) -> None: cls._grain_conversions[(fmt_in, fmt_out)] = f return f return _inner @classmethod - def _get_grain_conversion_function(cls, fmt_in: CogFrameFormat, fmt_out: CogFrameFormat) -> Callable[["VIDEOGRAIN"], "VIDEOGRAIN"]: + def _get_grain_conversion_function(cls, fmt_in: CogFrameFormat, fmt_out: CogFrameFormat) -> Callable[["VIDEOGRAIN", "VIDEOGRAIN"], None]: """Return the registered grain conversion function for a specified type conversion, or raise NotImplementedError""" if (fmt_in, fmt_out) in cls._grain_conversions: return cls._grain_conversions[(fmt_in, fmt_out)] raise NotImplementedError("This conversion has not yet been implemented") + def flow_id_for_converted_flow(self, fmt: CogFrameFormat) -> uuid.UUID: + return uuid.uuid5(self.source_id, "FORMAT_CONVERSION: {!r}".format(fmt)) + + def _similar_grain(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": + """Returns a new empty grain that has the specified format, but other parameters identical to this grain.""" + return VideoGrain(self.source_id, + self.flow_id_for_converted_flow(fmt), + origin_timestamp=self.origin_timestamp, + sync_timestamp=self.sync_timestamp, + cog_frame_format=fmt, + width=self.width, + height=self.height, + rate=self.rate, + duration=self.duration, + cog_frame_layout=self.layout) + def convert(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": """Used to convert this grain to a different cog format. 
@@ -249,7 +266,9 @@ def convert(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": if self.format == fmt: return deepcopy(self) else: - return self.__class__._get_grain_conversion_function(self.format, fmt)(self) + grain_out = self._similar_grain(fmt) + self.__class__._get_grain_conversion_function(self.format, fmt)(self, grain_out) + return grain_out def VideoGrain(*args, **kwargs) -> VIDEOGRAIN: diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 752ef07..40b8038 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -19,7 +19,6 @@ import uuid from mediagrains.numpy import VideoGrain, VIDEOGRAIN -from mediagrains.numpy import flow_id_for_converted_flow from mediagrains_py36.numpy.videograin import _dtype_from_cogframeformat from mediagrains.cogenums import ( CogFrameFormat, @@ -507,7 +506,7 @@ def pairs_from(fmts): grain_out = grain_in.convert(fmt_out) if fmt_in != fmt_out: - flow_id_out = flow_id_for_converted_flow(src_id, fmt_out) + flow_id_out = grain_in.flow_id_for_converted_flow(fmt_out) else: flow_id_out = flow_id self.assertIsVideoGrain(fmt_out, flow_id=flow_id_out, width=16, height=16, ignore_cts=True)(grain_out) From 4c9fc460422a4d07346cf92d06c693dfb49a1781 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 16 Oct 2019 17:28:55 +0100 Subject: [PATCH 64/76] numpy.convert: Add indirect transformations to allow conversion between any two formats not involving one of a small number of known bad conversions --- mediagrains_py36/numpy/convert.py | 199 ++++++++++++--------------- mediagrains_py36/numpy/videograin.py | 9 ++ tests/test36_numpy_videograin.py | 81 +++++------ 3 files changed, 136 insertions(+), 153 deletions(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index a19ab30..f11d658 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -102,19 +102,26 @@ def _simple_duplicate_convert_yuv420__yuv422(grain_in: 
VIDEOGRAIN, grain_out: VI def _unbiased_right_shift(a: np.ndarray, n: int) -> np.ndarray: return (a >> n) + ((a >> (n - 1))&0x1) -def _bitdepth_down_convert(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): +def _bitdepth_down_convert_yuv(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) grain_out.component_data[0][:] = _unbiased_right_shift(grain_in.component_data[0][:], bitshift) grain_out.component_data[1][:] = _unbiased_right_shift(grain_in.component_data[1][:], bitshift) grain_out.component_data[2][:] = _unbiased_right_shift(grain_in.component_data[2][:], bitshift) +def _bitdepth_down_convert_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) + + grain_out.component_data.R[:] = _unbiased_right_shift(grain_in.component_data.R[:], bitshift) + grain_out.component_data.G[:] = _unbiased_right_shift(grain_in.component_data.G[:], bitshift) + grain_out.component_data.B[:] = _unbiased_right_shift(grain_in.component_data.B[:], bitshift) + def _noisy_left_shift(a: np.ndarray, n: int) -> np.ndarray: rando = ((npr.random_sample(a.shape) * (1 << n)).astype(a.dtype)) & ((1 << n) - 1) return (a << n) + rando -def _bitdepth_up_convert(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): +def _bitdepth_up_convert_yuv(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) dt = grain_out.component_data[0].dtype @@ -123,6 +130,15 @@ def _bitdepth_up_convert(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): grain_out.component_data[1][:] = _noisy_left_shift(grain_in.component_data[1][:].astype(dt), bitshift) grain_out.component_data[2][:] = _noisy_left_shift(grain_in.component_data[2][:].astype(dt), bitshift) +def _bitdepth_up_convert_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bitshift = 
COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) + + dt = grain_out.component_data[0].dtype + + grain_out.component_data.R[:] = _noisy_left_shift(grain_in.component_data.R[:].astype(dt), bitshift) + grain_out.component_data.G[:] = _noisy_left_shift(grain_in.component_data.G[:].astype(dt), bitshift) + grain_out.component_data.B[:] = _noisy_left_shift(grain_in.component_data.B[:].astype(dt), bitshift) + # Colourspace conversions (based on rec.709) def _convert_rgb_to_yuv444(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): @@ -161,129 +177,94 @@ def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat] VIDEOGRAIN.grain_conversion(fmts[j], fmts[i])(_simple_copy_convert_rgb) -# 4:2:2 8 bit YUV formats -_register_simple_copy_conversions_for_formats_yuv([ - CogFrameFormat.YUYV, - CogFrameFormat.UYVY, - CogFrameFormat.U8_422]) - - -# 8 bit RGB formats -_register_simple_copy_conversions_for_formats_rgb([ - CogFrameFormat.RGB, - CogFrameFormat.U8_444_RGB, - CogFrameFormat.RGBx, - CogFrameFormat.xRGB, - CogFrameFormat.BGRx, - CogFrameFormat.xBGR]) - - -# 8 bit 4:4:4 YUV to 8 bit 4:2:2 YUV -for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: - VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_444, fmt)(_simple_mean_convert_yuv444__yuv422) - - -# 8 bit 4:2:2 YUV to 8 bit 4:2:0 YUV -for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: - VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_420)(_simple_mean_convert_yuv422__yuv420) - - -# 8 bit 4:4:4 YUV to 8 bit 4:2:0 YUV -VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_444, CogFrameFormat.U8_420)(compose(_simple_mean_convert_yuv444__yuv422, CogFrameFormat.U8_422, _simple_mean_convert_yuv422__yuv420)) - - -# 8 bit 4:2:0 YUV to 8 bit 4:2:2 YUV -for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: - VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_420, fmt)(_simple_duplicate_convert_yuv420__yuv422) +def 
_equivalent_formats(fmt: CogFrameFormat) -> List[CogFrameFormat]: + equiv_categories = [ + (CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV), + (CogFrameFormat.S16_422, CogFrameFormat.v216), + (CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR)] + for cat in equiv_categories: + if fmt in cat: + return cat + return (fmt,) -# 8 bit 4:2:0 YUV to 8 bit 4:4:4 YUV -VIDEOGRAIN.grain_conversion(CogFrameFormat.U8_420, CogFrameFormat.U8_444)(compose(_simple_duplicate_convert_yuv420__yuv422, CogFrameFormat.U8_422, _simple_duplicate_convert_yuv422__yuv444)) +_register_simple_copy_conversions_for_formats_yuv(_equivalent_formats(CogFrameFormat.U8_422)) +_register_simple_copy_conversions_for_formats_yuv(_equivalent_formats(CogFrameFormat.S16_422)) +_register_simple_copy_conversions_for_formats_rgb(_equivalent_formats(CogFrameFormat.U8_444_RGB)) -# 8 bit 4:2:2 YUV to 8 bit 4:4:4 YUV -for fmt in [CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV]: - VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.U8_444)(_simple_duplicate_convert_yuv422__yuv444) - - -# 4:2:2 16 bit YUV formats -_register_simple_copy_conversions_for_formats_yuv([ - CogFrameFormat.v216, - CogFrameFormat.S16_422]) - - -# 16 bit 4:4:4 YUV to 16 bit 4:2:2 YUV -for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: - VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, fmt)(_simple_mean_convert_yuv444__yuv422) - - -# 16 bit 4:2:2 YUV to 16 bit 4:2:0 YUV -for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: - VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_420)(_simple_mean_convert_yuv422__yuv420) - - -# 16 bit 4:4:4 YUV to 16 bit 4:2:0 YUV -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_444, CogFrameFormat.S16_420)(compose(_simple_mean_convert_yuv444__yuv422, CogFrameFormat.S16_422, _simple_mean_convert_yuv422__yuv420)) - - -# 16 bit 4:2:0 YUV to 16 bit 4:2:2 YUV -for fmt in [CogFrameFormat.S16_422, 
CogFrameFormat.v216]: - VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420, fmt)(_simple_duplicate_convert_yuv420__yuv422) - - -# 16 bit 4:2:0 YUV to 16 bit 4:4:4 YUV -VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_420, CogFrameFormat.S16_444)(compose(_simple_duplicate_convert_yuv420__yuv422, CogFrameFormat.S16_422, _simple_duplicate_convert_yuv422__yuv444)) - - -# 16 bit 4:2:2 YUV to 16 bit 4:4:4 YUV -for fmt in [CogFrameFormat.S16_422, CogFrameFormat.v216]: - VIDEOGRAIN.grain_conversion(fmt, CogFrameFormat.S16_444)(_simple_duplicate_convert_yuv422__yuv444) - - -# higher bit-depth conversions -for bd in [10, 12, 32]: - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd))(_simple_mean_convert_yuv444__yuv422) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd))(_simple_mean_convert_yuv422__yuv420) +# 8 and 16 bit YUV colour subsampling conversions +for bd in [8, 10, 12, 16, 32]: + for fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd)): + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd), fmt)(_simple_mean_convert_yuv444__yuv422) + VIDEOGRAIN.grain_conversion(fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd))(_simple_mean_convert_yuv422__yuv420) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd), fmt)(_simple_duplicate_convert_yuv420__yuv422) + VIDEOGRAIN.grain_conversion(fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd))(_simple_duplicate_convert_yuv422__yuv444) VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd))(compose(_simple_mean_convert_yuv444__yuv422, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd), _simple_mean_convert_yuv422__yuv420)) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd), 
COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd))(_simple_duplicate_convert_yuv420__yuv422) VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd))(compose(_simple_duplicate_convert_yuv420__yuv422, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd), _simple_duplicate_convert_yuv422__yuv444)) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd))(_simple_duplicate_convert_yuv422__yuv444) # Bit depth conversions -for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444, PlanarChromaFormat.RGB]: - for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d2), COG_PLANAR_FORMAT(ss, d1))(_bitdepth_down_convert) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss, d1), COG_PLANAR_FORMAT(ss, d2))(_bitdepth_up_convert) +for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): + for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444]: + for fmt1 in _equivalent_formats(COG_PLANAR_FORMAT(ss, d1)): + for fmt2 in _equivalent_formats(COG_PLANAR_FORMAT(ss, d2)): + VIDEOGRAIN.grain_conversion(fmt2, fmt1)(_bitdepth_down_convert_yuv) + VIDEOGRAIN.grain_conversion(fmt1, fmt2)(_bitdepth_up_convert_yuv) + for ss in [PlanarChromaFormat.RGB]: + for fmt1 in _equivalent_formats(COG_PLANAR_FORMAT(ss, d1)): + for fmt2 in _equivalent_formats(COG_PLANAR_FORMAT(ss, d2)): + VIDEOGRAIN.grain_conversion(fmt2, fmt1)(_bitdepth_down_convert_rgb) + VIDEOGRAIN.grain_conversion(fmt1, fmt2)(_bitdepth_up_convert_rgb) # Colourspace conversion for d in [8, 10, 12, 16, 32]: - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d))(_convert_yuv444_to_rgb) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d), 
COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))(_convert_rgb_to_yuv444) + for fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d)): + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), fmt)(_convert_yuv444_to_rgb) + VIDEOGRAIN.grain_conversion(fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))(_convert_rgb_to_yuv444) # We have a number of transformations that aren't supported directly, but are via an intermediate format # Bit depth and chroma combination conversions for (ss1, ss2) in distinct_pairs_from([PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444]): for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss1, d1), COG_PLANAR_FORMAT(ss2, d2))( - compose( - VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss1, d1), COG_PLANAR_FORMAT(ss2, d1)), - COG_PLANAR_FORMAT(ss2, d1), - VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d1), COG_PLANAR_FORMAT(ss2, d2)))) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss1, d2), COG_PLANAR_FORMAT(ss2, d1))( - compose( - VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss1, d2), COG_PLANAR_FORMAT(ss2, d2)), - COG_PLANAR_FORMAT(ss2, d2), - VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss2, d1)))) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss2, d1), COG_PLANAR_FORMAT(ss1, d2))( - compose( - VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d1), COG_PLANAR_FORMAT(ss2, d2)), - COG_PLANAR_FORMAT(ss2, d2), - VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss1, d2)))) - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss1, d1))( - compose( - VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss2, d2), COG_PLANAR_FORMAT(ss1, d2)), - COG_PLANAR_FORMAT(ss1, d2), - VIDEOGRAIN._get_grain_conversion_function(COG_PLANAR_FORMAT(ss1, 
d2), COG_PLANAR_FORMAT(ss1, d1)))) \ No newline at end of file + for fmt11 in _equivalent_formats(COG_PLANAR_FORMAT(ss1, d1)): + for fmt22 in _equivalent_formats(COG_PLANAR_FORMAT(ss2, d2)): + VIDEOGRAIN.grain_conversion_two_step(fmt11, COG_PLANAR_FORMAT(ss2, d1), fmt22) + VIDEOGRAIN.grain_conversion_two_step(fmt22, COG_PLANAR_FORMAT(ss1, d2), fmt11) + for fmt12 in _equivalent_formats(COG_PLANAR_FORMAT(ss1, d2)): + for fmt21 in _equivalent_formats(COG_PLANAR_FORMAT(ss2, d1)): + VIDEOGRAIN.grain_conversion_two_step(fmt12, COG_PLANAR_FORMAT(ss2, d2), fmt21) + VIDEOGRAIN.grain_conversion_two_step(fmt21, COG_PLANAR_FORMAT(ss2, d2), fmt12) + +# RGB and non-444 YUV at same bit-depth +for d in [8, 10, 12, 16, 32]: + for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d)): + for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422]: + for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(ss, d)): + VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), yuv_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), rgb_fmt) + +# RGB and YUV-444 with bit-depth conversion +for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): + for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d1)): + for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2)): + VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1), yuv_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2), rgb_fmt) + for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2)): + for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1)): + VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), yuv_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, 
COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d1), rgb_fmt) + +# RGB to YUV with bit-depth and colour subsampling conversion +for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): + for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422]: + for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d1)): + for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(ss, d2)): + VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1), yuv_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1), rgb_fmt) + for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2)): + for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(ss, d1)): + VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), yuv_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), rgb_fmt) \ No newline at end of file diff --git a/mediagrains_py36/numpy/videograin.py b/mediagrains_py36/numpy/videograin.py index 394193b..4a6562a 100644 --- a/mediagrains_py36/numpy/videograin.py +++ b/mediagrains_py36/numpy/videograin.py @@ -232,6 +232,15 @@ def _inner(f: Callable[[cls, cls], None]) -> None: return f return _inner + @classmethod + def grain_conversion_two_step(cls, fmt_in: CogFrameFormat, fmt_mid: CogFrameFormat, fmt_out: CogFrameFormat): + """Register a grain conversion via an intermediate format, using existing conversions""" + def _inner(grain_in: "VIDEOGRAIN", grain_out: "VIDEOGRAIN"): + grain_mid = grain_in._similar_grain(fmt_mid) + cls._get_grain_conversion_function(fmt_in, fmt_mid)(grain_in, grain_mid) + cls._get_grain_conversion_function(fmt_mid, fmt_out)(grain_mid, grain_out) + cls.grain_conversion(fmt_in, fmt_out)(_inner) + @classmethod def _get_grain_conversion_function(cls, fmt_in: CogFrameFormat, fmt_out: CogFrameFormat) -> Callable[["VIDEOGRAIN", "VIDEOGRAIN"], None]: 
"""Return the registered grain conversion function for a specified type conversion, or raise NotImplementedError""" diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 40b8038..cfe0b8f 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -477,50 +477,43 @@ def pairs_from(fmts): for fmt_out in fmts: yield (fmt_in, fmt_out) - for fmts in [ - [CogFrameFormat.YUYV, CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.U8_422, CogFrameFormat.U8_420], # All YUV 8bit formats - [CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR], # All 8-bit 3 component RGB formats - [CogFrameFormat.v216, CogFrameFormat.S16_444, CogFrameFormat.S16_422, CogFrameFormat.S16_420], # All YUV 16bit formats - [CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT], # All YUV 10bit formats except for v210 - [CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT], # All YUV 12bit formats - [CogFrameFormat.S32_444, CogFrameFormat.S32_422, CogFrameFormat.S32_420], # All YUV 32bit formats - [CogFrameFormat.S32_444, CogFrameFormat.S16_444, CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_444_12BIT, CogFrameFormat.U8_444, # Bitdepth conversion - CogFrameFormat.S32_422, CogFrameFormat.S16_422, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.U8_422, # Bitdepth conversion - CogFrameFormat.S32_420, CogFrameFormat.S16_420, CogFrameFormat.S16_420_10BIT, CogFrameFormat.S16_420_12BIT, CogFrameFormat.U8_420], # Bitdepth conversion - [CogFrameFormat.S32_444_RGB, CogFrameFormat.S16_444_RGB, CogFrameFormat.S16_444_10BIT_RGB, CogFrameFormat.S16_444_12BIT_RGB, CogFrameFormat.U8_444_RGB], # Bitdepth conversion - [CogFrameFormat.U8_444, CogFrameFormat.U8_444_RGB], # Colourspace conversion - [CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_444_10BIT_RGB], # Colourspace 
conversion - [CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_444_12BIT_RGB], # Colourspace conversion - [CogFrameFormat.S16_444, CogFrameFormat.S16_444_RGB], # Colourspace conversion - ]: - for (fmt_in, fmt_out) in pairs_from(fmts): - with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): - with mock.patch.object(Timestamp, "get_time", return_value=cts): - grain_in = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, - cog_frame_format=fmt_in, - width=16, height=16, cog_frame_layout=CogFrameLayout.FULL_FRAME) - - self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) - self.write_test_pattern(grain_in) - - grain_out = grain_in.convert(fmt_out) - - if fmt_in != fmt_out: - flow_id_out = grain_in.flow_id_for_converted_flow(fmt_out) - else: - flow_id_out = flow_id - self.assertIsVideoGrain(fmt_out, flow_id=flow_id_out, width=16, height=16, ignore_cts=True)(grain_out) - - # If we've converted from a different bit-depth we need to ignore rounding errors - if self._get_bitdepth(fmt_out) > self._get_bitdepth(fmt_in): - self.assertMatchesTestPattern(grain_out, max_diff=1 << (self._get_bitdepth(fmt_out) + 1 - self._get_bitdepth(fmt_in))) - elif fmt_in == CogFrameFormat.S16_444 and fmt_out == CogFrameFormat.S16_444_RGB: - self.assertMatchesTestPattern(grain_out, max_diff=2) - elif ((self._get_bitdepth(fmt_out) < self._get_bitdepth(fmt_in)) or - (self._is_rgb(fmt_in) != self._is_rgb(fmt_out))): - self.assertMatchesTestPattern(grain_out, max_diff=1) - else: - self.assertMatchesTestPattern(grain_out) + fmts = [CogFrameFormat.YUYV, CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.U8_422, CogFrameFormat.U8_420, # All YUV 8bit formats + CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR, # All 8-bit 3 component RGB formats + CogFrameFormat.v216, CogFrameFormat.S16_444, CogFrameFormat.S16_422, CogFrameFormat.S16_420, # All YUV 16bit formats + 
CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT, # All YUV 10bit formats except for v210 + CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT, # All YUV 12bit formats + CogFrameFormat.S32_444, CogFrameFormat.S32_422, CogFrameFormat.S32_420] # All YUV 32bit formats + for (fmt_in, fmt_out) in pairs_from(fmts): + if not self._is_rgb(fmt_in) and self._get_bitdepth(fmt_in) == 32 and self._is_rgb(fmt_out): + # Conversions from 32bit YUV to RGB don't work, and this is known, so ignore theseL + continue + with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain_in = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=fmt_in, + width=16, height=16, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) + self.write_test_pattern(grain_in) + + grain_out = grain_in.convert(fmt_out) + + if fmt_in != fmt_out: + flow_id_out = grain_in.flow_id_for_converted_flow(fmt_out) + else: + flow_id_out = flow_id + self.assertIsVideoGrain(fmt_out, flow_id=flow_id_out, width=16, height=16, ignore_cts=True)(grain_out) + + # If we've converted from a different bit-depth we need to ignore rounding errors + if self._get_bitdepth(fmt_out) > self._get_bitdepth(fmt_in): + self.assertMatchesTestPattern(grain_out, max_diff=1 << (self._get_bitdepth(fmt_out) + 1 - self._get_bitdepth(fmt_in))) + elif fmt_in == CogFrameFormat.S16_444 and fmt_out == CogFrameFormat.S16_444_RGB: + self.assertMatchesTestPattern(grain_out, max_diff=2) + elif ((self._get_bitdepth(fmt_out) < self._get_bitdepth(fmt_in)) or + (self._is_rgb(fmt_in) != self._is_rgb(fmt_out))): + self.assertMatchesTestPattern(grain_out, max_diff=1) + else: + self.assertMatchesTestPattern(grain_out) def test_video_grain_create_discontiguous(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") From 
589f737d72984d5e8b925e26ba04cef2f17d9125 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Wed, 16 Oct 2019 17:37:14 +0100 Subject: [PATCH 65/76] numpy.convert: Add excplicit check that bad conversions raise an ecxeption, and narrow the set of bad conversions --- mediagrains_py36/numpy/convert.py | 9 +++++-- tests/test36_numpy_videograin.py | 42 ++++++++++++++++--------------- 2 files changed, 29 insertions(+), 22 deletions(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index f11d658..07ca4d9 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -221,7 +221,9 @@ def _equivalent_formats(fmt: CogFrameFormat) -> List[CogFrameFormat]: # Colourspace conversion for d in [8, 10, 12, 16, 32]: for fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d)): - VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), fmt)(_convert_yuv444_to_rgb) + if d != 32: + # This conversion doesn't work for 32-bit data, so I have disabled it + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), fmt)(_convert_yuv444_to_rgb) VIDEOGRAIN.grain_conversion(fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))(_convert_rgb_to_yuv444) @@ -251,7 +253,10 @@ def _equivalent_formats(fmt: CogFrameFormat) -> List[CogFrameFormat]: for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d1)): for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2)): VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1), yuv_fmt) - VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2), rgb_fmt) + if d2 != 32: + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2), rgb_fmt) + else: + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1), rgb_fmt) for rgb_fmt in 
_equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2)): for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1)): VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), yuv_fmt) diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index cfe0b8f..15a5a1a 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -484,9 +484,6 @@ def pairs_from(fmts): CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT, # All YUV 12bit formats CogFrameFormat.S32_444, CogFrameFormat.S32_422, CogFrameFormat.S32_420] # All YUV 32bit formats for (fmt_in, fmt_out) in pairs_from(fmts): - if not self._is_rgb(fmt_in) and self._get_bitdepth(fmt_in) == 32 and self._is_rgb(fmt_out): - # Conversions from 32bit YUV to RGB don't work, and this is known, so ignore theseL - continue with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): with mock.patch.object(Timestamp, "get_time", return_value=cts): grain_in = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, @@ -496,24 +493,29 @@ def pairs_from(fmts): self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) self.write_test_pattern(grain_in) - grain_out = grain_in.convert(fmt_out) - - if fmt_in != fmt_out: - flow_id_out = grain_in.flow_id_for_converted_flow(fmt_out) - else: - flow_id_out = flow_id - self.assertIsVideoGrain(fmt_out, flow_id=flow_id_out, width=16, height=16, ignore_cts=True)(grain_out) - - # If we've converted from a different bit-depth we need to ignore rounding errors - if self._get_bitdepth(fmt_out) > self._get_bitdepth(fmt_in): - self.assertMatchesTestPattern(grain_out, max_diff=1 << (self._get_bitdepth(fmt_out) + 1 - self._get_bitdepth(fmt_in))) - elif fmt_in == CogFrameFormat.S16_444 and fmt_out == CogFrameFormat.S16_444_RGB: - self.assertMatchesTestPattern(grain_out, max_diff=2) - elif ((self._get_bitdepth(fmt_out) < 
self._get_bitdepth(fmt_in)) or - (self._is_rgb(fmt_in) != self._is_rgb(fmt_out))): - self.assertMatchesTestPattern(grain_out, max_diff=1) + if not self._is_rgb(fmt_in) and self._get_bitdepth(fmt_in) == 32 and fmt_out == CogFrameFormat.S32_444_RGB: + # Conversions from 32bit YUV to RGB don't work, and this is known, so check that an exception is thrown: + with self.assertRaises(NotImplementedError): + grain_out = grain_in.convert(fmt_out) else: - self.assertMatchesTestPattern(grain_out) + grain_out = grain_in.convert(fmt_out) + + if fmt_in != fmt_out: + flow_id_out = grain_in.flow_id_for_converted_flow(fmt_out) + else: + flow_id_out = flow_id + self.assertIsVideoGrain(fmt_out, flow_id=flow_id_out, width=16, height=16, ignore_cts=True)(grain_out) + + # If we've converted from a different bit-depth we need to ignore rounding errors + if self._get_bitdepth(fmt_out) > self._get_bitdepth(fmt_in): + self.assertMatchesTestPattern(grain_out, max_diff=1 << (self._get_bitdepth(fmt_out) + 1 - self._get_bitdepth(fmt_in))) + elif fmt_in == CogFrameFormat.S16_444 and fmt_out == CogFrameFormat.S16_444_RGB: + self.assertMatchesTestPattern(grain_out, max_diff=2) + elif ((self._get_bitdepth(fmt_out) < self._get_bitdepth(fmt_in)) or + (self._is_rgb(fmt_in) != self._is_rgb(fmt_out))): + self.assertMatchesTestPattern(grain_out, max_diff=1) + else: + self.assertMatchesTestPattern(grain_out) def test_video_grain_create_discontiguous(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") From a380add71ebee36c5a57bdf5545a2f0dabc42cf4 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 21 Oct 2019 10:21:01 +0100 Subject: [PATCH 66/76] numpy.convert: fixed generation of target flows to depend upon input flow id not source id --- mediagrains_py36/numpy/videograin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mediagrains_py36/numpy/videograin.py b/mediagrains_py36/numpy/videograin.py index 4a6562a..58f833f 100644 --- a/mediagrains_py36/numpy/videograin.py 
+++ b/mediagrains_py36/numpy/videograin.py @@ -250,7 +250,7 @@ def _get_grain_conversion_function(cls, fmt_in: CogFrameFormat, fmt_out: CogFram raise NotImplementedError("This conversion has not yet been implemented") def flow_id_for_converted_flow(self, fmt: CogFrameFormat) -> uuid.UUID: - return uuid.uuid5(self.source_id, "FORMAT_CONVERSION: {!r}".format(fmt)) + return uuid.uuid5(self.flow_id, "FORMAT_CONVERSION: {!r}".format(fmt)) def _similar_grain(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": """Returns a new empty grain that has the specified format, but other parameters identical to this grain.""" From c102104868c35352b726d13f8493a6017997b6d9 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 21 Oct 2019 10:28:29 +0100 Subject: [PATCH 67/76] Added asformat to numpy.VIDEOGRAIN --- mediagrains_py36/numpy/videograin.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/mediagrains_py36/numpy/videograin.py b/mediagrains_py36/numpy/videograin.py index 58f833f..5154ce3 100644 --- a/mediagrains_py36/numpy/videograin.py +++ b/mediagrains_py36/numpy/videograin.py @@ -266,7 +266,7 @@ def _similar_grain(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": cog_frame_layout=self.layout) def convert(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": - """Used to convert this grain to a different cog format. + """Used to convert this grain to a different cog format. Always produces a new grain. :param fmt: The format to convert to :returns: A new grain of the specified format. Notably converting to the same format is the same as a deepcopy @@ -279,6 +279,18 @@ def convert(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": self.__class__._get_grain_conversion_function(self.format, fmt)(self, grain_out) return grain_out + def asformat(self, fmt: CogFrameFormat) -> "VIDEOGRAIN": + """Used to ensure that this grain is in a particular format. Converts it if not. + + :param fmt: The format to ensure + :returns: self or a new grain. 
+ :raises NotImplementedError if the requested conversion is not possible. + """ + if self.format == fmt: + return self + else: + return self.convert(fmt) + def VideoGrain(*args, **kwargs) -> VIDEOGRAIN: """If the first argument is a mediagrains.VIDEOGRAIN then return a mediagrains.numpy.VIDEOGRAIN representing the same data. From 2aa2824156011c3d8fa8481605344b1e603949dc Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 21 Oct 2019 10:24:30 +0100 Subject: [PATCH 68/76] 2.6.0.dev9: version bump --- CHANGELOG.md | 1 + setup.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e1f81f..195f3e8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - Added `MovingBarOverlay` for test signal generators - Added `mediagrains.numpy` sublibrary for handling video grains as numpy arrays, in python 3.6+ - Added `PSNR` option to grain compare. +- Support for converting between supported video grain formats added to `mediagrains.numpy` ## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does diff --git a/setup.py b/setup.py index e31c6b8..6a06321 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.6.0.dev7", + version="2.6.0.dev9", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', From af8ca2ff6b9b7743c8430b42755ad966ef6485c8 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 21 Oct 2019 11:16:27 +0100 Subject: [PATCH 69/76] Test patterns for v210 --- mediagrains_py36/numpy/convert.py | 4 ++++ tests/test36_numpy_videograin.py | 28 +++++++++++++++++++++++++++- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index 07ca4d9..50c9090 100644 --- a/mediagrains_py36/numpy/convert.py +++ 
b/mediagrains_py36/numpy/convert.py @@ -163,6 +163,10 @@ def _convert_yuv444_to_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): grain_out.component_data.B[:,:] = (Y + U*1.8556) +def _convert_v210_to_yuv422_10bit(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + pass + + # These methods automate the process of registering simple copy conversions def _register_simple_copy_conversions_for_formats_yuv(fmts: List[CogFrameFormat]): for i in range(0, len(fmts)): diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 15a5a1a..7049649 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -37,6 +37,8 @@ from copy import copy, deepcopy from typing import Tuple, Optional +from itertools import chain, repeat + import numpy as np from pdb import set_trace @@ -373,6 +375,25 @@ def _test_pattern_yuv(self, fmt: CogFrameFormat) -> Tuple[np.ndarray, np.ndarray return (np.around(Y).astype(_dtype_from_cogframeformat(fmt)), np.around(U).astype(_dtype_from_cogframeformat(fmt)), np.around(V).astype(_dtype_from_cogframeformat(fmt))) + def _test_pattern_v210(self) -> np.ndarray: + (Y, U, V) = self._test_pattern_yuv(CogFrameFormat.S16_422_10BIT) + + output = np.zeros(32*16, dtype=np.dtype(np.uint32)) + for y in range(0, 16): + + yy = chain(iter(Y[:, y]), repeat(0)) + uu = chain(iter(U[:, y]), repeat(0)) + vv = chain(iter(V[:, y]), repeat(0)) + + for x in range(0, 8): + output[y*32 + 4*x + 0] = next(uu) | (next(yy) << 10) | (next(vv) << 20) + output[y*32 + 4*x + 1] = next(yy) | (next(uu) << 10) | (next(yy) << 20) + output[y*32 + 4*x + 2] = next(vv) | (next(yy) << 10) | (next(uu) << 20) + output[y*32 + 4*x + 3] = next(yy) | (next(vv) << 10) | (next(yy) << 20) + + return output + + def write_test_pattern(self, grain): fmt = grain.format @@ -382,6 +403,8 @@ def write_test_pattern(self, grain): grain.component_data.R[:, :] = R grain.component_data.G[:, :] = G grain.component_data.B[:, :] = B + elif fmt == CogFrameFormat.v210: + 
grain.data[:] = self._test_pattern_v210() else: (Y, U, V) = self._test_pattern_yuv(fmt) @@ -407,6 +430,8 @@ def assertMatchesTestPattern(self, grain: VIDEOGRAIN, max_diff: Optional[int] = self.assertArrayEqual(grain.component_data.R[:, :], R, max_diff=max_diff) self.assertArrayEqual(grain.component_data.G[:, :], G, max_diff=max_diff) self.assertArrayEqual(grain.component_data.B[:, :], B, max_diff=max_diff) + elif fmt == CogFrameFormat.v210: + self.assertArrayEqual(grain.data, self._test_pattern_v210()) else: (Y, U, V) = self._test_pattern_yuv(fmt) @@ -481,6 +506,7 @@ def pairs_from(fmts): CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR, # All 8-bit 3 component RGB formats CogFrameFormat.v216, CogFrameFormat.S16_444, CogFrameFormat.S16_422, CogFrameFormat.S16_420, # All YUV 16bit formats CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT, # All YUV 10bit formats except for v210 + CogFrameFormat.v210, # v210, may the gods be merciful to us for including it CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT, # All YUV 12bit formats CogFrameFormat.S32_444, CogFrameFormat.S32_422, CogFrameFormat.S32_420] # All YUV 32bit formats for (fmt_in, fmt_out) in pairs_from(fmts): @@ -493,7 +519,7 @@ def pairs_from(fmts): self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) self.write_test_pattern(grain_in) - if not self._is_rgb(fmt_in) and self._get_bitdepth(fmt_in) == 32 and fmt_out == CogFrameFormat.S32_444_RGB: + if (not self._is_rgb(fmt_in) and self._get_bitdepth(fmt_in) == 32 and fmt_out == CogFrameFormat.S32_444_RGB) or (fmt_out == CogFrameFormat.v210 and fmt_in != CogFrameFormat.v210) or (fmt_in == CogFrameFormat.v210 and fmt_out not in [CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT]): # Conversions from 32bit YUV to RGB don't work, and this is known, so check that an exception is thrown: with 
self.assertRaises(NotImplementedError): grain_out = grain_in.convert(fmt_out) From b573561ad607ac507721f5230eff381edc4e5ab8 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 21 Oct 2019 14:16:03 +0100 Subject: [PATCH 70/76] numpy.convert: v210 -> planar conversion --- mediagrains_py36/numpy/convert.py | 61 ++++++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index 50c9090..4e7021b 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -164,7 +164,62 @@ def _convert_yuv444_to_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): def _convert_v210_to_yuv422_10bit(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): - pass + # This is a v210 -> planar descramble. It's not super fast, but it should be correct + # + # Input data is array of 32-bit words, arranged as a 1d array in repeating blocks of 4 like: + # lsb -> ->msb + # | U0 | Y0 | V0 |X| + # | Y1 | U1 | Y2 |X| + # | V1 | Y3 | U2 |X| + # | Y4 | V2 | Y5 |X| + # ... + + # Our first descramble simple creates arrays containing the first, second, or third sample from each dword: + # first = [U0] [Y1] [V1] [Y4] ... + # second = [Y0] [U1] [Y3] [V2] ... + # third = [V0] [Y2] [U2] [Y5] ... 
+ + first = (grain_in.data & 0x3FF).astype(np.dtype(np.uint16)) + second = ((grain_in.data >> 10) & 0x3FF).astype(np.dtype(np.uint16)) + third = ((grain_in.data >> 20) & 0x3FF).astype(np.dtype(np.uint16)) + + # These arrays are still linear 1d arrays so we reinterpret them as 2d arrays, remembering that v210 has an alignment of 48 pixels horizontally + first.shape = (grain_in.height, 32*((grain_in.width + 47)//48)) + second.shape = (grain_in.height, 32*((grain_in.width + 47)//48)) + third.shape = (grain_in.height, 32*((grain_in.width + 47)//48)) + + # Our usual transpose to make the arrays more convenient + first = first.transpose() + second = second.transpose() + third = third.transpose() + + # Finally we can assign every third entry in the target component_data arrays with every second entry from one of the three intermediate arrays: + # eg: + # Y = [Y0] [ ] [ ] [Y3] [ ] [ ] ... + # Y = [Y0] [Y1] [ ] [Y3] [Y4] [ ] ... + # Y = [Y0] [Y1] [Y2] [Y3] [Y4] [Y5] ... + + grain_out.component_data.Y[0::3, :] = second[0::2, :][0:(grain_in.width + 2)//3, :] + grain_out.component_data.Y[1::3, :] = first[1::2, :][0:(grain_in.width + 1)//3, :] + grain_out.component_data.Y[2::3, :] = third[1::2, :][0:(grain_in.width + 0)//3, :] + + # And similarly for the chroma: + # U = [U0] [ ] [ ] ... + # U = [U0] [U1] [ ] ... + # U = [U0] [U1] [U2] ... + + grain_out.component_data.U[0::3, :] = first[0::4, :][0:(grain_in.width//2 + 2)//3, :] + grain_out.component_data.U[1::3, :] = second[1::4, :][0:(grain_in.width//2 + 1)//3, :] + grain_out.component_data.U[2::3, :] = third[2::4, :][0:(grain_in.width//2 + 0)//3, :] + + # And similarly for the chroma: + # V = [V0] [ ] [ ] ... + # V = [V0] [V1] [ ] ... + # V = [V0] [V1] [V2] ... 
+ + grain_out.component_data.V[0::3, :] = third[0::4, :][0:(grain_in.width//2 + 2)//3, :] + grain_out.component_data.V[1::3, :] = first[2::4, :][0:(grain_in.width//2 + 1)//3, :] + grain_out.component_data.V[2::3, :] = second[3::4, :][0:(grain_in.width//2 + 0)//3, :] # These methods automate the process of registering simple copy conversions @@ -231,6 +286,10 @@ def _equivalent_formats(fmt: CogFrameFormat) -> List[CogFrameFormat]: VIDEOGRAIN.grain_conversion(fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))(_convert_rgb_to_yuv444) +# V210 -> 10 bit 4:2:2 YUV +VIDEOGRAIN.grain_conversion(CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT)(_convert_v210_to_yuv422_10bit) + + # We have a number of transformations that aren't supported directly, but are via an intermediate format # Bit depth and chroma combination conversions for (ss1, ss2) in distinct_pairs_from([PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444]): From 74a7856b0bb4734d05b9e84169c83e9b782864aa Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 21 Oct 2019 14:19:54 +0100 Subject: [PATCH 71/76] numpy.convert: Added two-step v210->any other formats --- mediagrains_py36/numpy/convert.py | 9 ++++++++- tests/test36_numpy_videograin.py | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index 4e7021b..cb14f52 100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -335,4 +335,11 @@ def _equivalent_formats(fmt: CogFrameFormat) -> List[CogFrameFormat]: for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2)): for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(ss, d1)): VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), yuv_fmt) - VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), rgb_fmt) \ No newline at end of file + 
VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), rgb_fmt) + +# Conversions from v210 to other formats +for d in [8, 10, 12, 16, 32]: + for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444, PlanarChromaFormat.RGB]: + for fmt in _equivalent_formats(COG_PLANAR_FORMAT(ss, d)): + if fmt != CogFrameFormat.S16_422_10BIT: + VIDEOGRAIN.grain_conversion_two_step(CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT, fmt) \ No newline at end of file diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 7049649..4f97b04 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -519,7 +519,7 @@ def pairs_from(fmts): self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) self.write_test_pattern(grain_in) - if (not self._is_rgb(fmt_in) and self._get_bitdepth(fmt_in) == 32 and fmt_out == CogFrameFormat.S32_444_RGB) or (fmt_out == CogFrameFormat.v210 and fmt_in != CogFrameFormat.v210) or (fmt_in == CogFrameFormat.v210 and fmt_out not in [CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT]): + if (not self._is_rgb(fmt_in) and self._get_bitdepth(fmt_in) == 32 and fmt_out == CogFrameFormat.S32_444_RGB) or (fmt_out == CogFrameFormat.v210 and fmt_in != CogFrameFormat.v210): # Conversions from 32bit YUV to RGB don't work, and this is known, so check that an exception is thrown: with self.assertRaises(NotImplementedError): grain_out = grain_in.convert(fmt_out) From 0ed3ac26d67fcca6fa1894a7eb740ea85612c79c Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 21 Oct 2019 14:52:04 +0100 Subject: [PATCH 72/76] numpy.convert: Added planar YUV 422 10-bit -> v210 conversion --- mediagrains_py36/numpy/convert.py | 28 ++++++++++++++++++++++++++-- tests/test36_numpy_videograin.py | 2 +- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index cb14f52..40d531b 
100644 --- a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -222,6 +222,30 @@ def _convert_v210_to_yuv422_10bit(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): grain_out.component_data.V[2::3, :] = second[3::4, :][0:(grain_in.width//2 + 0)//3, :] +def _convert_yuv422_10bit_to_v210(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + # This won't be fast, but it should work. + + # Take every third entry in each component and arrange them + first = np.zeros((grain_in.height, 32*((grain_in.width + 47)//48)), dtype=np.dtype(np.uint32)).transpose() + second = np.zeros((grain_in.height, 32*((grain_in.width + 47)//48)), dtype=np.dtype(np.uint32)).transpose() + third = np.zeros((grain_in.height, 32*((grain_in.width + 47)//48)), dtype=np.dtype(np.uint32)).transpose() + + first[0::4, :][0:(grain_in.width//2 + 2)//3, :] = grain_in.component_data.U[0::3, :] + first[1::2, :][0:(grain_in.width + 1)//3, :] = grain_in.component_data.Y[1::3, :] + first[2::4, :][0:(grain_in.width//2 + 1)//3, :] = grain_in.component_data.V[1::3, :] + + second[0::2, :][0:(grain_in.width + 2)//3, :] = grain_in.component_data.Y[0::3, :] + second[1::4, :][0:(grain_in.width//2 + 1)//3, :] = grain_in.component_data.U[1::3, :] + second[3::4, :][0:(grain_in.width//2 + 0)//3, :] = grain_in.component_data.V[2::3, :] + + third[0::4, :][0:(grain_in.width//2 + 2)//3, :] = grain_in.component_data.V[0::3, :] + third[1::2, :][0:(grain_in.width + 0)//3, :] = grain_in.component_data.Y[2::3, :] + third[2::4, :][0:(grain_in.width//2 + 0)//3, :] = grain_in.component_data.U[2::3, :] + + # Now combine them to make the dwords expected + grain_out.data[:] = np.ravel(first.transpose()) + (np.ravel(second.transpose()) << 10) + (np.ravel(third.transpose()) << 20) + + # These methods automate the process of registering simple copy conversions def _register_simple_copy_conversions_for_formats_yuv(fmts: List[CogFrameFormat]): for i in range(0, len(fmts)): @@ -286,9 +310,9 @@ def _equivalent_formats(fmt: 
CogFrameFormat) -> List[CogFrameFormat]: VIDEOGRAIN.grain_conversion(fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))(_convert_rgb_to_yuv444) -# V210 -> 10 bit 4:2:2 YUV +# V210 <-> 10 bit 4:2:2 YUV VIDEOGRAIN.grain_conversion(CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT)(_convert_v210_to_yuv422_10bit) - +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_10BIT, CogFrameFormat.v210)(_convert_yuv422_10bit_to_v210) # We have a number of transformations that aren't supported directly, but are via an intermediate format # Bit depth and chroma combination conversions diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py index 4f97b04..7c08d47 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -519,7 +519,7 @@ def pairs_from(fmts): self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) self.write_test_pattern(grain_in) - if (not self._is_rgb(fmt_in) and self._get_bitdepth(fmt_in) == 32 and fmt_out == CogFrameFormat.S32_444_RGB) or (fmt_out == CogFrameFormat.v210 and fmt_in != CogFrameFormat.v210): + if (not self._is_rgb(fmt_in) and self._get_bitdepth(fmt_in) == 32 and fmt_out == CogFrameFormat.S32_444_RGB) or (fmt_out == CogFrameFormat.v210 and fmt_in not in [CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT]): # Conversions from 32bit YUV to RGB don't work, and this is known, so check that an exception is thrown: with self.assertRaises(NotImplementedError): grain_out = grain_in.convert(fmt_out) From 747b8bada668db6b08687f3cd10a668bfdf7639b Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 21 Oct 2019 17:25:52 +0100 Subject: [PATCH 73/76] numpy.convert: Added multistep conversions to v210 --- mediagrains_py36/numpy/convert.py | 24 +++++----- tests/test36_numpy_videograin.py | 73 ++++++++++++++++++++++++------- 2 files changed, 66 insertions(+), 31 deletions(-) diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py index 40d531b..d0dea23 100644 --- 
a/mediagrains_py36/numpy/convert.py +++ b/mediagrains_py36/numpy/convert.py @@ -147,9 +147,9 @@ def _convert_rgb_to_yuv444(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): grain_in.component_data.G, grain_in.component_data.B) - grain_out.component_data.Y[:,:] = (R*0.2126 + G*0.7152 + B*0.0722) - grain_out.component_data.U[:,:] = (R*-0.114572 - G*0.385428 + B*0.5 + (1 << (bd - 1))) - grain_out.component_data.V[:,:] = (R*0.5 - G*0.454153 - B*0.045847 + (1 << (bd - 1))) + np.clip((R*0.2126 + G*0.7152 + B*0.0722), 0, 1 << bd, out=grain_out.component_data.Y, casting="unsafe") + np.clip((R*-0.114572 - G*0.385428 + B*0.5 + (1 << (bd - 1))), 0, 1 << bd, out=grain_out.component_data.U, casting="unsafe") + np.clip((R*0.5 - G*0.454153 - B*0.045847 + (1 << (bd - 1))), 0, 1 << bd, out=grain_out.component_data.V, casting="unsafe") def _convert_yuv444_to_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): @@ -158,9 +158,9 @@ def _convert_yuv444_to_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): grain_in.component_data.U.astype(np.dtype(np.double)) - (1 << (bd - 1)), grain_in.component_data.V.astype(np.dtype(np.double)) - (1 << (bd - 1))) - grain_out.component_data.R[:,:] = (Y + V*1.5748) - grain_out.component_data.G[:,:] = (Y - U*0.187324 - V*0.468124) - grain_out.component_data.B[:,:] = (Y + U*1.8556) + np.clip((Y + V*1.5748), 0, 1 << bd, out=grain_out.component_data.R, casting="unsafe") + np.clip((Y - U*0.187324 - V*0.468124), 0, 1 << bd, out=grain_out.component_data.G, casting="unsafe") + np.clip((Y + U*1.8556), 0, 1 << bd, out=grain_out.component_data.B, casting="unsafe") def _convert_v210_to_yuv422_10bit(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): @@ -304,9 +304,7 @@ def _equivalent_formats(fmt: CogFrameFormat) -> List[CogFrameFormat]: # Colourspace conversion for d in [8, 10, 12, 16, 32]: for fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d)): - if d != 32: - # This conversion doesn't work for 32-bit data, so I have disabled it - 
VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), fmt)(_convert_yuv444_to_rgb) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), fmt)(_convert_yuv444_to_rgb) VIDEOGRAIN.grain_conversion(fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))(_convert_rgb_to_yuv444) @@ -340,10 +338,7 @@ def _equivalent_formats(fmt: CogFrameFormat) -> List[CogFrameFormat]: for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d1)): for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2)): VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1), yuv_fmt) - if d2 != 32: - VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2), rgb_fmt) - else: - VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1), rgb_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2), rgb_fmt) for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2)): for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1)): VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), yuv_fmt) @@ -366,4 +361,5 @@ def _equivalent_formats(fmt: CogFrameFormat) -> List[CogFrameFormat]: for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444, PlanarChromaFormat.RGB]: for fmt in _equivalent_formats(COG_PLANAR_FORMAT(ss, d)): if fmt != CogFrameFormat.S16_422_10BIT: - VIDEOGRAIN.grain_conversion_two_step(CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT, fmt) \ No newline at end of file + VIDEOGRAIN.grain_conversion_two_step(CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT, fmt) + VIDEOGRAIN.grain_conversion_two_step(fmt, CogFrameFormat.S16_422_10BIT, CogFrameFormat.v210) \ No newline at end of file diff --git a/tests/test36_numpy_videograin.py 
b/tests/test36_numpy_videograin.py index 7c08d47..5157437 100644 --- a/tests/test36_numpy_videograin.py +++ b/tests/test36_numpy_videograin.py @@ -30,7 +30,9 @@ COG_FRAME_IS_COMPRESSED, COG_FRAME_IS_PLANAR, COG_FRAME_IS_PLANAR_RGB, - COG_FRAME_FORMAT_ACTIVE_BITS) + COG_FRAME_FORMAT_ACTIVE_BITS, + COG_PLANAR_FORMAT, + PlanarChromaFormat) from mediatimestamp.immutable import Timestamp, TimeRange import mock from fractions import Fraction @@ -508,7 +510,8 @@ def pairs_from(fmts): CogFrameFormat.S16_444_10BIT, CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT, # All YUV 10bit formats except for v210 CogFrameFormat.v210, # v210, may the gods be merciful to us for including it CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT, # All YUV 12bit formats - CogFrameFormat.S32_444, CogFrameFormat.S32_422, CogFrameFormat.S32_420] # All YUV 32bit formats + CogFrameFormat.S32_444, CogFrameFormat.S32_422, CogFrameFormat.S32_420, # All YUV 32bit formats + CogFrameFormat.S16_444_RGB, CogFrameFormat.S16_444_10BIT_RGB, CogFrameFormat.S16_444_12BIT_RGB, CogFrameFormat.S32_444_RGB] # Other planar RGB formats for (fmt_in, fmt_out) in pairs_from(fmts): with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): with mock.patch.object(Timestamp, "get_time", return_value=cts): @@ -519,29 +522,65 @@ def pairs_from(fmts): self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) self.write_test_pattern(grain_in) - if (not self._is_rgb(fmt_in) and self._get_bitdepth(fmt_in) == 32 and fmt_out == CogFrameFormat.S32_444_RGB) or (fmt_out == CogFrameFormat.v210 and fmt_in not in [CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT]): - # Conversions from 32bit YUV to RGB don't work, and this is known, so check that an exception is thrown: - with self.assertRaises(NotImplementedError): - grain_out = grain_in.convert(fmt_out) - else: - grain_out = grain_in.convert(fmt_out) - - if fmt_in != fmt_out: - flow_id_out = grain_in.flow_id_for_converted_flow(fmt_out) 
- else: - flow_id_out = flow_id - self.assertIsVideoGrain(fmt_out, flow_id=flow_id_out, width=16, height=16, ignore_cts=True)(grain_out) + grain_out = grain_in.convert(fmt_out) - # If we've converted from a different bit-depth we need to ignore rounding errors + if fmt_in != fmt_out: + flow_id_out = grain_in.flow_id_for_converted_flow(fmt_out) + else: + flow_id_out = flow_id + self.assertIsVideoGrain(fmt_out, flow_id=flow_id_out, width=16, height=16, ignore_cts=True)(grain_out) + + # Some conversions for v210 are just really hard to check when not exact + # For other formats it's simpler + if fmt_out != CogFrameFormat.v210: + # We have several possible cases here: + # * We've changed bit-depth + # * We've changed colour subsampling + # * We've changed colourspace + # + # In addition we have done none of those things, or even more than one + + # If we've increased bit-depth there will be rounding errors if self._get_bitdepth(fmt_out) > self._get_bitdepth(fmt_in): - self.assertMatchesTestPattern(grain_out, max_diff=1 << (self._get_bitdepth(fmt_out) + 1 - self._get_bitdepth(fmt_in))) - elif fmt_in == CogFrameFormat.S16_444 and fmt_out == CogFrameFormat.S16_444_RGB: + self.assertMatchesTestPattern(grain_out, max_diff=1 << (self._get_bitdepth(fmt_out) + 2 - self._get_bitdepth(fmt_in))) + + # If we're changing from yuv to rgb then there's some potential for floating point errors, depending on the sizes + elif self._get_bitdepth(fmt_in) >= 16 and not self._is_rgb(fmt_in) and fmt_out == CogFrameFormat.S16_444_RGB: self.assertMatchesTestPattern(grain_out, max_diff=2) + elif self._get_bitdepth(fmt_in) == 32 and not self._is_rgb(fmt_in) and fmt_out == CogFrameFormat.S32_444_RGB: + self.assertMatchesTestPattern(grain_out, max_diff=1 << 10) # The potential errors in 32 bit conversions are very large + + # If we've decreased bit-depth *and*/or changed from rgb to yuv then there is a smaller scope for error elif ((self._get_bitdepth(fmt_out) < self._get_bitdepth(fmt_in)) or
(self._is_rgb(fmt_in) != self._is_rgb(fmt_out))): self.assertMatchesTestPattern(grain_out, max_diff=1) + + # If we're in none of these cases then the transformation should be lossless else: self.assertMatchesTestPattern(grain_out) + else: + grain_rev = grain_out.convert(fmt_in) + + # The conversion from 10-bit 422 should be lossless + if fmt_in in [CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT]: + self.assertMatchesTestPattern(grain_rev) + + # If we are not colour space converting and our input bit-depth is equal or lower to 10bits we have minor scope for rounding error + elif self._get_bitdepth(fmt_in) in [8, 10] and not self._is_rgb(fmt_in): + self.assertMatchesTestPattern(grain_rev, max_diff=1) + + # If we are significantly lowering the bit depth then there is potential for significant error when reversing the process + elif self._get_bitdepth(fmt_in) in [12, 16, 32] and not self._is_rgb(fmt_in): + self.assertMatchesTestPattern(grain_rev, max_diff=1 << (self._get_bitdepth(fmt_in) - 9)) + + # And even more if we are also colour converting + elif self._get_bitdepth(fmt_in) in [12, 16, 32] and self._is_rgb(fmt_in): + self.assertMatchesTestPattern(grain_rev, max_diff=1 << (self._get_bitdepth(fmt_in) - 8)) + + # Otherwise if we are only colour converting then the potential error is a small floating point rounding error + elif self._is_rgb(fmt_in): + self.assertMatchesTestPattern(grain_rev, max_diff=4) + def test_video_grain_create_discontiguous(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") From 02b2e3d5a1a11336fb7abb608a651d168932d87f Mon Sep 17 00:00:00 2001 From: James Weaver Date: Mon, 21 Oct 2019 17:28:23 +0100 Subject: [PATCH 74/76] v2.6.0.dev10: version bump --- CHANGELOG.md | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 195f3e8..464b7f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ - Added `MovingBarOverlay` for test signal generators - Added 
`mediagrains.numpy` sublibrary for handling video grains as numpy arrays, in python 3.6+ - Added `PSNR` option to grain compare. -- Support for converting between supported video grain formats added to `mediagrains.numpy` +- Support for converting between all uncompressed video grain formats added to `mediagrains.numpy` ## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does diff --git a/setup.py b/setup.py index 6a06321..1c09874 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.6.0.dev9", + version="2.6.0.dev10", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', From 803bf7733d0aa4c16b2d5093eeb6028687303f29 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 22 Oct 2019 10:34:20 +0100 Subject: [PATCH 75/76] 2.6.0-dev11: final version to support py2.7 --- CHANGELOG.md | 1 + setup.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 464b7f0..201a497 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - Added `mediagrains.numpy` sublibrary for handling video grains as numpy arrays, in python 3.6+ - Added `PSNR` option to grain compare. 
- Support for converting between all uncompressed video grain formats added to `mediagrains.numpy` +- This is the last release that will support python 2.7 (apart from bugfixes) ## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does diff --git a/setup.py b/setup.py index 1c09874..f23a588 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.6.0.dev10", + version="2.6.0.dev11", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', From f481fe7781688408e4a8e22625960694b04cdb36 Mon Sep 17 00:00:00 2001 From: James Weaver Date: Tue, 22 Oct 2019 14:37:23 +0100 Subject: [PATCH 76/76] v2.6.0: Release --- CHANGELOG.md | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 201a497..50f23e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Mediagrains Library Changelog -## 2.6.0 (Under development) +## 2.6.0 - Added support for async methods to gsf decoder in python 3.6+ - Added `Grain.origin_timerange` method. - Added `Grain.normalise_time` method. diff --git a/setup.py b/setup.py index f23a588..db72e49 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,7 @@ package_names = list(packages.keys()) setup(name="mediagrains", - version="2.6.0.dev11", + version="2.6.0", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver',