From 65851aedf0cfb156d8c8ecab3751b7c9364b4b35 Mon Sep 17 00:00:00 2001 From: totaam Date: Thu, 9 Nov 2023 21:40:25 +0700 Subject: [PATCH] cosmetic --- xpra/server/window/compress.py | 908 +++++++++++++-------------- xpra/server/window/video_compress.py | 741 +++++++++++----------- 2 files changed, 812 insertions(+), 837 deletions(-) diff --git a/xpra/server/window/compress.py b/xpra/server/window/compress.py index 5e66eeb3ec..cc1d68ba0c 100644 --- a/xpra/server/window/compress.py +++ b/xpra/server/window/compress.py @@ -74,7 +74,7 @@ STRICT_MODE = envbool("XPRA_ENCODING_STRICT_MODE", False) MAX_QUALITY = envint("XPRA_ENCODING_MAX_QUALITY", 100) MAX_SPEED = envint("XPRA_ENCODING_MAX_SPEED", 100) -assert MAX_QUALITY>0 and MAX_SPEED>0 +assert MAX_QUALITY > 0 and MAX_SPEED > 0 MERGE_REGIONS = envbool("XPRA_MERGE_REGIONS", True) DOWNSCALE = envbool("XPRA_DOWNSCALE", True) @@ -142,12 +142,12 @@ def capr(v) -> int: def get_encoder_type(encoder) -> str: if not encoder: return "none" - mod = getattr(encoder, "__module__") #ie: 'xpra.codecs.pillow.encoder' + mod = getattr(encoder, "__module__") # ie: 'xpra.codecs.pillow.encoder' if not mod: return "none" if mod.endswith(".encoder"): - mod = mod[:-len(".encoder")] #ie: 'xpra.codecs.pillow' - mod = mod.split(".")[-1] #ie: 'pillow' + mod = mod[:-len(".encoder")] # ie: 'xpra.codecs.pillow' + mod = mod.split(".")[-1] # ie: 'pillow' return mod @@ -189,19 +189,19 @@ def __init__(self, self.start_time = monotonic() self.ui_thread = threading.current_thread() - self.record_congestion_event = record_congestion_event #callback for send latency problems - self.queue_size = queue_size #callback to get the size of the damage queue - self.call_in_encode_thread = call_in_encode_thread #callback to add damage data which is ready to compress to the damage processing queue - self.queue_packet = queue_packet #callback to add a network packet to the outgoing queue + self.record_congestion_event = record_congestion_event # callback for send latency problems + self.queue_size = queue_size # callback to get the size of the damage queue + self.call_in_encode_thread = call_in_encode_thread # callback to add damage data which is ready to compress to the damage processing queue + self.queue_packet = queue_packet # callback to add a network packet to the outgoing queue self.wid : int = wid - self.window = window #only to be used from the UI thread! - self.global_statistics : GlobalPerformanceStatistics = statistics #shared/global statistics from ClientConnection + self.window = window # only to be used from the UI thread! + self.global_statistics : GlobalPerformanceStatistics = statistics # shared/global statistics from ClientConnection self.statistics : WindowPerformanceStatistics = WindowPerformanceStatistics() - self.av_sync : bool = av_sync #flag: enabled or not? - self.av_sync_delay = av_sync_delay #the av-sync delay we actually use - self.av_sync_delay_target = av_sync_delay #the av-sync delay we want at this point in time (can vary quickly) - self.av_sync_delay_base = av_sync_delay #the total av-sync delay we are trying to achieve (including video encoder delay) - self.av_sync_frame_delay : int = 0 #how long frames spend in the video encoder + self.av_sync : bool = av_sync # flag: enabled or not? 
+ self.av_sync_delay = av_sync_delay # the av-sync delay we actually use + self.av_sync_delay_target = av_sync_delay # the av-sync delay we want at this point in time (can vary quickly) + self.av_sync_delay_base = av_sync_delay # the total av-sync delay we are trying to achieve (including video encoder delay) + self.av_sync_frame_delay : int = 0 # how long frames spend in the video encoder self.av_sync_timer : int = 0 self.encode_queue : list[tuple] = [] self.encode_queue_max_size : int = 10 @@ -209,13 +209,13 @@ def __init__(self, self.server_core_encodings = server_core_encodings self.server_encodings = server_encodings - self.encoding = encoding #the current encoding - self.encodings = encodings #all the encodings supported by the client - self.core_encodings = core_encodings #the core encodings supported by the client - self.picture_encodings = () #non-video only - self.rgb_formats = rgb_formats #supported RGB formats (RGB, RGBA, ...) - used by mmap - self.encoding_options = encoding_options #extra options which may be specific to the encoder (ie: x264) - self.rgb_lz4 : bool = use("lz4") and encoding_options.boolget("rgb_lz4", False) #server and client support lz4 pixel compression + self.encoding = encoding # the current encoding + self.encodings = encodings # all the encodings supported by the client + self.core_encodings = core_encodings # the core encodings supported by the client + self.picture_encodings = () # non-video only + self.rgb_formats = rgb_formats # supported RGB formats (RGB, RGBA, ...) - used by mmap + self.encoding_options = encoding_options # extra options which may be specific to the encoder (ie: x264) + self.rgb_lz4 : bool = use("lz4") and encoding_options.boolget("rgb_lz4", False) # server and client support lz4 pixel compression self.client_render_size = encoding_options.get("render-size") self.client_bit_depth : int = encoding_options.intget("bit-depth", 24) self.supports_transparency : bool = HAS_ALPHA and encoding_options.boolget("transparency", True) @@ -226,7 +226,7 @@ def __init__(self, self.send_window_size : bool = encoding_options.boolget("send-window-size", False) self.decoder_speed = typedict(self.encoding_options.dictget("decoder-speed") or {}) self.batch_config = batch_config - #auto-refresh: + # auto-refresh: self.auto_refresh_delay = auto_refresh_delay self.base_auto_refresh_delay = auto_refresh_delay self.last_auto_refresh_message = None @@ -239,19 +239,19 @@ def __init__(self, self.is_shadow : bool = window.is_shadow() self.has_alpha : bool = HAS_ALPHA and window.has_alpha() self.window_dimensions = ww, wh - #where the window is mapped on the client: + # where the window is mapped on the client: self.mapped_at = None self.fullscreen : bool = not self.is_tray and window.get("fullscreen") if default_encoding_options.get("scaling.control") is None: self.scaling_control = None #means "auto" else: - #ClientConnection sets defaults with the client's scaling.control value + # ClientConnection sets defaults with the client's scaling.control value self.scaling_control = default_encoding_options.intget("scaling.control", 1) self.scaling = None - self.maximized : bool = False #set by the client! + self.maximized : bool = False # set by the client! 
self.iconic : bool = False self.window_signal_handlers = [] - #watch for changes to properties that are used to derive the content-type: + # watch for changes to properties that are used to derive the content-type: self.content_type : str = window.get("content-type", "") if "content-type" in window.get_dynamic_property_names(): sid = window.connect("notify::content-type", self.content_type_changed) @@ -264,8 +264,8 @@ def __init__(self, sid = window.connect("notify::fullscreen", self._fullscreen_changed) self.window_signal_handlers.append(sid) if "children" in window.get_internal_property_names(): - #we just copy the value to an attribute of window-source, - #so that we can access it from any thread + # we just copy the value to an attribute of window-source, + # so that we can access it from any thread def children_updated(*_args): self.children = window.get_property("children") sid = window.connect("notify::children", children_updated) @@ -281,10 +281,10 @@ def children_updated(*_args): self.image_depth : int = window.get_property("depth") # general encoding tunables (mostly used by video encoders): - #keep track of the target encoding_quality: (event time, info, encoding speed): + # keep track of the target encoding_quality: (event time, info, encoding speed): self._encoding_quality : deque[tuple[float,int]] = deque(maxlen=100) self._encoding_quality_info : dict[str,Any] = {} - #keep track of the target encoding_speed: (event time, info, encoding speed): + # keep track of the target encoding_speed: (event time, info, encoding speed): self._encoding_speed : deque[tuple[float,int]] = deque(maxlen=100) self._encoding_speed_info : dict[str,Any] = {} # they may have fixed values: @@ -319,23 +319,23 @@ def children_updated(*_args): self.window_signal_handlers.append(sid) if self.has_alpha and BROWSER_ALPHA_FIX and not self.is_OR: - #remove alpha from 'NORMAL' browser windows - #of a size greater than 200x200: - if self.content_type.find("browser")>=0 and "NORMAL" in self.window_type and ww>=200 and wh>=200: + # remove alpha from 'NORMAL' browser windows + # of a size greater than 200x200: + if self.content_type.find("browser")>=0 and "NORMAL" in self.window_type and ww >= 200 and wh >= 200: self.has_alpha = False - #will be overridden by update_quality() and update_speed() called from update_encoding_selection() - #just here for clarity: - nobwl = (self.bandwidth_limit or 0)<=0 - if self._quality_hint>=0: + # will be overridden by update_quality() and update_speed() called from update_encoding_selection() + # just here for clarity: + nobwl = (self.bandwidth_limit or 0) <= 0 + if self._quality_hint >= 0: self._current_quality = capr(self._quality_hint) - elif self._fixed_quality>0: + elif self._fixed_quality > 0: self._current_quality = capr(self._fixed_quality) else: self._current_quality = capr(encoding_options.intget("initial_quality", INITIAL_QUALITY*(1+int(nobwl)))) - if self._speed_hint>=0: + if self._speed_hint >= 0: self._current_speed = capr(self._speed_hint) - elif self._fixed_speed>0: + elif self._fixed_speed > 0: self._current_speed = capr(self._fixed_speed) else: self._current_speed = capr(encoding_options.intget("initial_speed", INITIAL_SPEED*(1+int(nobwl)))) @@ -345,7 +345,7 @@ def children_updated(*_args): self._rgb_auto_threshold : int = MAX_PIXELS_PREFER_RGB log("initial encoding for %s: %s", self.wid, self.encoding) - #ready to service: + # ready to service: self._damage_cancelled = 0 def __repr__(self) -> str: @@ -358,13 +358,12 @@ def ui_thread_check(self) -> None: if ct 
!= self.ui_thread: raise RuntimeError(f"called from {ct.name!r} instead of UI thread {self.ui_thread}") - - def insert_encoder(self, encoder_name:str, encoding:str, encode_fn:Callable) -> None: + def insert_encoder(self, encoder_name: str, encoding: str, encode_fn: Callable) -> None: log(f"insert_encoder({encoder_name}, {encoding}, {encode_fn})") self._all_encoders.setdefault(encoding, []).insert(0, encode_fn) self._encoders[encoding] = encode_fn - def append_encoder(self, encoding:str, encode_fn:Callable) -> None: + def append_encoder(self, encoding: str, encode_fn: Callable) -> None: log("append_encoder(%s, %s)", encoding, encode_fn) self._all_encoders.setdefault(encoding, []).append(encode_fn) if encoding not in self._encoders: @@ -376,10 +375,11 @@ def init_encoders(self) -> None: self.update_encoding_selection(self.encoding, init=True) def do_init_encoders(self) -> None: - self._all_encoders : dict[str,list[Callable]] = {} - self._encoders : dict[str,Callable] = {} + self._all_encoders : dict[str, list[Callable]] = {} + self._encoders : dict[str, Callable] = {} picture_encodings = set() - def add(encoder_name:str): + + def add(encoder_name: str): encoder = get_codec(encoder_name) if not encoder: return None @@ -391,24 +391,24 @@ def add(encoder_name:str): rgb = add("enc_rgb") if not rgb: log.warn("Warning: plain rgb encoder is missing!") - #we need pillow for scaling and grayscale: + # we need pillow for scaling and grayscale: pillow = add("enc_pillow") - if self._mmap_size>0: + if self._mmap_size > 0: try: from xpra.net.mmap import mmap_write + assert mmap_write except ImportError: if first_time("mmap_write missing"): log.warn("Warning: cannot use mmap, no write method support") else: - self.mmap_write = mmap_write self.insert_encoder("mmap", "mmap", self.mmap_encode) if not FORCE_PILLOW or not pillow: - #prefer these native encoders over the Pillow version: + # prefer these native encoders over the Pillow version: add("enc_spng") add("enc_webp") add("enc_jpeg") add("enc_avif") - #prefer nvjpeg over all the other jpeg encoders: + # prefer nvjpeg over all the other jpeg encoders: log("init_encoders() cuda_device_context=%s", self.cuda_device_context) if self.cuda_device_context: add("enc_nvjpeg") @@ -420,8 +420,8 @@ def init_vars(self) -> None: self.server_encodings = () self.encoding = "" self.encodings = () - self.encoding_last_used : str = "" - self.auto_refresh_encodings = () + self.encoding_last_used = "" + self.auto_refresh_encodings: tuple[str, ...] = () self.core_encodings = () self.rgb_formats = () self.full_csc_modes = typedict() @@ -430,10 +430,9 @@ def init_vars(self) -> None: self.rgb_lz4 = False self.supports_transparency = False self.full_frames_only = False - self.suspended : bool = False + self.suspended = False self.strict = STRICT_MODE self.decoder_speed = typedict() - self.mmap_write = None # self.decode_error_refresh_timer : int = 0 self.may_send_timer : int = 0 @@ -491,8 +490,8 @@ def cleanup(self) -> None: self.init_vars() self._mmap_size = 0 self.batch_config.cleanup() - #we can only clear the encoders after clearing the whole encoding queue: - #(because mmap cannot be cancelled once queued for encoding) + # we can only clear the encoders after clearing the whole encoding queue: + # (because mmap cannot be cancelled once queued for encoding) self.call_in_encode_thread(False, self.encode_ended) def encode_ended(self) -> None: @@ -513,7 +512,7 @@ def ui_cleanup(self) -> None: def get_info(self) -> dict[str,Any]: - #should get prefixed with "client[M].window[N]." 
by caller + # should get prefixed with "client[M].window[N]." by caller """ Add window specific stats """ @@ -529,12 +528,12 @@ def get_info(self) -> dict[str,Any]: }, }) try: - #ie: get_strict_encoding -> "strict_encoding" + # ie: get_strict_encoding -> "strict_encoding" einfo["selection"] = self.get_best_encoding.__name__.replace("get_", "") except AttributeError: pass - #"encodings" info: + # "encodings" info: esinfo : dict[str,Any] = { "" : self.encodings, "core" : self.core_encodings, @@ -557,7 +556,7 @@ def get_info(self) -> dict[str,Any]: } }) - #remove large default dict: + # remove large default dict: info.update({ "idle" : self.is_idle, "dimensions" : self.window_dimensions, @@ -570,7 +569,7 @@ def get_info(self) -> dict[str,Any]: }, "encodings" : esinfo, "rgb_threshold" : self._rgb_auto_threshold, - "mmap" : self._mmap_size>0, + "mmap" : self._mmap_size > 0, "last_used" : self.encoding_last_used or "", "full-frames-only" : self.full_frames_only, "supports-transparency" : self.supports_transparency, @@ -608,14 +607,15 @@ def get_damage_fps(self) -> int: cutoff = now-5 lde = tuple(x[0] for x in tuple(self.statistics.last_damage_events) if x[0]>=cutoff) fps = 0 - if len(lde)>=2: + if len(lde) >= 2: elapsed = now-min(lde) - if elapsed>0: + if elapsed > 0: fps = round(len(lde) / elapsed) return fps def get_quality_speed_info(self) -> dict[str,Any]: info = {} + def add_list_info(prefix, v, vinfo): if not v: return @@ -633,7 +633,7 @@ def add_list_info(prefix, v, vinfo): def get_property_info(self) -> dict[str,Any]: return { "fullscreen" : self.fullscreen or False, - #speed / quality properties (not necessarily the same as the video encoder settings..): + # speed / quality properties (not necessarily the same as the video encoder settings..): "encoding-hint" : self._encoding_hint or "", "speed" : { "min" : self._fixed_min_speed, @@ -653,7 +653,6 @@ def get_property_info(self) -> dict[str,Any]: }, } - def go_idle(self) -> None: self.is_idle = True self.lock_batch_delay(LOCKED_BATCH_DELAY) @@ -766,9 +765,9 @@ def window_opaque_region_changed(self, window, *args) -> bool: return True def set_client_properties(self, properties : typedict) -> None: - #filter out stuff we don't care about - #to see if there is anything to set at all, - #and if not, don't bother doing the potentially expensive update_encoding_selection() + # filter out stuff we don't care about + # to see if there is anything to set at all, + # and if not, don't bother doing the potentially expensive update_encoding_selection() for k in ("workspace", "screen"): properties.pop(k, None) if properties: @@ -786,27 +785,25 @@ def do_set_client_properties(self, properties : typedict) -> None: self.decoder_speed = typedict(properties.dictget("decoder-speed") or self.decoder_speed) rgb_formats = properties.strtupleget("encodings.rgb_formats", self.rgb_formats) if not self.supports_transparency: - #remove rgb formats with alpha + # remove rgb formats with alpha rgb_formats = tuple(x for x in rgb_formats if x.find("A")<0) self.rgb_formats = rgb_formats self.send_window_size = properties.boolget("encoding.send-window-size", self.send_window_size) self.parse_csc_modes(properties.dictget("encoding.full_csc_modes", default=None)) - #select the defaults encoders: - #(in case pillow was selected previously and the client side scaling changed) + # select the defaults encoders: + # (in case pillow was selected previously and the client side scaling changed) for encoding, encoders in self._all_encoders.items(): self._encoders[encoding] = 
encoders[0] self.update_encoding_selection(self.encoding) - def parse_csc_modes(self, full_csc_modes) -> None: - #only override if values are specified: + # only override if values are specified: log("parse_csc_modes(%s) current value=%s", full_csc_modes, self.full_csc_modes) if full_csc_modes is not None and isinstance(full_csc_modes, dict): self.full_csc_modes = typedict() for enc, csc_formats in full_csc_modes.items(): self.full_csc_modes[enc] = tuple(csc_formats) - def set_auto_refresh_delay(self, d:int) -> None: self.auto_refresh_delay = d self.update_refresh_attributes() @@ -818,14 +815,14 @@ def set_av_sync_delay(self, new_delay:int) -> None: self.av_sync_delay_base = new_delay def may_update_av_sync_delay(self) -> None: - #set the target then schedule a timer to gradually - #get the actual value "av_sync_delay" moved towards it + # set the target then schedule a timer to gradually + # get the actual value "av_sync_delay" moved towards it self.av_sync_delay_target = max(0, self.av_sync_delay_base - self.av_sync_frame_delay) avsynclog("may_update_av_sync_delay() target=%s from base=%s, frame-delay=%s", self.av_sync_delay_target, self.av_sync_delay_base, self.av_sync_frame_delay) self.schedule_av_sync_update() - def schedule_av_sync_update(self, delay:int=0) -> None: + def schedule_av_sync_update(self, delay=0) -> None: avsynclog("schedule_av_sync_update(%i) wid=%i, delay=%i, target=%i, timer=%s", delay, self.wid, self.av_sync_delay, self.av_sync_delay_target, self.av_sync_timer) if self.av_sync_timer: @@ -833,39 +830,39 @@ def schedule_av_sync_update(self, delay:int=0) -> None: if not self.av_sync: self.av_sync_delay = 0 return - if self.av_sync_delay==self.av_sync_delay_target: - return #already up to date + if self.av_sync_delay == self.av_sync_delay_target: + return # already up to date if self.av_sync_timer: - return #already scheduled + return # already scheduled self.av_sync_timer = self.timeout_add(delay, self.update_av_sync_delay) def update_av_sync_delay(self) -> None: self.av_sync_timer = 0 delta = self.av_sync_delay_target-self.av_sync_delay - if delta==0: + if delta == 0: return - #limit the rate of change: + # limit the rate of change: rdelta = min(AV_SYNC_RATE_CHANGE, max(-AV_SYNC_RATE_CHANGE, delta)) avsynclog("update_av_sync_delay() wid=%i, current=%s, target=%s, adding %s (capped to +-%s from %s)", self.wid, self.av_sync_delay, self.av_sync_delay_target, rdelta, AV_SYNC_RATE_CHANGE, delta) self.av_sync_delay += rdelta - if self.av_sync_delay!=self.av_sync_delay_target: + if self.av_sync_delay != self.av_sync_delay_target: self.schedule_av_sync_update(AV_SYNC_TIME_CHANGE) def set_new_encoding(self, encoding:str, strict:bool) -> None: if strict is not None or STRICT_MODE: self.strict = strict or STRICT_MODE - if self.encoding==encoding: + if self.encoding == encoding: return self.statistics.reset() self.update_encoding_selection(encoding) def update_encoding_selection(self, encoding=None, exclude=(), init:bool=False) -> None: - #now we have the real list of encodings we can use: - #"rgb32" and "rgb24" encodings are both aliased to "rgb" - if self._mmap_size>0 and self.encoding!="grayscale": + # now we have the real list of encodings we can use: + # "rgb32" and "rgb24" encodings are both aliased to "rgb" + if self._mmap_size > 0 and self.encoding!="grayscale": self.auto_refresh_encodings = () self.encoding = "mmap" self.encodings = ("mmap", ) @@ -877,22 +874,24 @@ def update_encoding_selection(self, encoding=None, exclude=(), init:bool=False) if not 
self.common_encodings: raise ValueError("no common encodings found (server: %s vs client: %s, excluding: %s)" % ( csv(self._encoders.keys()), csv(self.core_encodings), csv(exclude))) - #ensure the encoding chosen is supported by this source: - if (encoding in self.common_encodings or encoding in ("stream", "auto", "grayscale")) and len(self.common_encodings)>1: + # ensure the encoding chosen is supported by this source: + if encoding in self.common_encodings or ( + encoding in ("stream", "auto", "grayscale") and len(self.common_encodings)>1 + ): self.encoding = encoding else: self.encoding = self.common_encodings[0] log("ws.update_encoding_selection(%s, %s, %s) encoding=%s, common encodings=%s", encoding, exclude, init, self.encoding, self.common_encodings) assert self.encoding is not None - #auto-refresh: + # auto-refresh: if self.client_refresh_encodings: - #client supplied list, honour it: + # client supplied list, honour it: ropts = set(self.client_refresh_encodings) else: - #sane defaults: + # sane defaults: ropts = set(REFRESH_ENCODINGS) #default encodings for auto-refresh - if (self.refresh_quality<100 or not TRUE_LOSSLESS) and self.image_depth>16: + if (self.refresh_quality < 100 or not TRUE_LOSSLESS) and self.image_depth > 16: ropts.add("jpeg") ropts.add("jpega") are : tuple[str,...] = () @@ -916,19 +915,19 @@ def update_encoding_options(self, force_reload:bool=False) -> None: for coords in opr: r = rectangle(*coords) if r.contains(0, 0, ww, wh): - #window is fully opaque + # window is fully opaque self._want_alpha = False break self._lossless_threshold_base = max(0, min(90, 60+self._current_speed//5 + int(cv*100) - int(self.is_shadow)*20)) self._lossless_threshold_pixel_boost = max(5, 20-self._current_speed//5) - #calculate the threshold for using rgb - #if speed is high, assume we have bandwidth to spare + # calculate the threshold for using rgb + # if speed is high, assume we have bandwidth to spare smult = max(0.25, (self._current_speed-50)/5.0) qmult = max(0.0, self._current_quality/20.0) pcmult = min(20.0, 0.5+self.statistics.packet_count)/20.0 max_rgb_threshold = 16*1024 min_rgb_threshold = 2048 - if cv>0.1: + if cv > 0.1: max_rgb_threshold = int(32*1024/(1+cv)) min_rgb_threshold = 1024 bwl = self.bandwidth_limit @@ -938,23 +937,23 @@ def update_encoding_options(self, force_reload:bool=False) -> None: v = int(MAX_PIXELS_PREFER_RGB * pcmult * smult * qmult * weight) crs = self.client_render_size if crs and DOWNSCALE and (crs[0]0: - #with mmap, we can move lots of data around easily - #so favour large screen updates over small packets + if self._mmap and self._mmap_size > 0: + # with mmap, we can move lots of data around easily + # so favour large screen updates over small packets self.max_small_regions = 10 self.max_bytes_percent = 25 self.small_packet_cost = 4096 - elif self.content_type=="desktop": - #in desktop mode, many areas will be updating - #so favour large screen updates + elif self.content_type == "desktop": + # in desktop mode, many areas will be updating + # so favour large screen updates self.max_small_regions = 20 self.max_bytes_percent = 40 self.assign_encoding_getter() @@ -977,18 +976,18 @@ def get_best_encoding_impl(self) -> Callable: if self._encoding_hint and self._encoding_hint in self._encoders: log("using encoding hint: %r", self._encoding_hint) return self.encoding_is_hint - #choose which method to use for selecting an encoding - #first the easy ones (when there is no choice): - if self._mmap_size>0 and self.encoding!="grayscale": + # choose which 
method to use for selecting an encoding + # first the easy ones (when there is no choice): + if self._mmap_size > 0 and self.encoding!="grayscale": log("using mmap") return self.encoding_is_mmap - if self.encoding=="png/L": - #(png/L would look awful if we mixed it with something else) + if self.encoding == "png/L": + # (png/L would look awful if we mixed it with something else) log("using png/L") return self.encoding_is_pngL - if self.image_depth==8 or self.encoding=="png/P": - #limited options: - if self.encoding=="grayscale": + if self.image_depth == 8 or self.encoding == "png/P": + # limited options: + if self.encoding == "grayscale": assert "png/L" in self.common_encodings log("using png/L") return self.encoding_is_pngL @@ -996,10 +995,10 @@ def get_best_encoding_impl(self) -> Callable: assert "png/P" in self.common_encodings return self.encoding_is_pngP if self.strict and self.encoding not in ("auto", "stream"): - #honour strict flag - if self.encoding=="rgb": - #choose between rgb32 and rgb24 already - #as alpha support does not change without going through this method + # honour strict flag + if self.encoding == "rgb": + # choose between rgb32 and rgb24 already + # as alpha support does not change without going through this method if self._want_alpha and "rgb32" in self.common_encodings: log("using rgb32 for strict mode alpha") return self.encoding_is_rgb32 @@ -1012,19 +1011,19 @@ def get_best_encoding_impl(self) -> Callable: log("using rgb32 for tray") return self.encoding_is_rgb32 if self.encoding in ("png", "png/P", "png/L"): - #chosen encoding does alpha, stick to it: - #(prevents alpha bleeding artifacts, + # chosen encoding does alpha, stick to it: + # (prevents alpha bleeding artifacts, # as different encoders may encode alpha differently) log("using %s for tray", self.encoding) return self.get_strict_encoding - if self.encoding=="grayscale": + if self.encoding == "grayscale": log("using grayscale") return self.encoding_is_grayscale - #choose an alpha encoding and keep it? + # choose an alpha encoding and keep it? 
log("using transparent encoding") return self.get_transparent_encoding - if self.encoding=="rgb": - #if we're here we don't need alpha, so try rgb24 first: + if self.encoding == "rgb": + # if we're here we don't need alpha, so try rgb24 first: if "rgb24" in self.common_encodings: log("using rgb24 for rgb encoding") return self.encoding_is_rgb24 @@ -1035,11 +1034,11 @@ def get_best_encoding_impl(self) -> Callable: return self.get_best_encoding_impl_default() def get_best_encoding_impl_default(self) -> Callable: - #stick to what is specified or use rgb for small regions: + # stick to what is specified or use rgb for small regions: if self.encoding in ("auto", "stream"): log(f"using {self.encoding}") return self.get_auto_encoding - if self.encoding=="grayscale": + if self.encoding == "grayscale": log(f"using {self.encoding}") return self.encoding_is_grayscale log("using current or rgb") @@ -1082,7 +1081,7 @@ def encoding_is_grayscale(self, *args) -> str: return e def get_transparent_encoding(self, w:int, h:int, options, current_encoding:str) -> str: - #small areas prefer rgb, also when high speed and high quality + # small areas prefer rgb, also when high speed and high quality if current_encoding in TRANSPARENCY_ENCODINGS: return current_encoding pixel_count = w*h @@ -1091,25 +1090,25 @@ def get_transparent_encoding(self, w:int, h:int, options, current_encoding:str) if not co and current_encoding: return current_encoding quality = options.get("quality", self._current_quality) - lossy = quality<100 + lossy = quality < 100 if "rgb32" in co and ( - (pixel_count24 and self.client_bit_depth>24) + (pixel_count < self._rgb_auto_threshold) or + # the only encoding that can preserve higher bit depth at present: + (not lossy and depth > 24 and self.client_bit_depth > 24) ): return "rgb32" - grayscale = self.encoding=="grayscale" - webp = "webp" in co and 16383>=w>=2 and 16383>=h>=2 and not grayscale - if webp and depth in (24, 32) and w*h<=WEBP_EFFICIENCY_CUTOFF: + grayscale = self.encoding == "grayscale" + webp = "webp" in co and 16383 >= w >=2 and 16383 >= h >= 2 and not grayscale + if webp and depth in (24, 32) and w*h <= WEBP_EFFICIENCY_CUTOFF: return "webp" - if "jpega" in co and w>=2 and h>=2 and (lossy or not TRUE_LOSSLESS): + if "jpega" in co and w >= 2 and h >= 2 and (lossy or not TRUE_LOSSLESS): return "jpega" if webp: return "webp" for e in ("png", "rgb32"): if e in co: return e - #so we don't have an encoding that does transparency... any will do: + # so we don't have an encoding that does transparency... 
any will do: return self.get_auto_encoding(w, h, options, current_encoding) def get_auto_encoding(self, w, h, options, current_encoding=None) -> str: @@ -1120,23 +1119,23 @@ def do_get_auto_encoding(self, w, h, options, current_encoding, encoding_options if not co: raise ValueError("no options to choose from") depth = self.image_depth - grayscale = self.encoding=="grayscale" + grayscale = self.encoding == "grayscale" alpha = self._want_alpha or self.is_tray quality = options.get("quality", 0) - if self._lossless_threshold_base24 and self.client_bit_depth>24 and "rgb32" in co: + if w*h < self._rgb_auto_threshold and not grayscale: + if depth > 24 and self.client_bit_depth > 24 and "rgb32" in co: return "rgb32" if "rgb24" in co: return "rgb24" - jpeg = "jpeg" in co and w>=2 and h>=2 - jpega = "jpega" in co and w>=2 and h>=2 - webp = "webp" in co and 16383>=w>=2 and 16383>=h>=2 and not grayscale + jpeg = "jpeg" in co and w >= 2 and h >= 2 + jpega = "jpega" in co and w >= 2 and h >= 2 + webp = "webp" in co and 16383 >= w >= 2 and 16383 >= h >= 2 and not grayscale avif = "avif" in co - lossy = quality<100 + lossy = quality < 100 if depth in (24, 32) and (jpeg or jpega or webp or avif): - if webp and (not lossy or w*h<=WEBP_EFFICIENCY_CUTOFF): + if webp and (not lossy or w*h <= WEBP_EFFICIENCY_CUTOFF): return "webp" if lossy or not TRUE_LOSSLESS: if jpeg and not alpha: @@ -1147,11 +1146,11 @@ def do_get_auto_encoding(self, w, h, options, current_encoding, encoding_options return "webp" if avif: return "avif" - elif depth>24 and "rgb32" in co and self.client_bit_depth>24 and self.client_bit_depth!=32: - #the only encoding that can do higher bit depth at present - #(typically r210 which is actually rgb30+2) + elif depth > 24 and "rgb32" in co and self.client_bit_depth > 24 and self.client_bit_depth != 32: + # the only encoding that can do higher bit depth at present + # (typically r210 which is actually rgb30+2) return "rgb32" - if "png" in co and (not lossy or depth<=16): + if "png" in co and (not lossy or depth <= 16): return "png" if jpeg: return "jpeg" @@ -1160,18 +1159,17 @@ def do_get_auto_encoding(self, w, h, options, current_encoding, encoding_options if current_encoding in co: return current_encoding try: - return next(x for x in co if x!="rgb") + return next(x for x in co if x != "rgb") except StopIteration: return co[0] def get_current_or_rgb(self, pixel_count, *_args) -> str: - if pixel_count None: self.mapped_at = mapped_at self.no_idle() @@ -1181,7 +1179,6 @@ def unmap(self) -> None: self.statistics.reset() self.go_idle() - def cancel_damage(self, limit=0.0) -> None: """ Use this method to cancel all currently pending and ongoing @@ -1190,7 +1187,7 @@ def cancel_damage(self, limit=0.0) -> None: """ damagelog("cancel_damage(%s) wid=%s, dropping delayed region %s, %s queued encodes, and all sequences up to %s", limit, self.wid, self._damage_delayed, len(self.encode_queue), self._sequence) - #for those in flight, being processed in separate threads, drop by sequence: + # for those in flight, being processed in separate threads, drop by sequence: self._damage_cancelled = limit or self._sequence self.cancel_expire_timer() self.cancel_may_send_timer() @@ -1199,13 +1196,13 @@ def cancel_damage(self, limit=0.0) -> None: self.cancel_timeout_timer() self.cancel_av_sync_timer() self.cancel_decode_error_refresh_timer() - #if a region was delayed, we can just drop it now: + # if a region was delayed, we can just drop it now: self.refresh_regions = [] self._damage_delayed = None - #make sure we 
don't account for those as they will get dropped - #(generally before encoding - only one may still get encoded): + # make sure we don't account for those as they will get dropped + # (generally before encoding - only one may still get encoded): for sequence in tuple(self.statistics.encoding_pending.keys()): - if self._damage_cancelled>=sequence: + if self._damage_cancelled >= sequence: self.statistics.encoding_pending.pop(sequence, None) def cancel_expire_timer(self) -> None: @@ -1246,51 +1243,49 @@ def cancel_av_sync_timer(self) -> None: self.av_sync_timer = 0 self.source_remove(avst) - def is_cancelled(self, sequence=MAX_SEQUENCE) -> bool: """ See cancel_damage(wid) """ - return self._damage_cancelled>=sequence - + return self._damage_cancelled >= sequence def calculate_batch_delay(self, has_focus, other_is_fullscreen, other_is_maximized) -> None: bc = self.batch_config if bc.locked: return - if self._mmap_size>0: - #mmap is so fast that we don't need to use the batch delay: + if self._mmap_size > 0: + # mmap is so fast that we don't need to use the batch delay: bc.delay = bc.min_delay return - #calculations take time (CPU), see if we can just skip it this time around: + # calculations take time (CPU), see if we can just skip it this time around: now = monotonic() lr = self.statistics.last_recalculate elapsed = now-lr statslog("calculate_batch_delay for wid=%i current batch delay=%i, last update %.1f seconds ago", self.wid, bc.delay, elapsed) - if bc.delay<=2*bc.start_delay and lr>0 and elapsed<60 and self.get_packets_backlog()==0: - #delay is low-ish, figure out if we should bother updating it + if bc.delay <= 2*bc.start_delay and lr > 0 and elapsed < 60 and self.get_packets_backlog() == 0: + # delay is low-ish, figure out if we should bother updating it lde = tuple(self.statistics.last_damage_events) if not lde: - return #things must have got reset anyway + return # things must have got reset anyway since_last = tuple((pixels, compressed_size) for t, _, pixels, _, compressed_size, _ - in tuple(self.statistics.encoding_stats) if t>=lr) + in tuple(self.statistics.encoding_stats) if t >= lr) if len(since_last)<=5: statslog("calculate_batch_delay for wid=%i, skipping - only %i events since the last update", self.wid, len(since_last)) return pixel_count = sum(v[0] for v in since_last) ww, wh = self.window_dimensions - if pixel_count<=ww*wh: + if pixel_count <= ww*wh: statslog("calculate_batch_delay for wid=%i, skipping - only %i pixels updated since the last update", self.wid, pixel_count) return - if self._mmap_size<=0: + if self._mmap_size <= 0: statslog("calculate_batch_delay for wid=%i, %i pixels updated since the last update", self.wid, pixel_count) - #if pixel_count<8*ww*wh: + # if pixel_count < 8*ww*wh: nbytes = sum(v[1] for v in since_last) - #less than 16KB/s since last time? (or <=64KB) + # less than 16KB/s since last time? 
(or <=64KB) max_bytes = max(4, int(elapsed))*16*1024 - if nbytes<=max_bytes: + if nbytes <= max_bytes: statslog("calculate_batch_delay for wid=%i, skipping - only %i bytes sent since the last update", self.wid, nbytes) return @@ -1299,7 +1294,7 @@ def calculate_batch_delay(self, has_focus, other_is_fullscreen, other_is_maximiz other_is_fullscreen, other_is_maximized, self.is_OR, self.soft_expired, bc, self.global_statistics, self.statistics, self.bandwidth_limit, self.jitter) - #update the normalized value: + # update the normalized value: ww, wh = self.window_dimensions bc.delay_per_megapixel = int(bc.delay*1000000//max(1, (ww*wh))) self.statistics.last_recalculate = now @@ -1313,30 +1308,30 @@ def update_speed(self) -> None: if self.is_cancelled(): return statslog("update_speed() suspended=%s, mmap=%s, current=%i, hint=%i, fixed=%i, encoding=%s, sequence=%i", - self.suspended, self._mmap_size>0, + self.suspended, self._mmap_size > 0, self._current_speed, self._speed_hint, self._fixed_speed, self.encoding, self._sequence) if self.suspended: self._encoding_speed_info = {"suspended" : True} return - if self._mmap_size>0: + if self._mmap_size > 0: self._encoding_speed_info = {"mmap" : True} return speed = self._speed_hint - if speed>=0: + if speed >= 0: self._current_speed = capr(speed) self._encoding_speed_info = {"hint" : True} return speed = self._fixed_speed - if speed>=0: + if speed >= 0: self._current_speed = capr(speed) self._encoding_speed_info = {"fixed" : True} return - if self._sequence<10: + if self._sequence < 10: self._encoding_speed_info = {"pending" : True} return now = monotonic() - #make a copy to work on: + # make a copy to work on: speed_data = list(self._encoding_speed) info, target, max_speed = get_target_speed(self.window_dimensions, self.batch_config, self.global_statistics, self.statistics, @@ -1356,14 +1351,14 @@ def update_speed(self) -> None: def set_min_speed(self, min_speed:int) -> None: min_speed = capr(min_speed) if self._fixed_min_speed!=min_speed: - if min_speed>0: + if min_speed > 0: self._fixed_speed = 0 self._fixed_min_speed = min_speed self.reconfigure(True) def set_max_speed(self, max_speed:int) -> None: max_speed = capr(max_speed) - if self._fixed_max_speed!=max_speed: + if self._fixed_max_speed != max_speed: self._fixed_max_speed = max_speed self.reconfigure(True) @@ -1374,7 +1369,6 @@ def set_speed(self, speed:int) -> None: self._current_speed = speed self.reconfigure(True) - def update_quality(self) -> None: if self.is_cancelled(): return @@ -1385,26 +1379,26 @@ def update_quality(self) -> None: if self.suspended: self._encoding_quality_info = {"suspended" : True} return - if self._mmap_size>0: + if self._mmap_size > 0: self._encoding_quality_info = {"mmap" : True} return quality = self._quality_hint - if quality>=0: + if quality >= 0: self._current_quality = capr(quality) self._encoding_quality_info = {"hint" : True} return quality = self._fixed_quality - if quality>=0: + if quality >= 0: self._current_quality = capr(quality) self._encoding_quality_info = {"fixed" : True} return if self.encoding in LOSSLESS_ENCODINGS: - #the user has selected an encoding which does not use quality - #so skip the calculations! + # the user has selected an encoding which does not use quality + # so skip the calculations! 
self._encoding_quality_info = {"lossless" : self.encoding} self._current_quality = 100 return - if self._sequence<10: + if self._sequence < 10: self._encoding_quality_info = {"pending" : True} return if self.window_type.intersection(LOSSLESS_WINDOW_TYPES): @@ -1415,7 +1409,7 @@ def update_quality(self) -> None: info, target = get_target_quality(self.window_dimensions, self.batch_config, self.global_statistics, self.statistics, self.bandwidth_limit, self._fixed_min_quality, self._fixed_min_speed) - #make a copy to work on: + # make a copy to work on: ves_copy = list(self._encoding_quality) ves_copy.append((now, target)) quality = int(time_weighted_average(ves_copy, min_offset=0.1, rpow=1.2)) @@ -1431,7 +1425,7 @@ def update_quality(self) -> None: def set_min_quality(self, min_quality:int) -> None: min_quality = capr(min_quality) if self._fixed_min_quality!=min_quality: - if min_quality>0: + if min_quality > 0: self._fixed_quality = 0 self._fixed_min_quality = min_quality self.update_quality() @@ -1451,17 +1445,16 @@ def set_quality(self, quality:int) -> None: self._current_quality = self._fixed_quality self.reconfigure(True) - def update_refresh_attributes(self) -> None: - if self._mmap_size>0: - #not used since mmap is lossless + if self._mmap_size > 0: + # not used since mmap is lossless return - if self.auto_refresh_delay==0: + if self.auto_refresh_delay == 0: self.base_auto_refresh_delay = 0 return ww, wh = self.window_dimensions cv = self.global_statistics.congestion_value - #try to take into account: + # try to take into account: # - window size: bigger windows are more costly, refresh more slowly # - when quality is low, we can refresh more slowly # - when speed is low, we can also refresh slowly @@ -1470,17 +1463,17 @@ def update_refresh_attributes(self) -> None: qf = (150-self._current_quality)/100.0 sf = (150-self._current_speed)/100.0 cf = (100+cv*500)/100.0 #high congestion value -> very high delay - #bandwidth limit is used to set a minimum on the delay + # bandwidth limit is used to set a minimum on the delay min_delay = int(max(100*cf, self.auto_refresh_delay, 50 * sizef, self.batch_config.delay*4)) bwl = self.bandwidth_limit or 0 - if bwl>0: - #1Mbps -> 1s, 10Mbps -> 0.1s + if bwl > 0: + # 1Mbps -> 1s, 10Mbps -> 0.1s min_delay = max(min_delay, 1000*1000*1000//bwl) max_delay = int(1000*cf) raw_delay = int(sizef * qf * sf * cf) if self.content_type.find("text")>=0: raw_delay = raw_delay*2//3 - elif self.content_type=="video": + elif self.content_type == "video": raw_delay = raw_delay*3//2 delay = max(min_delay, min(max_delay, raw_delay)) refreshlog("update_refresh_attributes() wid=%i, sizef=%.2f, content-type=%s, qf=%.2f, sf=%.2f, cf=%.2f, batch delay=%i, bandwidth-limit=%s, min-delay=%i, max-delay=%i, delay=%i", @@ -1489,12 +1482,12 @@ def update_refresh_attributes(self) -> None: rs = AUTO_REFRESH_SPEED rq = AUTO_REFRESH_QUALITY bits_per_pixel = bwl/(1+ww*wh) - if self._current_quality<70 and (cv>0.1 or (bwl>0 and bits_per_pixel<1)): - #when bandwidth is scarce, don't use lossless refresh, - #switch to almost-lossless: + if self._current_quality < 70 and (cv > 0.1 or (bwl > 0 and bits_per_pixel < 1)): + # when bandwidth is scarce, don't use lossless refresh, + # switch to almost-lossless: rs = AUTO_REFRESH_SPEED//2 rq = 100-cv*10 - if bwl>0: + if bwl > 0: rq -= sqrt(1000*1000//bwl) rs = min(50, max(0, rs)) rq = min(99, max(80, int(rq), self._current_quality+30)) @@ -1507,14 +1500,12 @@ def do_set_auto_refresh_delay(self, min_delay:int, delay:int) -> None: 
self.min_auto_refresh_delay = int(min_delay) self.base_auto_refresh_delay = int(delay) - def reconfigure(self, force_reload=False) -> None: self.update_quality() self.update_speed() self.update_encoding_options(force_reload) self.update_refresh_attributes() - def damage(self, x : int, y : int, w : int, h : int, options=None) -> None: """ decide what to do with the damage area: * send it now (if not congested) @@ -1531,24 +1522,24 @@ def damage(self, x : int, y : int, w : int, h : int, options=None) -> None: self.ui_thread_check() if self.suspended: return - if w==0 or h==0: + if w == 0 or h == 0: damagelog("damage%-24s ignored zero size", (x, y, w, h, options)) - #we may fire damage ourselves, - #in which case the dimensions may be zero (if so configured by the client) + # we may fire damage ourselves, + # in which case the dimensions may be zero (if so configured by the client) return now = monotonic() if options is None: options = {} if options.pop("damage", False): damagelog("damage%s wid=%i", (x, y, w, h, options), self.wid) - self.statistics.last_damage_events.append((now, x,y,w,h)) + self.statistics.last_damage_events.append((now, x, y, w, h)) self.global_statistics.damage_events_count += 1 self.statistics.damage_events_count += 1 ww, wh = self.may_update_window_dimensions() - if ww==0 or wh==0: + if ww == 0 or wh == 0: damagelog("damage%s window size %ix%i ignored", (x, y, w, h, options), ww, wh) return - if ww>MAX_WINDOW_SIZE or wh>MAX_WINDOW_SIZE: + if ww > MAX_WINDOW_SIZE or wh > MAX_WINDOW_SIZE: if first_time(f"window-oversize-{self.wid}"): damagelog("") damagelog.warn("Warning: invalid window dimensions %ix%i for window %i", ww, wh, self.wid) @@ -1566,45 +1557,45 @@ def do_damage(self, ww : int, wh : int, x : int, y : int, w : int, h : int, opti if self.refresh_timer and options.get("quality", self._current_quality)0: + if overlap > 0: pct = int(min(100, 100*overlap//(ww*wh)) * (1+self.global_statistics.congestion_value)) sched_delay = max(self.min_auto_refresh_delay, int(self.base_auto_refresh_delay * pct // 100)) self.refresh_target_time = max(self.refresh_target_time, now + sched_delay/1000.0) delayed = self._damage_delayed if delayed: - #use existing delayed region: + # use existing delayed region: regions = delayed.regions if not self.full_frames_only: region = rectangle(x, y, w, h) add_rectangle(regions, region) - #merge/override options + # merge/override options if options is not None: override = options.get("override_options", False) existing_options = delayed.options for k in options.keys(): - if k=="override_options": + if k == "override_options": continue if override or k not in existing_options: existing_options[k] = options[k] damagelog("do_damage%-24s wid=%s, using existing %i delayed regions created %.1fms ago", (x, y, w, h, options), self.wid, len(regions), now-delayed.damage_time) - if not self.expire_timer and not self.soft_timer and self.soft_expired==0: + if not self.expire_timer and not self.soft_timer and self.soft_expired == 0: log.error("Error: bug, found a delayed region without a timer!") self.expire_timer = self.timeout_add(0, self.expire_delayed_region, now) return - #create a new delayed region: + # create a new delayed region: regions = [rectangle(x, y, w, h)] delay = options.get("delay", self.batch_config.delay) resize_elapsed = int(1000*(now-self.statistics.last_resized)) - if resize_elapsed<500: + if resize_elapsed < 500: try: - #batch more when recently resized, - #but only if this is not the first recent resize event: + # batch more when 
recently resized, + # but only if this is not the first recent resize event: if now-self.statistics.resize_events[-2]<1: delay += (500-resize_elapsed)//2 except IndexError: @@ -1613,15 +1604,15 @@ def do_damage(self, ww : int, wh : int, x : int, y : int, w : int, h : int, opti congestion_elapsed = -1 if gs: congestion_elapsed = int(1000*(now-gs.last_congestion_time)) - if congestion_elapsed<1000: + if congestion_elapsed < 1000: delay += (1000-congestion_elapsed)//4 - #raise min_delay with qsize: + # raise min_delay with qsize: min_delay = max(0, self.batch_config.min_delay * max(2, self.queue_size())//2 - FRAME_OVERHEAD) delay = max(delay, options.get("min_delay", min_delay)) delay = min(delay, options.get("max_delay", self.batch_config.max_delay)) delay = int(delay) elapsed = int(1000*(now-self.batch_config.last_event)) - #discount the elapsed time since the last event: + # discount the elapsed time since the last event: target_delay = delay delay = max(0, delay-elapsed) actual_encoding = options.get("encoding", self.encoding) @@ -1630,8 +1621,8 @@ def do_damage(self, ww : int, wh : int, x : int, y : int, w : int, h : int, opti self.batch_config.last_delays.append(lad) self.batch_config.last_delay = lad expire_delay = min(self.batch_config.expire_delay, delay) - #weighted average with the last delays: - #(so when we end up delaying a lot for some reason, + # weighted average with the last delays: + # (so when we end up delaying a lot for some reason, # then we don't expire the next one quickly after) inc = 0 try: @@ -1640,7 +1631,7 @@ def do_damage(self, ww : int, wh : int, x : int, y : int, w : int, h : int, opti continue when, d = v delta = now-when - if d>expire_delay and delta<5: + if d > expire_delay and delta < 5: weight = (5-delta)/10 inc = max(inc, int((d-expire_delay)*weight)) expire_delay += inc @@ -1667,7 +1658,6 @@ def update_window_dimensions(self, ww, wh): self.window_dimensions = ww, wh self.encode_queue_max_size = max(2, min(30, MAX_SYNC_BUFFER_SIZE//(ww*wh*4))) - def get_packets_backlog(self) -> int: s = self.statistics gs = self.global_statistics @@ -1676,7 +1666,7 @@ def get_packets_backlog(self) -> int: latency_tolerance_pct = int(min(self._damage_packet_sequence, 10) * min(monotonic()-gs.last_congestion_time, 10)) latency = s.target_latency + ACK_JITTER/1000*(1+latency_tolerance_pct/100) - #log("get_packets_backlog() latency=%s (target=%i, tolerance=%i)", + # log("get_packets_backlog() latency=%s (target=%i, tolerance=%i)", # 1000*latency, 1000*s.target_latency, latency_tolerance_pct) return s.get_late_acks(latency) @@ -1688,10 +1678,10 @@ def expire_delayed_region(self, due=0, target_delay=100) -> bool: delayed = self._damage_delayed if not delayed: damagelog("expire_delayed_region() already processed") - #region has been sent + # region has been sent return False if self.soft_timer: - #a soft timer will take care of it soon + # a soft timer will take care of it soon damagelog("expire_delayed_region() soft timer will take care of it") return False damagelog("expire_delayed_region(%i, %i) delayed region=%s", due, target_delay, delayed) @@ -1699,23 +1689,23 @@ def expire_delayed_region(self, due=0, target_delay=100) -> bool: self.cancel_may_send_timer() self.may_send_delayed() if not self._damage_delayed: - #got sent + # got sent return False now = monotonic() - if now bool: self.soft_timer = self.timeout_add(soft_delay, self.delayed_region_soft_timeout) else: damagelog("expire_delayed_region: soft expire limit reached: %i", max_soft_expired) - if 
max_soft_expired==self.max_soft_expired: - #only record this congestion if this is a new event, - #otherwise we end up perpetuating it - #because congestion events lower the latency tolerance - #which makes us more sensitive to packets backlog + if max_soft_expired == self.max_soft_expired: + # only record this congestion if this is a new event, + # otherwise we end up perpetuating it + # because congestion events lower the latency tolerance + # which makes us more sensitive to packets backlog celapsed = monotonic()-self.global_statistics.last_congestion_time - if celapsed<10: + if celapsed < 10: late_pct = 2*100*self.soft_expired delay = now-due self.networksend_congestion_event(f"soft-expire limit: {delay}ms,"+ f" {self.soft_expired}/{self.max_soft_expired}", late_pct) - #NOTE: this should never happen... - #the region should now get sent when we eventually receive the pending ACKs - #but if somehow they go missing... clean it up from a timeout: + # NOTE: this should never happen... + # the region should now get sent when we eventually receive the pending ACKs + # but if somehow they go missing... clean it up from a timeout: if not self.timeout_timer: delayed_region_time = delayed.damage_time self.timeout_timer = self.timeout_add(self.batch_config.timeout_delay, @@ -1755,20 +1745,20 @@ def delayed_region_timeout(self, delayed_region_time) -> bool: self.timeout_timer = 0 delayed = self._damage_delayed if delayed is None: - #delayed region got sent + # delayed region got sent return False region_time = delayed.damage_time if region_time!=delayed_region_time: - #this is a different region + # this is a different region return False - #ouch: same region! + # ouch: same region! now = monotonic() options = delayed.options elapsed = int(1000 * (now - region_time)) log.warn("Warning: delayed region timeout") log.warn(" region is %i seconds old, will retry - bad connection?", elapsed//1000) self._log_late_acks(log.warn) - #re-try: cancel anything pending and do a full quality refresh + # re-try: cancel anything pending and do a full quality refresh self.cancel_damage() self.cancel_expire_timer() self.cancel_refresh_timer() @@ -1789,10 +1779,9 @@ def _log_late_acks(self, log_fn : Callable) -> None: else: log_fn(" %6i %-5s: %3is", seq, ack_data[1], now-ack_data[3]) - def _may_send_delayed(self) -> None: - #this method is called from the timer, - #we know we can clear it (and no need to cancel it): + # this method is called from the timer, + # we know we can clear it (and no need to cancel it): self.may_send_timer = 0 self.may_send_delayed() @@ -1803,14 +1792,14 @@ def may_send_delayed(self) -> None: log("window %s delayed region already sent", self.wid) return if not dd.expired: - #we must wait for expire_delayed_region() + # we must wait for expire_delayed_region() return damage_time = dd.damage_time packets_backlog = self.get_packets_backlog() now = monotonic() actual_delay = int(1000 * (now-damage_time)) - if packets_backlog>0: - if actual_delay>self.batch_config.timeout_delay: + if packets_backlog > 0: + if actual_delay > self.batch_config.timeout_delay: log("send_delayed for wid %s, elapsed time %ims is above limit of %.1f", self.wid, actual_delay, self.batch_config.timeout_delay) key = f"timeout-damage-delay:{self.wid}-{damage_time}" @@ -1822,52 +1811,52 @@ def may_send_delayed(self) -> None: log("send_delayed for wid %s, sequence %i, delaying again because of backlog:", self.wid, self._sequence) log(" batch delay is %i, elapsed time is %ims", self.batch_config.delay, actual_delay) - if 
actual_delay>=1000: + if actual_delay >= 1000: self._log_late_acks(log) else: log(" %s packets", packets_backlog) - #this method will fire again from damage_packet_acked + # this method will fire again from damage_packet_acked return - #if we're here, there is no packet backlog, but there may be damage acks pending or a bandwidth limit to honour, - #if there are acks pending, may_send_delayed() should be called again from damage_packet_acked, - #if not, we must either process the region now or set a timer to check again later + # if we're here, there is no packet backlog, but there may be damage acks pending or a bandwidth limit to honour, + # if there are acks pending, may_send_delayed() should be called again from damage_packet_acked, + # if not, we must either process the region now or set a timer to check again later def check_again(delay=actual_delay/10.0): - #schedules a call to check again: + # schedules a call to check again: delay = int(min(self.batch_config.max_delay, max(10.0, delay))) self.may_send_timer = self.timeout_add(delay, self._may_send_delayed) - #locked means a fixed delay we try to honour, - #this code ensures that we don't fire too early if called from damage_packet_acked + # locked means a fixed delay we try to honour, + # this code ensures that we don't fire too early if called from damage_packet_acked if self.batch_config.locked: - if self.batch_config.delay>actual_delay: - #ensure we honour the fixed delay - #(as we may get called from a damage ack before we expire) + if self.batch_config.delay > actual_delay: + # ensure we honour the fixed delay + # (as we may get called from a damage ack before we expire) check_again(self.batch_config.delay-actual_delay) else: self.do_send_delayed() return bwl = self.bandwidth_limit - if bwl>0: + if bwl > 0: used = self.statistics.get_bitrate() bandwidthlog("may_send_delayed() wid=%3i : bandwidth limit=%i, used=%i : %i%%", self.wid, bwl, used, 100*used//bwl) - if used>=bwl: + if used >= bwl: check_again(50) return pixels_encoding_backlog, enc_backlog_count = self.statistics.get_pixels_encoding_backlog() ww, wh = self.window_dimensions - if pixels_encoding_backlog>=(ww*wh): + if pixels_encoding_backlog >= (ww*wh): log("send_delayed for wid %s, delaying again because too many pixels are waiting to be encoded: %s", self.wid, pixels_encoding_backlog) if self.statistics.get_acks_pending()==0: check_again() return - if enc_backlog_count>10: + if enc_backlog_count > 10: log("send_delayed for wid %s, delaying again because too many damage regions are waiting to be encoded: %s", self.wid, enc_backlog_count) if self.statistics.get_acks_pending()==0: check_again() return - #no backlog, so ok to send, clear soft-expired counter: + # no backlog, so ok to send, clear soft-expired counter: self.soft_expired = 0 log("send_delayed for wid %s, batch delay is %ims, elapsed time is %ims", self.wid, self.batch_config.delay, actual_delay) @@ -1908,13 +1897,13 @@ def send_delayed_regions(self, delayed_regions : DelayedRegions) -> None: self.send_regions(dr.damage_time, dr.regions, dr.encoding, dr.options) def send_regions(self, damage_time, regions, coding : str, options) -> None: - #window video source overrides this method - #in order to filter out the video region + # window video source overrides this method + # in order to filter out the video region self.do_send_regions(damage_time, regions, coding, options) def do_send_regions(self, damage_time, regions, coding : str, options, exclude_region=None, get_best_encoding:Callable=None) -> None: - 
ww,wh = self.window_dimensions + ww, wh = self.window_dimensions options = self.assign_sq_options(options) get_best_encoding = get_best_encoding or self.get_best_encoding def get_encoding(w, h): @@ -1929,16 +1918,16 @@ def send_full_window_update(cause): self.process_damage_region(damage_time, 0, 0, ww, wh, actual_encoding, options) if exclude_region is None: - if self.full_frames_only or self.encoding=="stream": + if self.full_frames_only or self.encoding == "stream": send_full_window_update("full-frames-only set") return if len(regions)>self.max_small_regions: - #too many regions! + # too many regions! send_full_window_update(f"too many regions: {len(regions)}") return - if ww*wh<=MIN_WINDOW_REGION_SIZE: - #size is too small to bother with regions: + if ww*wh <= MIN_WINDOW_REGION_SIZE: + # size is too small to bother with regions: send_full_window_update(f"small window: {ww}x{wh}") return regions = tuple(set(regions)) @@ -1955,10 +1944,10 @@ def send_full_window_update(cause): packet_cost = pixel_count+self.small_packet_cost*len(regions) log("send_delayed_regions: packet_cost=%s, merge_threshold=%s, pixel_count=%s", packet_cost, merge_threshold, pixel_count) - if packet_cost>=merge_threshold and exclude_region is None: + if packet_cost >= merge_threshold and exclude_region is None: send_full_window_update(f"bytes cost ({packet_cost}) too high (max {merge_threshold})") return - #try to merge all the regions to see if we save anything: + # try to merge all the regions to see if we save anything: merged = merge_all(regions) if exclude_region: merged_rects = merged.subtract_rect(exclude_region) @@ -1968,24 +1957,24 @@ def send_full_window_update(cause): merged_pixel_count = merged.width*merged.height merged_packet_cost = merged_pixel_count+self.small_packet_cost*len(merged_rects) log("send_delayed_regions: merged=%s, merged_bytes_cost=%s, bytes_cost=%s, merged_pixel_count=%s, pixel_count=%s", - merged_rects, merged_packet_cost, packet_cost, merged_pixel_count, pixel_count) - if self._mmap_size>0 or merged_packet_cost 0 or merged_packet_cost < packet_cost or merged_pixel_count < pixel_count: + # better, so replace with merged regions: regions = merged_rects if not regions: - #nothing left after removing the exclude region + # nothing left after removing the exclude region return - if len(regions)==1: + if len(regions) == 1: merged = regions[0] - #if we end up with just one region covering almost the entire window, - #refresh the whole window (ie: when the video encoder mask rounded the dimensions down) - if merged.x<=1 and merged.y<=1 and abs(ww-merged.width)<2 and abs(wh-merged.height)<2: + # if we end up with just one region covering almost the entire window, + # refresh the whole window (ie: when the video encoder mask rounded the dimensions down) + if merged.x <= 1 and merged.y <= 1 and abs(ww-merged.width) < 2 and abs(wh-merged.height) < 2: send_full_window_update("merged region covers almost the whole window") return - #figure out which encoding will get used, - #and shortcut out if this needs to be a full window update: + # figure out which encoding will get used, + # and shortcut out if this needs to be a full window update: i_reg_enc = [] for i,region in enumerate(regions): actual_encoding = get_encoding(region.width, region.height) @@ -1993,11 +1982,11 @@ def send_full_window_update(cause): log("send_delayed_regions: using full frame for %s encoding of %ix%i", actual_encoding, region.width, region.height) self.process_damage_region(damage_time, 0, 0, ww, wh, actual_encoding, 
options) - #we can stop here (full screen update will include the other regions) + # we can stop here (full screen update will include the other regions) return i_reg_enc.append((i, region, actual_encoding)) - #reversed so that i=0 is last for flushing + # reversed so that i=0 is last for flushing log("send_delayed_regions: queuing %i regions", len(i_reg_enc)) encodings = [] for i, region, actual_encoding in reversed(i_reg_enc): @@ -2007,12 +1996,11 @@ def send_full_window_update(cause): encodings.append(actual_encoding) log("send_delayed_regions: queued %i regions for encoding using %s", len(i_reg_enc), encodings) - - def assign_sq_options(self, options, speed_pct : int=100, quality_pct : int=100) -> dict[str,Any]: + def assign_sq_options(self, options, speed_pct : int=100, quality_pct : int=100) -> dict[str, Any]: packets_backlog = None speed = options.get("speed", 0) - if speed==0: - if self._fixed_speed>0: + if speed == 0: + if self._fixed_speed > 0: speed = self._fixed_speed else: speed = self._current_speed @@ -2020,8 +2008,8 @@ def assign_sq_options(self, options, speed_pct : int=100, quality_pct : int=100) speed = (speed - packets_backlog*20) * speed_pct // 100 speed = min(self._fixed_max_speed, max(1, self._fixed_min_speed, speed)) quality = options.get("quality", 0) - if quality==0: - if self._fixed_quality>0: + if quality == 0: + if self._fixed_quality > 0: quality = self._fixed_quality else: quality = self._current_quality @@ -2029,9 +2017,9 @@ def assign_sq_options(self, options, speed_pct : int=100, quality_pct : int=100) packets_backlog = self.get_packets_backlog() now = monotonic() if not packets_backlog: - #if we haven't sent any packets for a while, - #chances are that we can raise the quality, - #at least for the first packet: + # if we haven't sent any packets for a while, + # chances are that we can raise the quality, + # at least for the first packet: elapsed = now-self.statistics.last_packet_time quality += int(elapsed*25) scaling_discount = 0 @@ -2046,7 +2034,7 @@ def assign_sq_options(self, options, speed_pct : int=100, quality_pct : int=100) "rgb_formats" : self.rgb_formats, "lz4" : self.rgb_lz4, }) - if self.encoding=="grayscale": + if self.encoding == "grayscale": eoptions["grayscale"] = True if not self.supports_transparency: eoptions["alpha"] = False @@ -2054,17 +2042,15 @@ def assign_sq_options(self, options, speed_pct : int=100, quality_pct : int=100) eoptions["content-type"] = self.content_type return eoptions - def must_encode_full_frame(self, _encoding : str) -> bool: - #WindowVideoSource overrides this method + # WindowVideoSource overrides this method return self.full_frames_only - def free_image_wrapper(self, image : ImageWrapper) -> None: """ when not running in the UI thread, call this method to free an image wrapper safely """ - #log("free_image_wrapper(%s) thread_safe=%s", image, image.is_thread_safe()) + # log("free_image_wrapper(%s) thread_safe=%s", image, image.is_thread_safe()) if image.is_thread_safe(): image.free() else: @@ -2073,7 +2059,6 @@ def do_free_image(): image.free() self.idle_add(do_free_image) - def get_damage_image(self, x:int, y:int, w:int, h:int) -> ImageWrapper | None: self.ui_thread_check() def nodata(msg, *args) -> ImageWrapper | None: @@ -2082,13 +2067,13 @@ def nodata(msg, *args) -> ImageWrapper | None: if not self.window.is_managed(): return nodata("the window %s is not managed", self.window) ww, wh = self.may_update_window_dimensions() - if x+w<0 or y+h<0: + if x+w < 0 or y+h < 0: return nodata(f"dropped, window is 
offscreen at {x},{y}") - if x+w>ww or y+h>wh: - #window is now smaller than the region we're trying to request + if x+w > ww or y+h > wh: + # window is now smaller than the region we're trying to request w = ww-x h = wh-y - if w<=0 or h<=0: + if w <= 0 or h <= 0: return nodata(f"dropped, invalid dimensions {w},{h}") self._sequence += 1 sequence = self._sequence @@ -2097,17 +2082,17 @@ def nodata(msg, *args) -> ImageWrapper | None: image = self.window.get_image(x, y, w, h) if image is None: return nodata("no pixel data for window %s, wid=%s", self.window, self.wid) - #image may have been clipped to the new window size during resize: + # image may have been clipped to the new window size during resize: w = image.get_width() h = image.get_height() - if w==0 or h==0: + if w == 0 or h == 0: return nodata("invalid dimensions: %ix%i", w, h) if self.is_cancelled(sequence): self.free_image_wrapper(image) return nodata("sequence %i is cancelled", sequence) pixel_format = image.get_pixel_format() image_depth = image.get_depth() - if image_depth==32 and pixel_format.find("A")>=0: + if image_depth == 32 and pixel_format.find("A") >= 0: alpha = self.has_alpha opr = self._opaque_region if alpha and opr: @@ -2117,7 +2102,7 @@ def nodata(msg, *args) -> ImageWrapper | None: alpha = False break if not alpha: - pixel_format = pixel_format.replace("A", "X") #ie: BGRA -> BGRX + pixel_format = pixel_format.replace("A", "X") # ie: BGRA -> BGRX image.set_pixel_format(pixel_format) image_depth = 24 log("removed alpha from image metadata: %s", pixel_format) @@ -2125,7 +2110,8 @@ def nodata(msg, *args) -> ImageWrapper | None: self.pixel_format = pixel_format return image - def process_damage_region(self, damage_time, x : int, y : int, w : int, h : int, coding : str, options, flush=None) -> bool: + def process_damage_region(self, damage_time, x : int, y : int, w : int, h : int, + coding : str, options, flush=None) -> bool: """ Called by 'damage' or 'send_delayed_regions' to process a damage region. @@ -2157,7 +2143,7 @@ def process_damage_region(self, damage_time, x : int, y : int, w : int, h : int, item = (w, h, damage_time, now, image, coding, sequence, options, flush) self.call_in_encode_thread(True, self.make_data_packet_cb, *item) log("process_damage_region: wid=%i, sequence=%i, adding pixel data to encode queue (%4ix%-4i - %5s), elapsed time: %3.1f ms, request time: %3.1f ms", - self.wid, sequence, w, h, coding, 1000*(now-damage_time), 1000*(now-rgb_request_time)) + self.wid, sequence, w, h, coding, 1000*(now-damage_time), 1000*(now-rgb_request_time)) return True def scaled_size(self, image : ImageWrapper) -> tuple[int,int] | None: @@ -2167,13 +2153,12 @@ def scaled_size(self, image : ImageWrapper) -> tuple[int,int] | None: w, h = image.get_width(), image.get_height() ww, wh = self.window_dimensions crsw, crsh = crs - #resize if the render size is smaller - if ww-crsw>DOWNSCALE_THRESHOLD and wh-crsh>DOWNSCALE_THRESHOLD: - #keep the same proportions: + # resize if the render size is smaller + if ww-crsw > DOWNSCALE_THRESHOLD and wh-crsh > DOWNSCALE_THRESHOLD: + # keep the same proportions: return w*crsw//ww, h*crsh//wh return None - def make_data_packet_cb(self, w : int, h : int, damage_time, process_damage_time, image : ImageWrapper, coding : str, sequence : int, options, flush) -> None: """ This function is called from the damage data thread! 
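
# Illustrative aside, not taken from the xpra source: a minimal sketch of the proportional
# downscale that scaled_size() above applies when the client's render size is smaller than
# the window by more than DOWNSCALE_THRESHOLD in both dimensions.
# The window size (ww, wh), client render size (crsw, crsh) and image size (w, h) below are
# made-up example values.
def scaled_size_example() -> tuple[int, int]:
    ww, wh = 2560, 1440        # window dimensions
    crsw, crsh = 1280, 720     # client render size (smaller, so downscaling applies)
    w, h = 2560, 1440          # captured image dimensions
    # same proportional formula as scaled_size(): scale by the client/window ratio
    return w * crsw // ww, h * crsh // wh   # -> (1280, 720), keeping the aspect ratio
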
@@ -2191,45 +2176,44 @@ def make_data_packet_cb(self, w : int, h : int, damage_time, process_damage_time finally: self.free_image_wrapper(image) del image - #may have been cancelled whilst we processed it: + # may have been cancelled whilst we processed it: self.statistics.encoding_pending.pop(sequence, None) - #NOTE: we MUST send it (even if the window is cancelled by now..) - #because the code may rely on the client having received this frame + # NOTE: we MUST send it (even if the window is cancelled by now..) + # because the code may rely on the client having received this frame if not packet: return - #queue packet for sending: + # queue packet for sending: self.queue_damage_packet(packet, damage_time, process_damage_time, options) - def schedule_auto_refresh(self, packet : tuple, options) -> None: if not self.can_refresh(): self.cancel_refresh_timer() return encoding = packet[6] data = packet[7] - region = rectangle(*packet[2:6]) #x,y,w,h - client_options = packet[10] #info about this packet from the encoder + region = rectangle(*packet[2:6]) # x,y,w,h + client_options = packet[10] # info about this packet from the encoder self.do_schedule_auto_refresh(encoding, data, region, client_options, options) def do_schedule_auto_refresh(self, encoding : str, data, region, client_options, options) -> None: assert data - if self.encoding=="stream": - #streaming mode doesn't use refresh + if self.encoding == "stream": + # streaming mode doesn't use refresh return if encoding.startswith("png"): actual_quality = 100 - lossy = self.image_depth>32 or self.image_depth==30 - elif encoding.startswith("rgb") or encoding=="mmap": + lossy = self.image_depth > 32 or self.image_depth == 30 + elif encoding.startswith("rgb") or encoding == "mmap": actual_quality = 100 lossy = False else: actual_quality = client_options.get("quality", 0) lossy = ( - actual_quality=due_pixcount//2: + # a refresh is already due + if added_pixcount >= due_pixcount//2: #we have more than doubled the number of pixels to refresh #use the total due pct = 100*due_pixcount//window_pixcount - #don't use sqrt() on pct, - #so this will not move it forwards for small updates following bigger ones: + # don't use sqrt() on pct, + # so this will not move it forwards for small updates following bigger ones: sched_delay = max( self.batch_config.delay*5, self.min_auto_refresh_delay, @@ -2313,33 +2297,33 @@ def rec(msg): self.refresh_target_time = min(max_time, max(target_time, now + sched_delay/1000.0)) added_ms = int(1000*(self.refresh_target_time-target_time)) due_ms = int(1000*(self.refresh_target_time-now)) - if self.refresh_target_time==target_time: + if self.refresh_target_time == target_time: return rec(f"unchanged refresh: due in {due_ms}ms, pct={pct}") rec("re-scheduling refresh: due in %ims, %ims added - sched_delay=%s, pct=%i, batch=%i)" % ( due_ms, added_ms, sched_delay, pct, self.batch_config.delay)) def remove_refresh_region(self, region) -> None: - #removes the given region from the refresh list - #(also overridden in window video source) + # removes the given region from the refresh list + # (also overridden in window video source) remove_rectangle(self.refresh_regions, region) def add_refresh_region(self, region) -> int: - #adds the given region to the refresh list - #returns the number of pixels in the region update - #(overridden in window video source to exclude the video region) - #Note: this does not run in the UI thread! 
+ # adds the given region to the refresh list + # returns the number of pixels in the region update + # (overridden in window video source to exclude the video region) + # Note: this does not run in the UI thread! return add_rectangle(self.refresh_regions, region) def can_refresh(self) -> bool: if not AUTO_REFRESH: return False w = self.window - #safe to call from any thread (does not call X11): + # safe to call from any thread (does not call X11): if not w or not w.is_managed(): - #window is gone + # window is gone return False - if self.auto_refresh_delay<=0 or self.is_cancelled() or not self.auto_refresh_encodings: - #can happen during cleanup + if self.auto_refresh_delay <= 0 or self.is_cancelled() or not self.auto_refresh_encodings: + # can happen during cleanup return False return True @@ -2358,25 +2342,25 @@ def refresh_timer_function(self, damage_options) -> bool: self.refresh_event_time = 0 return False ret = self.refresh_event_time - if ret==0: + if ret == 0: return False delta = self.refresh_target_time - monotonic() - if delta<0.050: - #this is about right (due already or due shortly) + if delta < 0.050: + # this is about right (due already or due shortly) self.timer_full_refresh() return False - #re-schedule ourselves: + # re-schedule ourselves: self.refresh_timer = self.timeout_add(int(delta*1000), self.refresh_timer_function, damage_options) refreshlog("refresh_timer_function: rescheduling auto refresh timer with extra delay %ims", int(1000*delta)) return False def timer_full_refresh(self) -> bool: - #copy event time and list of regions (which may get modified by another thread) + # copy event time and list of regions (which may get modified by another thread) ret = self.refresh_event_time self.refresh_event_time = 0 regions = self.refresh_regions self.refresh_regions = [] - if self.can_refresh() and regions and ret>0: + if self.can_refresh() and regions and ret > 0: now = monotonic() options = self.get_refresh_options() refresh_exclude = self.get_refresh_exclude() #pylint: disable=assignment-from-none @@ -2394,23 +2378,23 @@ def get_refresh_encoding(self, w : int, h : int, options, coding : str) -> str: return encoding def get_refresh_exclude(self): - #overridden in window video source to exclude the video subregion + # overridden in window video source to exclude the video subregion return None def full_quality_refresh(self, damage_options) -> None: - #can be called from: + # can be called from: # * xpra control channel # * send timeout # * client decoding error if not self.window or not self.window.is_managed(): - #this window is no longer managed + # this window is no longer managed return if not self.auto_refresh_encodings or self.is_cancelled(): - #can happen during cleanup + # can happen during cleanup return refresh_regions = self.refresh_regions - #since we're going to refresh the whole window, - #we don't need to track what needs refreshing: + # since we're going to refresh the whole window, + # we don't need to track what needs refreshing: self.refresh_regions = [] w, h = self.window_dimensions refreshlog("full_quality_refresh() for %sx%s window with pending refresh regions: %s", w, h, refresh_regions) @@ -2418,7 +2402,7 @@ def full_quality_refresh(self, damage_options) -> None: encoding = self.auto_refresh_encodings[0] new_options.update(self.get_refresh_options()) refreshlog("full_quality_refresh() using %s with options=%s", encoding, new_options) - #just refresh the whole window: + # just refresh the whole window: regions = [rectangle(0, 0, w, h)] now = 
monotonic() damage = DelayedRegions(damage_time=now, regions=regions, encoding=encoding, options=new_options) @@ -2441,7 +2425,7 @@ def queue_damage_packet(self, packet, damage_time:float=0, process_damage_time:f - number of pixels in damage packet queue - damage latency (via a callback once the packet is actually sent) """ - #packet = ["draw", wid, x, y, w, h, coding, data, self._damage_packet_sequence, rowstride, client_options] + # packet = ["draw", wid, x, y, w, h, coding, data, self._damage_packet_sequence, rowstride, client_options] width = int(packet[4]) height = int(packet[5]) coding, data, damage_packet_sequence, _, client_options = packet[6:11] @@ -2457,24 +2441,24 @@ def damage_packet_sent(bytecount:int): now = monotonic() ack_pending[3] = now ack_pending[4] = bytecount - if process_damage_time>0: + if process_damage_time > 0: statistics.damage_out_latency.append((now, width*height, actual_batch_delay, now-process_damage_time)) elapsed_ms = int((now-ack_pending[0])*1000) - #only record slow send as congestion events - #if the bandwidth limit is already below the threshold: - if ldata>1024 and self.bandwidth_limit 1024 and self.bandwidth_limit < SLOW_SEND_THRESHOLD: #if this packet completed late, record congestion send speed: max_send_delay = 5 + self.estimate_send_delay(ldata) - if elapsed_ms>max_send_delay: + if elapsed_ms > max_send_delay: late_pct = round(elapsed_ms*100/max_send_delay)-100 send_speed = int(ldata*8*1000/elapsed_ms) self.networksend_congestion_event("slow send", late_pct, send_speed) self.schedule_auto_refresh(packet, options or {}) - if process_damage_time>0: + if process_damage_time > 0: now = monotonic() damage_in_latency = now-process_damage_time statistics.damage_in_latency.append((now, width*height, actual_batch_delay, damage_in_latency)) - #log.info("queuing %s packet with fail_cb=%s", coding, fail_cb) + # log.info("queuing %s packet with fail_cb=%s", coding, fail_cb) self.statistics.last_packet_time = monotonic() self.queue_packet(packet, self.wid, width*height, start_send, damage_packet_sent, self.get_fail_cb(packet), client_options.get("flush", 0)) @@ -2483,39 +2467,39 @@ def networksend_congestion_event(self, source, late_pct:int, cur_send_speed:int= gs = self.global_statistics if not gs: return - #calculate the send speed for the packet we just sent: + # calculate the send speed for the packet we just sent: now = monotonic() send_speed = cur_send_speed avg_send_speed = 0 if len(gs.bytes_sent)>=5: - #find a sample more than a second old - #(hopefully before the congestion started) + # find a sample more than a second old + # (hopefully before the congestion started) stime1 = svalue1 = svalue2 = 0 i = 1 - while i<4: + while i < 4: stime1, svalue1 = gs.bytes_sent[-i] i += 1 - if now-stime1>1: + if now-stime1 > 1: break - #find a sample more than 4 seconds earlier, - #with at least 64KB sent in between: + # find a sample more than 4 seconds earlier, + # with at least 64KB sent in between: t = 0 - while i10: - #too far back, not enough data sent in 10 seconds + if t > 10: + # too far back, not enough data sent in 10 seconds break - if t>=4 and (svalue1-svalue2)>=65536: + if t >= 4 and (svalue1-svalue2)>=65536: break i += 1 - if 4<=t<=10: - #calculate the send speed over that interval: + if 4 <= t <= 10: + # calculate the send speed over that interval: bcount = svalue1-svalue2 avg_send_speed = int(bcount*8//t) if cur_send_speed: - #weighted average, - #when we're very late, the value is much more likely to be correct + # weighted average, + # when we're 
very late, the value is much more likely to be correct send_speed = (avg_send_speed*100 + cur_send_speed*late_pct)//2//(100+late_pct) else: send_speed = avg_send_speed @@ -2523,7 +2507,7 @@ def networksend_congestion_event(self, source, late_pct:int, cur_send_speed:int= source, late_pct, cur_send_speed, send_speed//1024, avg_send_speed//1024, self.wid) rtt = self.refresh_target_time if rtt: - #a refresh now would really hurt us! + # a refresh now would really hurt us! self.refresh_target_time = max(rtt, now+2) self.record_congestion_event(source, late_pct, send_speed) @@ -2538,15 +2522,14 @@ def resend(): return resend def estimate_send_delay(self, bytecount : int) -> int: - #how long it should take to send this packet (in milliseconds) - #based on the bandwidth available (if we know it): + # how long it should take to send this packet (in milliseconds) + # based on the bandwidth available (if we know it): bl = self.bandwidth_limit - if bl>0: - #estimate based on current bandwidth limit: + if bl > 0: + # estimate based on current bandwidth limit: return 1000*bytecount*8//max(200000, bl) return int(10*logp(bytecount/1024.0)) - def damage_packet_acked(self, damage_packet_sequence, width, height, decode_time, message) -> None: """ The client is acknowledging a damage packet, @@ -2559,13 +2542,13 @@ def damage_packet_acked(self, damage_packet_sequence, width, height, decode_time """ statslog("packet decoding sequence %s for window %s: %sx%s took %.1fms", damage_packet_sequence, self.wid, width, height, decode_time/1000.0) - if decode_time>0: + if decode_time > 0: self.statistics.client_decode_time.append((monotonic(), width*height, decode_time)) - elif decode_time==WINDOW_DECODE_SKIPPED: + elif decode_time == WINDOW_DECODE_SKIPPED: log(f"client skipped decoding sequence {damage_packet_sequence} for window {self.wid}") - elif decode_time==WINDOW_NOT_FOUND: + elif decode_time == WINDOW_NOT_FOUND: log.warn("Warning: client cannot find window %i", self.wid) - elif decode_time==WINDOW_DECODE_ERROR: + elif decode_time == WINDOW_DECODE_ERROR: self.client_decode_error(decode_time, message) pending = self.statistics.damage_ack_pending.pop(damage_packet_sequence, None) if pending is None: @@ -2574,42 +2557,42 @@ def damage_packet_acked(self, damage_packet_sequence, width, height, decode_time gs = self.global_statistics start_send_at, _, start_bytes, end_send_at, end_bytes, pixels, client_options, damage_time = pending bytecount = end_bytes-start_bytes - #it is possible though unlikely - #that we get the ack before we've had a chance to call - #damage_packet_sent, so we must validate the data: - if bytecount>0 and end_send_at>0: + # it is possible though unlikely + # that we get the ack before we've had a chance to call + # damage_packet_sent, so we must validate the data: + if bytecount > 0 and end_send_at > 0: now = monotonic() - if decode_time>0: + if decode_time > 0: latency = int(1000*(now-damage_time)) self.global_statistics.record_latency(self.wid, damage_packet_sequence, decode_time, start_send_at, end_send_at, pixels, bytecount, latency) - #we can ignore some packets: + # we can ignore some packets: # * the first frame (frame=0) of video encoders can take longer to decode # as we have to create a decoder context frame_no = client_options.get("frame", None) # when flushing a screen update as multiple packets (network layer aggregation), # we could ignore all but the last one (flush=0): - #flush = client_options.get("flush", 0) + # flush = client_options.get("flush", 0) if frame_no!=0: netlatency 
= int(1000*gs.min_client_latency*(100+ACK_JITTER)//100) sendlatency = min(200, self.estimate_send_delay(bytecount)) - #decode = pixels//100000 #0.1MPixel/s: 2160p -> 8MPixels, 80ms budget + # decode = pixels//100000 #0.1MPixel/s: 2160p -> 8MPixels, 80ms budget live_time = int(1000*(now-self.statistics.init_time)) ack_tolerance = self.jitter + ACK_TOLERANCE + max(0, 200-live_time//10) latency = netlatency + sendlatency + decode_time + ack_tolerance - #late_by and latency are in ms, timestamps are in seconds: + # late_by and latency are in ms, timestamps are in seconds: actual = int(1000*(now-start_send_at)) late_by = actual-latency - if late_by>0 and (live_time>=1000 or pixels>=4096): + if late_by > 0 and (live_time >= 1000 or pixels >= 4096): actual_send_latency = actual-netlatency-decode_time late_pct = actual_send_latency*100//(1+sendlatency) - if pixels<=4096 or actual_send_latency<=0: - #small packets can really skew things, don't bother - #(this also filters out scroll packets which are tiny) + if pixels <= 4096 or actual_send_latency <= 0: + # small packets can really skew things, don't bother + # (this also filters out scroll packets which are tiny) send_speed = 0 else: send_speed = bytecount*8*1000//actual_send_latency - #statslog("send latency: expected up to %3i, got %3i, %6iKB sent in %3i ms: %5iKbps", + # statslog("send latency: expected up to %3i, got %3i, %6iKB sent in %3i ms: %5iKbps", # latency, actual, bytecount//1024, actual_send_latency, send_speed//1024) self.networksend_congestion_event("late-ack for sequence %6i: late by %3ims, target latency=%3i (%s)" % (damage_packet_sequence, late_by, latency, (netlatency, sendlatency, decode_time, ack_tolerance)), @@ -2622,13 +2605,13 @@ def call_may_send_delayed(): log("call_may_send_delayed()") self.cancel_may_send_timer() self.may_send_delayed() - #this function is called from the network thread, - #call via idle_add to prevent race conditions: + # this function is called from the network thread, + # call via idle_add to prevent race conditions: log("ack with expired delayed region: %s", damage_delayed) self.idle_add(call_may_send_delayed) def client_decode_error(self, error, message) -> None: - #don't print error code -1, which is just a generic code for error + # don't print error code -1, which is just a generic code for error emsg = {-1 : ""}.get(error, error) def s(v): return decode_str(v or b"") @@ -2656,7 +2639,7 @@ def cancel_decode_error_refresh_timer(self) -> None: def may_use_scrolling(self, _image, _options) -> bool: - #overridden in video source + # overridden in video source return False @@ -2689,18 +2672,18 @@ def nodata(msg, *args) -> None: y = image.get_target_y() w = image.get_width() h = image.get_height() - if w<=0 or h<=0: + if w <= 0 or h <= 0: raise RuntimeError(f"invalid dimensions: {w}x{h}") - #more useful is the actual number of bytes (assuming 32bpp) - #since we generally don't send the padding with it: + # more useful is the actual number of bytes (assuming 32bpp) + # since we generally don't send the padding with it: psize = w*h*4 log("make_data_packet: image=%s, damage data: %s", image, (self.wid, x, y, w, h, coding)) start = monotonic() if self.cuda_device_context: options["cuda-device-context"] = self.cuda_device_context - #by default, don't set rowstride (the container format will take care of providing it): + # by default, don't set rowstride (the container format will take care of providing it): encoder = self._encoders.get(coding) if encoder is None: if self.is_cancelled(sequence): @@ -2712,9 
+2695,9 @@ def nodata(msg, *args) -> None: get_encoder_type(encoder), (coding, image, options)) coding, data, client_options, outw, outh, outstride, bpp = ret - #check for cancellation again since the code above may take some time to encode: - #but never cancel mmap after encoding because we need to reclaim the space - #by getting the client to move the mmap received pointer + # check for cancellation again since the code above may take some time to encode: + # but never cancel mmap after encoding because we need to reclaim the space + # by getting the client to move the mmap received pointer if coding!="mmap": if self.is_cancelled(sequence): return nodata("cancelled after encoding") @@ -2724,7 +2707,7 @@ def nodata(msg, *args) -> None: if (LOG_ENCODERS or compresslog.is_debug_enabled()) and "encoder" not in client_options: mod = "mmap_encode" if encoder == self.mmap_encode else get_encoder_type(encoder) client_options["encoder"] = mod - #actual network packet: + # actual network packet: if flush not in (None, 0): client_options["flush"] = flush if self.send_timetamps: @@ -2757,18 +2740,18 @@ def make_draw_packet(self, x : int, y : int, outw : int, outh : int, self.global_statistics.packet_count += 1 self.statistics.packet_count += 1 self._damage_packet_sequence += 1 - #record number of frames and pixels: + # record number of frames and pixels: totals = self.statistics.encoding_totals.setdefault(coding, [0, 0]) totals[0] = totals[0] + 1 totals[1] = totals[1] + outw*outh self.encoding_last_used = coding - #log("make_data_packet: returning packet=%s", packet[:7]+[".."]+packet[8:]) + # log("make_data_packet: returning packet=%s", packet[:7]+[".."]+packet[8:]) return packet def direct_queue_draw(self, coding:str, data:bytes, client_info:dict) -> None: - #this is a frame from a compressed stream, - #send it to all the window sources for this window: + # this is a frame from a compressed stream, + # send it to all the window sources for this window: cdata = Compressed(coding, data) options : dict[str,Any] = {} x = y = 0 @@ -2781,9 +2764,9 @@ def direct_queue_draw(self, coding:str, data:bytes, client_info:dict) -> None: def mmap_encode(self, coding : str, image : ImageWrapper, _options) -> tuple: - assert coding=="mmap" - assert self._mmap and self._mmap_size>0 - #prepare the pixels in a format accepted by the client: + assert coding == "mmap" + assert self._mmap and self._mmap_size > 0 + # prepare the pixels in a format accepted by the client: pf = image.get_pixel_format() if pf not in self.rgb_formats: if not rgb_reformat(image, self.rgb_formats, self.supports_transparency): @@ -2792,20 +2775,21 @@ def mmap_encode(self, coding : str, image : ImageWrapper, _options) -> tuple: log.warn(f"Warning: cannot use mmap to send {pf}") return () pf = image.get_pixel_format() - #write to mmap area: + # write to mmap area: data = image.get_pixels() if not data: raise RuntimeError(f"failed to get pixels from {image}") - mmap_data, mmap_free_size = self.mmap_write(self._mmap, self._mmap_size, data) - #elapsed = monotonic()-start+0.000000001 #make sure never zero! - #log("%s MBytes/s - %s bytes written to mmap in %.1f ms", int(len(data)/elapsed/1024/1024), + from xpra.net.mmap import mmap_write + mmap_data, mmap_free_size = mmap_write(self._mmap, self._mmap_size, data) + # elapsed = monotonic()-start+0.000000001 #make sure never zero! 
+ # log("%s MBytes/s - %s bytes written to mmap in %.1f ms", int(len(data)/elapsed/1024/1024), # len(data), 1000*elapsed) if mmap_data is None: return () self.global_statistics.mmap_bytes_sent += len(data) self.global_statistics.mmap_free_size = mmap_free_size - #the data we send is the index within the mmap area: + # the data we send is the index within the mmap area: return ( "mmap", mmap_data, {"rgb_format" : pf}, image.get_width(), image.get_height(), image.get_rowstride(), len(pf)*8, - ) + ) diff --git a/xpra/server/window/video_compress.py b/xpra/server/window/video_compress.py index 9a2fc7e553..163b8a6375 100644 --- a/xpra/server/window/video_compress.py +++ b/xpra/server/window/video_compress.py @@ -68,11 +68,13 @@ SCALING_PPS_TARGET = envint("XPRA_SCALING_PPS_TARGET", 25*1920*1080) SCALING_MIN_PPS = envint("XPRA_SCALING_MIN_PPS", 25*320*240) DEFAULT_SCALING_OPTIONS = (1, 10), (1, 5), (1, 4), (1, 3), (1, 2), (2, 3), (1, 1) + + def parse_scaling_options_str(scaling_options_str) -> tuple: if not scaling_options_str: return DEFAULT_SCALING_OPTIONS - #parse 1/10,1/5,1/4,1/3,1/2,2/3,1/1 - #or even: 1:10, 1:5, ... + # parse 1/10,1/5,1/4,1/3,1/2,2/3,1/1 + # or even: 1:10, 1:5, ... vs_options = [] for option in scaling_options_str.split(","): try: @@ -90,6 +92,7 @@ def parse_scaling_options_str(scaling_options_str) -> tuple: if vs_options: return tuple(vs_options) return DEFAULT_SCALING_OPTIONS + SCALING_OPTIONS = parse_scaling_options_str(os.environ.get("XPRA_SCALING_OPTIONS")) scalinglog("scaling options: SCALING=%s, HARDCODED=%s, PPS_TARGET=%i, MIN_PPS=%i, OPTIONS=%s", SCALING, SCALING_HARDCODED, SCALING_PPS_TARGET, SCALING_MIN_PPS, SCALING_OPTIONS) @@ -158,8 +161,8 @@ class WindowVideoSource(WindowSource): """ def __init__(self, *args): - #this will call init_vars(): self.supports_scrolling : bool = False + # this will call init_vars(): super().__init__(*args) self.video_subregion = VideoSubregion(self.refresh_subregion, self.auto_refresh_delay, VIDEO_SUBREGION) self.supports_scrolling : bool = False @@ -174,8 +177,8 @@ def __repr__(self) -> str: def init_vars(self) -> None: super().init_vars() - #these constraints get updated with real values - #when we construct the video pipeline: + # these constraints get updated with real values + # when we construct the video pipeline: self.min_w : int = 8 self.min_h : int = 8 self.max_w : int = 16384 @@ -215,14 +218,14 @@ def do_init_encoders(self) -> None: def add(enc, encode_fn): self.insert_encoder(enc, enc, encode_fn) if has_codec("csc_libyuv"): - #need libyuv to be able to handle 'grayscale' video: - #(to convert ARGB to grayscale) + # need libyuv to be able to handle 'grayscale' video: + # (to convert ARGB to grayscale) add("grayscale", self.video_encode) - if self._mmap_size>0: + if self._mmap_size > 0: self.non_video_encodings = () self.common_video_encodings = () return - #make sure we actually have encoders for these: + # make sure we actually have encoders for these: vencs = self.video_helper.get_encodings() self.video_encodings = preforder(vencs) self.common_video_encodings = preforder(set(self.video_encodings) & set(self.core_encodings)) @@ -232,12 +235,12 @@ def add(enc, encode_fn): for x in self.common_video_encodings: self.append_encoder(x, self.video_encode) video_enabled.append(x) - #video_encode() is used for more than just video encoders: - #(always enable it and let it fall through) + # video_encode() is used for more than just video encoders: + # (always enable it and let it fall through) add("auto", self.video_encode) 
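
# Illustrative aside, not taken from the xpra source: a sketch of how the encoding overlap
# computed in do_init_encoders() above can be thought of. It assumes that preforder() keeps
# a fixed server-side preference order while dropping entries the other side does not
# support; the encoding names below are just example values.
def common_video_encodings_example() -> tuple[str, ...]:
    preference_order = ("h264", "vp9", "vp8")        # assumed server preference order
    server_video = {"h264", "vp8", "vp9"}            # encodings the video encoders offer
    client_core = {"h264", "vp8", "png", "jpeg"}     # core encodings the client can decode
    common = server_video & client_core              # what both sides support
    return tuple(e for e in preference_order if e in common)   # -> ("h264", "vp8")
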
add("stream", self.video_encode) - #these are used for non-video areas, ensure "jpeg" is used if available - #as we may be dealing with large areas still, and we want speed: + # these are used for non-video areas, ensure "jpeg" is used if available + # as we may be dealing with large areas still, and we want speed: enc_options = set(self.server_core_encodings) & set(self._encoders.keys()) nv_common = (enc_options & set(self.core_encodings)) - set(self.video_encodings) self.non_video_encodings = preforder(nv_common) @@ -258,17 +261,16 @@ def update_av_sync_frame_delay(self) -> None: self.av_sync_frame_delay = 0 ve = self._video_encoder if ve: - #how many frames are buffered in the encoder, if any: + # how many frames are buffered in the encoder, if any: d = ve.get_info().get("delayed", 0) - if d>0: - #clamp the batch delay to a reasonable range: + if d > 0: + # clamp the batch delay to a reasonable range: frame_delay = min(100, max(10, self.batch_config.delay)) self.av_sync_frame_delay += frame_delay * d avsynclog("update_av_sync_frame_delay() video encoder=%s, delayed frames=%i, frame delay=%i", ve, d, self.av_sync_frame_delay) self.may_update_av_sync_delay() - def get_property_info(self) -> dict[str,Any]: i = super().get_property_info() if self.scaling_control is None: @@ -311,7 +313,7 @@ def addcinfo(prefix, x): "time" : int(self.last_scroll_time*1000), } } - if self._last_pipeline_check>0: + if self._last_pipeline_check > 0: einfo["pipeline_last_check"] = int(1000*(monotonic()-self._last_pipeline_check)) lps = self.last_pipeline_scores if lps: @@ -335,10 +337,9 @@ def get_pipeline_info(self) -> dict[str,Any]: def suspend(self) -> None: super().suspend() - #we'll create a new video pipeline when resumed: + # we'll create a new video pipeline when resumed: self.cleanup_codecs() - def cleanup(self) -> None: super().cleanup() self.cleanup_codecs() @@ -380,10 +381,10 @@ def ve_clean(self, ve) -> None: self.cancel_video_encoder_timer() if ve: ve.clean() - #only send eos if this video encoder is still current, - #(otherwise, sending the new stream will have taken care of it already, + # only send eos if this video encoder is still current, + # (otherwise, sending the new stream will have taken care of it already, # and sending eos then would close the new stream, not the old one!) - if self._video_encoder==ve: + if self._video_encoder == ve: log("sending eos for wid %i", self.wid) self.queue_packet(("eos", self.wid)) if SAVE_VIDEO_STREAMS: @@ -400,25 +401,24 @@ def ui_cleanup(self) -> None: super().ui_cleanup() self.video_subregion = None - def set_new_encoding(self, encoding : str, strict=None) -> None: if self.encoding!=encoding: - #ensure we re-init the codecs asap: + # ensure we re-init the codecs asap: self.cleanup_codecs() super().set_new_encoding(encoding, strict) def insert_encoder(self, encoder_name : str, encoding : str, encode_fn : Callable) -> None: super().insert_encoder(encoder_name, encoding, encode_fn) - #we don't want to use nvjpeg as fallback, - #because it requires a GPU context - #and the fallback should be reliable. - #also, we only want picture encodings here, - #and filtering using EDGE_ENCODING_ORDER gives us that. + # we don't want to use nvjpeg as fallback, + # because it requires a GPU context + # and the fallback should be reliable. + # also, we only want picture encodings here, + # and filtering using EDGE_ENCODING_ORDER gives us that. 
if encoder_name!="nvjpeg" and encoding in EDGE_ENCODING_ORDER: self.video_fallback_encodings.setdefault(encoding, []).insert(0, encode_fn) def update_encoding_selection(self, encoding=None, exclude=None, init=False) -> None: - #override so we don't use encodings that don't have valid csc modes: + # override so we don't use encodings that don't have valid csc modes: log("wvs.update_encoding_selection(%s, %s, %s) full_csc_modes=%s", encoding, exclude, init, self.full_csc_modes) if exclude is None: exclude = [] @@ -432,7 +432,7 @@ def update_encoding_selection(self, encoding=None, exclude=None, init=False) -> if not csc_modes or x not in self.core_encodings: exclude.append(x) msg_args = ("Warning: client does not support any csc modes with %s on window %i", x, self.wid) - if x=="jpega" and not self.supports_transparency: + if x == "jpega" and not self.supports_transparency: log(f"skipping {x} since client does not support transparency") elif not init and first_time(f"no-csc-{x}-{self.wid}"): log.warn(*msg_args) @@ -446,7 +446,7 @@ def update_encoding_selection(self, encoding=None, exclude=None, init=False) -> self.supports_scrolling = "scroll" in self.common_encodings def do_set_client_properties(self, properties : typedict) -> None: - #client may restrict csc modes for specific windows + # client may restrict csc modes for specific windows self.supports_scrolling = "scroll" in self.common_encodings self.scroll_min_percent = properties.intget("scrolling.min-percent", self.scroll_min_percent) self.scroll_preference = properties.intget("scrolling.preference", self.scroll_preference) @@ -454,7 +454,7 @@ def do_set_client_properties(self, properties : typedict) -> None: if properties.get("scaling.control") is not None: self.scaling_control = max(0, min(100, properties.intget("scaling.control", 0))) super().do_set_client_properties(properties) - #encodings may have changed, so redo this: + # encodings may have changed, so redo this: nv_common = set(self.picture_encodings) & set(self.core_encodings) log("common non-video (%s & %s)=%s", self.picture_encodings, self.core_encodings, nv_common) self.non_video_encodings = preforder(nv_common) @@ -471,7 +471,7 @@ def get_best_encoding_impl_default(self) -> Callable: if self.is_tray: log("using default for tray") return super().get_best_encoding_impl_default() - if self.encoding=="stream": + if self.encoding == "stream": log("using stream encoding") return self.get_best_encoding_video if self.window_type.intersection(LOSSLESS_WINDOW_TYPES): @@ -484,7 +484,6 @@ def get_best_encoding_impl_default(self) -> Callable: log("using default best encoding") return super().get_best_encoding_impl_default() - def get_best_encoding_video(self, w : int, h : int, options, current_encoding : str) -> str: """ decide whether we send a full window update using the video encoder, @@ -497,7 +496,7 @@ def nonvideo(qdiff=None, info=""): videolog("nonvideo(%s, %s)", qdiff, info) return WindowSource.get_auto_encoding(self, w, h, options) - #log("get_best_encoding_video%s non_video_encodings=%s, common_video_encodings=%s, supports_scrolling=%s", + # log("get_best_encoding_video%s non_video_encodings=%s, common_video_encodings=%s, supports_scrolling=%s", # (pixel_count, ww, wh, speed, quality, current_encoding), # self.non_video_encodings, self.common_video_encodings, self.supports_scrolling) if not self.non_video_encodings: @@ -510,24 +509,24 @@ def nonvideo(qdiff=None, info=""): if text_hint and not TEXT_USE_VIDEO: return nonvideo(100, info="text content-type") - #ensure the 
dimensions we use for decision-making are the ones actually used:
+        # ensure the dimensions we use for decision-making are the ones actually used:
         cww = w & self.width_mask
         cwh = h & self.height_mask
-        if cww<64 or cwh<64:
+        if cww < 64 or cwh < 64:
             return nonvideo(info="area is too small")
-        if self.encoding=="stream":
+        if self.encoding == "stream":
             return current_encoding
         video_hint = int(self.content_type.find("video")>=0)
         if self.pixel_format:
-            #if we have a hardware video encoder, use video more:
+            # if we have a hardware video encoder, use video more:
             self.update_pipeline_scores()
             for i, score_data in enumerate(self.last_pipeline_scores):
                 encoder_spec = score_data[-1]
                 if encoder_spec.gpu_cost > encoder_spec.cpu_cost:
                     videolog(f"found GPU accelerated encoder {encoder_spec}")
-                    video_hint += 1+int(i==0)
+                    video_hint += 1+int(i == 0)
                     break
         rgbmax = self._rgb_auto_threshold
         videomin = cww*cwh // (1+video_hint*2)
@@ -536,81 +535,81 @@ def nonvideo(qdiff=None, info=""):
             videomin = min(videomin, sr.width * sr.height)
             rgbmax = min(rgbmax, sr.width*sr.height//2)
         elif text_hint:
-            #TEXT_USE_VIDEO must be set,
-            #but only use video if the whole area changed:
+            # TEXT_USE_VIDEO must be set,
+            # but only use video if the whole area changed:
             videomin = cww*cwh
         else:
             videomin = min(640*480, cww*cwh) // (1+video_hint*2)
-        #log(f"ww={ww}, wh={wh}, rgbmax={rgbmax}, videohint={video_hint} videomin={videomin}, sr={sr}, pixel_count={pixel_count}")
+        # log(f"ww={ww}, wh={wh}, rgbmax={rgbmax}, videohint={video_hint} videomin={videomin}, sr={sr}, pixel_count={pixel_count}")
         pixel_count = w*h
-        if pixel_count<=rgbmax:
+        if pixel_count <= rgbmax:
             return nonvideo(info=f"low pixel count {pixel_count}")
         if current_encoding not in ("auto", "grayscale") and current_encoding not in self.common_video_encodings:
             return nonvideo(info=f"{current_encoding} not a supported video encoding")
-        if cww<self.min_w or cww>self.max_w or cwh<self.min_h or cwh>self.max_h:
+        if cww < self.min_w or cww > self.max_w or cwh < self.min_h or cwh > self.max_h:
             return nonvideo(info="size out of range for video encoder")
         now = monotonic()
-        if now-self.statistics.last_packet_time>1:
+        if now-self.statistics.last_packet_time > 1:
             return nonvideo(info="no recent updates")
-        if now-self.statistics.last_resized<0.350:
+        if now-self.statistics.last_resized < 0.350:
            return nonvideo(info="resized recently")
         if sr and ((sr.width&self.width_mask)!=cww or (sr.height&self.height_mask)!=cwh):
-            #we have a video region, and this is not it, so don't use video
-            #raise the quality as the areas around video tend to not be updating as quickly
+            # we have a video region, and this is not it, so don't use video
+            # raise the quality as the areas around video tend to not be updating as quickly
             return nonvideo(30, "not the video region")
         if not video_hint and not self.is_shadow:
-            if now-self.global_statistics.last_congestion_time>5:
+            if now-self.global_statistics.last_congestion_time > 5:
                 lde = tuple(self.statistics.last_damage_events)
                 lim = now-4
-                pixels_last_4secs = sum(w*h for when,_,_,w,h in lde if when>lim)
-                if pixels_last_4secs<((3+text_hint*6)*videomin):
+                pixels_last_4secs = sum(w*h for when,_,_,w,h in lde if when > lim)
+                if pixels_last_4secs < ((3+text_hint*6)*videomin):
                     return nonvideo(info="not enough frames")
                 lim = now-1
-                pixels_last_sec = sum(w*h for when,_,_,w,h in lde if when>lim)
-                if pixels_last_sec<pixels_last_4secs//8:
-                    #framerate is dropping?
+                pixels_last_sec = sum(w*h for when,_,_,w,h in lde if when > lim)
+                if pixels_last_sec < pixels_last_4secs//8:
+                    # framerate is dropping?
return nonvideo(30, "framerate lowered") - #calculate the threshold for using video vs small regions: + # calculate the threshold for using video vs small regions: speed = options.get("speed", self._current_speed) factors = ( - max(1, (speed-75)/5.0), #speed multiplier - 1 + int(self.is_OR or self.is_tray)*2, #OR windows tend to be static - max(1, 10-self._sequence), #gradual discount the first 9 frames, as the window may be temporary - 1.0 / (int(bool(self._video_encoder)) + 1), #if we have a video encoder already, make it more likely we'll use it: + max(1, (speed-75)/5.0), # speed multiplier + 1 + int(self.is_OR or self.is_tray)*2, # OR windows tend to be static + max(1, 10-self._sequence), # gradual discount the first 9 frames, as the window may be temporary + 1.0 / (int(bool(self._video_encoder)) + 1), # if we have a video encoder already, make it more likely we'll use it: ) max_nvp = int(reduce(operator.mul, factors, MAX_NONVIDEO_PIXELS)) - if pixel_count<=max_nvp: - #below threshold + if pixel_count <= max_nvp: + # below threshold return nonvideo(info=f"not enough pixels: {pixel_count}<{max_nvp}") return current_encoding def get_best_nonvideo_encoding(self, ww : int, wh : int, options : dict, current_encoding=None, encoding_options=()) -> str: - if self.encoding=="grayscale": + if self.encoding == "grayscale": return self.encoding_is_grayscale(ww, wh, options, current_encoding or self.encoding) - #if we're here, then the window has no alpha (or the client cannot handle alpha) - #and we can ignore the current encoding + # if we're here, then the window has no alpha (or the client cannot handle alpha) + # and we can ignore the current encoding encoding_options = encoding_options or self.non_video_encodings depth = self.image_depth - if (depth==8 and "png/P" in encoding_options) or self.encoding=="png/P": + if (depth == 8 and "png/P" in encoding_options) or self.encoding == "png/P": return "png/P" - if self.encoding=="png/L": + if self.encoding == "png/L": return "png/L" - if self._mmap_size>0: + if self._mmap_size > 0: return "mmap" return super().do_get_auto_encoding(ww, wh, options, current_encoding or self.encoding, encoding_options) def do_damage(self, ww : int, wh : int, x : int, y : int, w : int, h : int, options): - if ww>=64 and wh>=64 and self.encoding=="stream" and STREAM_MODE=="gstreamer" and self.common_video_encodings: - #in this mode, we start a pipeline once - #and let it submit packets, bypassing all the usual logic: + if ww >= 64 and wh >= 64 and self.encoding == "stream" and STREAM_MODE == "gstreamer" and self.common_video_encodings: + # in this mode, we start a pipeline once + # and let it submit packets, bypassing all the usual logic: if not self.gstreamer_pipeline: self.start_gstreamer_pipeline() return @@ -618,7 +617,7 @@ def do_damage(self, ww : int, wh : int, x : int, y : int, w : int, h : int, opti if vs: r = vs.rectangle if r and r.intersects(x, y, w, h): - #the damage will take care of scheduling it again + # the damage will take care of scheduling it again vs.cancel_refresh_timer() super().do_damage(ww, wh, x, y, w, h, options) @@ -667,8 +666,8 @@ def cancel_damage(self, limit : int=0): self.last_scroll_time = 0 super().cancel_damage(limit) self.stop_gstreamer_pipeline() - #we must clean the video encoder to ensure - #we will resend a key frame because we may be missing a frame + # we must clean the video encoder to ensure + # we will resend a key frame because we may be missing a frame self.cleanup_codecs() @@ -676,15 +675,15 @@ def 
full_quality_refresh(self, damage_options : dict) -> None: vs = self.video_subregion if vs and vs.rectangle: if vs.detection: - #reset the video region on full quality refresh + # reset the video region on full quality refresh vs.reset() else: - #keep the region, but cancel the refresh: + # keep the region, but cancel the refresh: vs.cancel_refresh_timer() self.free_scroll_data() self.last_scroll_time = 0 if self.non_video_encodings: - #refresh the whole window in one go: + # refresh the whole window in one go: damage_options["novideo"] = True super().full_quality_refresh(damage_options) @@ -704,19 +703,17 @@ def speed_changed(self, window, *args) -> bool: self.video_context_clean() return True - def client_decode_error(self, error, message) -> None: - #maybe the stream is now corrupted.. + # maybe the stream is now corrupted.. self.cleanup_codecs() super().client_decode_error(error, message) - def get_refresh_exclude(self): - #exclude video region (if any) from lossless refresh: + # exclude video region (if any) from lossless refresh: return self.video_subregion.rectangle def refresh_subregion(self, regions) -> bool: - #callback from video subregion to trigger a refresh of some areas + # callback from video subregion to trigger a refresh of some areas if not regions: regionrefreshlog("refresh_subregion(%s) nothing to refresh", regions) return False @@ -724,7 +721,7 @@ def refresh_subregion(self, regions) -> bool: regionrefreshlog("refresh_subregion(%s) cannot refresh", regions) return False now = monotonic() - if now-self.global_statistics.last_congestion_time<5: + if now-self.global_statistics.last_congestion_time < 5: regionrefreshlog("refresh_subregion(%s) skipping refresh due to congestion", regions) return False self.flush_video_encoder_now() @@ -739,7 +736,7 @@ def get_refresh_subregion_encoding(self, *_args) -> str: ww, wh = self.window_dimensions w, h = ww, wh vr = self.video_subregion.rectangle - #could have been cleared by another thread: + # could have been cleared by another thread: if vr: w, h = vr.width, vr.height options = { @@ -750,32 +747,32 @@ def get_refresh_subregion_encoding(self, *_args) -> str: self.auto_refresh_encodings[0], self.auto_refresh_encodings) def remove_refresh_region(self, region) -> None: - #override so we can update the subregion timers / regions tracking: + # override so we can update the subregion timers / regions tracking: super().remove_refresh_region(region) self.video_subregion.remove_refresh_region(region) def add_refresh_region(self, region) -> int: - #Note: this does not run in the UI thread! - #returns the number of pixels in the region update - #don't refresh the video region as part of normal refresh, - #use subregion refresh for that + # Note: this does not run in the UI thread! 
+ # returns the number of pixels in the region update + # don't refresh the video region as part of normal refresh, + # use subregion refresh for that sarr = super().add_refresh_region vr = self.video_subregion.rectangle if vr is None: - #no video region, normal code path: + # no video region, normal code path: return sarr(region) if vr.contains_rect(region): - #all of it is in the video region: + # all of it is in the video region: self.video_subregion.add_video_refresh(region) return 0 ir = vr.intersection_rect(region) if ir is None: - #region is outside video region, normal code path: + # region is outside video region, normal code path: return sarr(region) - #add intersection (rectangle in video region) to video refresh: + # add intersection (rectangle in video region) to video refresh: self.video_subregion.add_video_refresh(ir) - #add any rectangles not in the video region - #(if any: keep track if we actually added anything) + # add any rectangles not in the video region + # (if any: keep track if we actually added anything) return sum(sarr(r) for r in region.subtract_rect(vr)) def matches_video_subregion(self, width : int, height : int): @@ -801,13 +798,12 @@ def subregion_is_video(self) -> bool: if self.content_type.find("video")>=0: min_video_events //= 2 min_video_fps //= 2 - if events_count=pixels_in_region*40/100: + if pixels_intersect >= pixels_in_region*40/100: #we have at least 40% of the video region #that needs refreshing, do it: actual_vr = vr @@ -871,13 +867,13 @@ def send_nonvideo(regions=regions, encoding:str=coding, exclude_region=None, #still no luck? if actual_vr is None: #try to find one that has the same dimensions: - same_d = tuple(r for r in regions if r.width==vr.width and r.height==vr.height) + same_d = tuple(r for r in regions if r.width == vr.width and r.height == vr.height) if len(same_d)==1: #probably right.. 
actual_vr = same_d[0] elif len(same_d)>1: #find one that shares at least one coordinate: - same_c = tuple(r for r in same_d if r.x==vr.x or r.y==vr.y) + same_c = tuple(r for r in same_d if r.x == vr.x or r.y == vr.y) if len(same_c)==1: actual_vr = same_c[0] @@ -887,19 +883,19 @@ def send_nonvideo(regions=regions, encoding:str=coding, exclude_region=None, #found the video region: #sanity check in case the window got resized since: ww, wh = self.window_dimensions - if actual_vr.x+actual_vr.width>ww or actual_vr.y+actual_vr.height>wh: + if actual_vr.x+actual_vr.width > ww or actual_vr.y+actual_vr.height > wh: sublog("video region partially outside the window") send_nonvideo(encoding="") return #send this using the video encoder: video_options = self.assign_sq_options(options, quality_pct=70) - #TODO: encode delay can be derived rather than hard-coded + # TODO: encode delay can be derived rather than hard-coded encode_delay = 50 video_options["av-delay"] = max(0, self.get_frame_encode_delay(options) - encode_delay) self.process_damage_region(damage_time, actual_vr.x, actual_vr.y, actual_vr.width, actual_vr.height, coding, video_options) - #now subtract this region from the rest: + # now subtract this region from the rest: trimmed = [] for r in regions: trimmed += r.subtract_rect(actual_vr) @@ -909,28 +905,28 @@ def send_nonvideo(regions=regions, encoding:str=coding, exclude_region=None, sublog("send_regions: subtracted %s from %s gives us %s", actual_vr, regions, trimmed) regions = trimmed - #merge existing damage delayed region if there is one: - #(this codepath can fire from a video region refresh callback) + # merge existing damage delayed region if there is one: + # (this codepath can fire from a video region refresh callback) dr = self._damage_delayed if dr: regions = dr.regions + regions damage_time = min(damage_time, dr.damage_time) self._damage_delayed = None self.cancel_expire_timer() - #decide if we want to send the rest now or delay some more, - #only delay once the video encoder has dealt with a few frames: + # decide if we want to send the rest now or delay some more, + # only delay once the video encoder has dealt with a few frames: event_count = max(0, self.statistics.damage_events_count - self.video_subregion.set_at) - if not actual_vr or event_count<100: + if not actual_vr or event_count < 100: delay = 0 else: - #non-video is delayed at least 50ms, 4 times the batch delay, but no more than non_max_wait: + # non-video is delayed at least 50ms, 4 times the batch delay, but no more than non_max_wait: elapsed = int(1000.0*(monotonic()-damage_time)) delay = max(self.batch_config.delay*4, self.batch_config.expire_delay) delay = min(delay, self.video_subregion.non_max_wait-elapsed) delay = int(delay) sublog("send_regions event_count=%s, actual_vr=%s, delay=%s", event_count, actual_vr, delay) - if delay<=25: + if delay <= 25: send_nonvideo(regions=regions, encoding="", exclude_region=actual_vr) else: self._damage_delayed = DelayedRegions(damage_time, regions, coding, options=options) @@ -941,10 +937,9 @@ def must_encode_full_frame(self, encoding : str) -> bool: non_video = self.non_video_encodings r = self.full_frames_only or not non_video or (encoding in self.video_encodings and encoding not in non_video) log("must_encode_full_frame(%s)=%s full_frames_only=%s, non_video=%s, video=%s", - encoding, r, self.full_frames_only, non_video, self.video_encodings) + encoding, r, self.full_frames_only, non_video, self.video_encodings) return r - def process_damage_region(self, damage_time, x : 
int, y : int, w : int, h : int, coding : str, options, flush=None): """ @@ -984,7 +979,7 @@ def process_damage_region(self, damage_time, x : int, y : int, w : int, h : int, # (the xshm backing may change from underneath us if we don't freeze it) av_delay = options.get("av-delay", 0) video_mode = coding in self.common_video_encodings or (coding in ("auto", "stream") and self.common_video_encodings) - must_freeze = av_delay>0 or (video_mode and not image.is_thread_safe()) + must_freeze = av_delay > 0 or (video_mode and not image.is_thread_safe()) log("process_damage_region: av_delay=%s, must_freeze=%s, size=%s, encoding=%s", av_delay, must_freeze, (w, h), coding) if must_freeze: @@ -998,12 +993,12 @@ def call_encode(ew : int, eh : int, eimage, encoding : str, flush): log("process_damage_region: wid=%i, sequence=%i, adding pixel data to encode queue (%4ix%-4i - %5s), elapsed time: %3.1f ms, request time: %3.1f ms, frame delay=%3ims", self.wid, sequence, ew, eh, encoding, 1000*(now-damage_time), 1000*(now-rgb_request_time), av_delay) item = (ew, eh, damage_time, now, eimage, encoding, sequence, options, flush) - if av_delay<=0: + if av_delay <= 0: self.call_in_encode_thread(True, self.make_data_packet_cb, *item) else: self.encode_queue.append(item) self.schedule_encode_from_queue(av_delay) - #now figure out if we need to send edges separately: + # now figure out if we need to send edges separately: ee = self.edge_encoding ow = w oh = h @@ -1013,18 +1008,18 @@ def call_encode(ew : int, eh : int, eimage, encoding : str, flush): if video_mode and ee: dw = ow - w dh = oh - h - if dw>0 and h>0: + if dw > 0 and h > 0: sub = image.get_sub_image(w, 0, dw, oh) regions.append((dw, h, sub, ee)) - if dh>0 and w>0: + if dh > 0 and w > 0: sub = image.get_sub_image(0, h, ow, dh) regions.append((dw, h, sub, ee)) - #the main area: - if w>0 and h>0: + # the main area: + if w > 0 and h > 0: regions.append((w, h, image, coding)) - #process all regions: + # process all regions: if regions: - #ensure that the flush value ends at 0 on the last region: + # ensure that the flush value ends at 0 on the last region: flush = max(len(regions)-1, flush or 0) for i, region in enumerate(regions): w, h, image, coding = region @@ -1034,25 +1029,25 @@ def call_encode(ew : int, eh : int, eimage, encoding : str, flush): def get_frame_encode_delay(self, options) -> int: if not self.av_sync: return 0 - if FORCE_AV_DELAY>=0: + if FORCE_AV_DELAY >= 0: return FORCE_AV_DELAY content_type = options.get("content-type", self.content_type) if AV_SYNC_DEFAULT: - #default to on, unless we're quite sure it should not be used: + # default to on, unless we're quite sure it should not be used: if any(content_type.find(x)>=0 for x in ("text", "picture")): return 0 else: - #default to off unless content-type says otherwise: + # default to off unless content-type says otherwise: if content_type.find("audio")<0: return 0 l = len(self.encode_queue) - if l>=self.encode_queue_max_size: - #we must free some space! + if l >= self.encode_queue_max_size: + # we must free some space! 
return 0 return self.av_sync_delay def cancel_encode_from_queue(self) -> None: - #free all items in the encode queue: + # free all items in the encode queue: self.encode_from_queue_due = 0 eqt : int = self.encode_from_queue_timer avsynclog("cancel_encode_from_queue() timer=%s for wid=%i", eqt, self.wid) @@ -1072,11 +1067,11 @@ def free_encode_queue_images(self): self.free_image_wrapper(image) def schedule_encode_from_queue(self, av_delay : int) -> None: - #must be called from the UI thread for synchronization - #we ensure that the timer will fire no later than av_delay - #re-scheduling it if it was due later than that + # must be called from the UI thread for synchronization + # we ensure that the timer will fire no later than av_delay + # re-scheduling it if it was due later than that due = monotonic()+av_delay/1000.0 - if self.encode_from_queue_due==0 or due None: self.call_in_encode_thread(True, self.encode_from_queue) def encode_from_queue(self) -> None: - #note: we use a queue here to ensure we preserve the order - #(so we encode frames in the same order they were grabbed) + # note: we use a queue here to ensure we preserve the order + # (so we encode frames in the same order they were grabbed) eq = self.encode_queue avsynclog("encode_from_queue: %s items for wid=%i", len(eq), self.wid) if not eq: return #nothing to encode, must have been picked off already self.update_av_sync_delay() - #find the first item which is due - #in seconds, same as monotonic(): + # find the first item which is due + # in seconds, same as monotonic(): if len(self.encode_queue)>=self.encode_queue_max_size: av_delay = 0 #we must free some space! - elif FORCE_AV_DELAY>0: + elif FORCE_AV_DELAY > 0: av_delay = FORCE_AV_DELAY/1000.0 else: av_delay = self.av_sync_delay/1000.0 @@ -1108,10 +1103,10 @@ def encode_from_queue(self) -> None: index = 0 item = None sequence = None - done_packet = False #only one packet per iteration + done_packet = False # only one packet per iteration try: for index,item in enumerate(eq): - #item = (w, h, damage_time, now, image, coding, sequence, options, flush) + # item = (w, h, damage_time, now, image, coding, sequence, options, flush) sequence = item[6] if self.is_cancelled(sequence): self.free_image_wrapper(item[4]) @@ -1119,27 +1114,27 @@ def encode_from_queue(self) -> None: continue ts = item[3] due = ts + av_delay - if due<=now and not done_packet: - #found an item which is due + if due <= now and not done_packet: + # found an item which is due remove.append(index) avsynclog("encode_from_queue: processing item %s/%s (overdue by %ims)", index+1, len(self.encode_queue), int(1000*(now-due))) self.make_data_packet_cb(*item) done_packet = True else: - #we only process one item per call (see "done_packet") - #and just keep track of extra ones: + # we only process one item per call (see "done_packet") + # and just keep track of extra ones: still_due.append(int(1000*(due-now))) except Exception: if not self.is_cancelled(sequence): avsynclog.error("error processing encode queue at index %i", index) avsynclog.error("item=%s", item, exc_info=True) - #remove the items we've dealt with: - #(in reverse order since we pop them from the queue) + # remove the items we've dealt with: + # (in reverse order since we pop them from the queue) if remove: for x in reversed(remove): eq.pop(x) - #if there are still some items left in the queue, re-schedule: + # if there are still some items left in the queue, re-schedule: if not still_due: avsynclog("encode_from_queue: nothing due") return @@ -1148,7 
+1143,6 @@ def encode_from_queue(self) -> None: first_due, still_due, self.av_sync_delay, av_delay, self.wid) self.idle_add(self.schedule_encode_from_queue, first_due) - def update_encoding_video_subregion(self) -> None: """ We may need to update the video subregion based on the change in encoding, @@ -1158,15 +1152,15 @@ def update_encoding_video_subregion(self) -> None: vs = self.video_subregion if not vs: return - if self.encoding=="stream": + if self.encoding == "stream": vs.reset() return if (self.encoding not in ("auto", "grayscale") and self.encoding not in self.common_video_encodings) or \ self.full_frames_only or STRICT_MODE or not self.non_video_encodings or not self.common_video_encodings or \ (self.content_type.find("text")>=0 and TEXT_USE_VIDEO) or \ - self._mmap_size>0: - #cannot use video subregions - #FIXME: small race if a refresh timer is due when we change encoding - meh + self._mmap_size > 0: + # cannot use video subregions + # FIXME: small race if a refresh timer is due when we change encoding - meh vs.reset() return old = vs.rectangle @@ -1180,26 +1174,26 @@ def update_encoding_video_subregion(self) -> None: newrect = vs.rectangle if ((newrect is None) ^ (old is None)) or newrect!=old: if old is None and newrect and newrect.get_geometry()==(0, 0, ww, wh): - #not actually changed! - #the region is the whole window + # not actually changed! + # the region is the whole window pass elif newrect is None and old and old.get_geometry()==(0, 0, ww, wh): - #not actually changed! - #the region is the whole window + # not actually changed! + # the region is the whole window pass else: videolog("video subregion was %s, now %s (window size: %i,%i)", old, newrect, ww, wh) self.cleanup_codecs() if newrect: - #remove this from regular refresh: + # remove this from regular refresh: if old is None or old!=newrect: refreshlog("identified new video region: %s", newrect) - #figure out if the new region had pending regular refreshes: + # figure out if the new region had pending regular refreshes: subregion_needs_refresh = any(newrect.intersects_rect(x) for x in self.refresh_regions) if old: - #we don't bother subtracting new and old (too complicated) + # we don't bother subtracting new and old (too complicated) refreshlog("scheduling refresh of old region: %s", old) - #this may also schedule a refresh: + # this may also schedule a refresh: super().add_refresh_region(old) super().remove_refresh_region(newrect) if not self.refresh_regions: @@ -1209,7 +1203,7 @@ def update_encoding_video_subregion(self) -> None: else: refreshlog("video region unchanged: %s - no change in refresh", newrect) elif old: - #add old region to regular refresh: + # add old region to regular refresh: refreshlog("video region cleared, scheduling refresh of old region: %s", old) self.add_refresh_region(old) vs.cancel_refresh_timer() @@ -1243,8 +1237,8 @@ def update_pipeline_scores(self, force_reload=False) -> None: Can be called from any thread. """ - #start with simple sanity checks: - if self._mmap_size>0: + # start with simple sanity checks: + if self._mmap_size > 0: scorelog("cannot score: mmap enabled") return if self.is_cancelled(): @@ -1255,15 +1249,15 @@ def update_pipeline_scores(self, force_reload=False) -> None: return if not self.pixel_format: scorelog("cannot score: no pixel format!") - #we need to know what pixel format we create pipelines for! + # we need to know what pixel format we create pipelines for! 
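# A minimal standalone sketch (illustration only, not from the diff) of the
# checks that make update_encoding_video_subregion() above give up on video
# subregions.  The parameter names are stand-ins for the corresponding
# attributes and globals (self.encoding, STRICT_MODE, TEXT_USE_VIDEO,
# self._mmap_size, ...).
def sketch_can_use_video_subregion(encoding: str, common_video: tuple, non_video: tuple,
                                   full_frames_only: bool, strict_mode: bool,
                                   content_type: str, text_use_video: bool,
                                   mmap_size: int) -> bool:
    if encoding == "stream":
        # "stream" always encodes the whole window:
        return False
    if encoding not in ("auto", "grayscale") and encoding not in common_video:
        return False
    if full_frames_only or strict_mode or not non_video or not common_video:
        return False
    if "text" in content_type and text_use_video:
        return False
    # mmap bypasses encoding altogether:
    return mmap_size <= 0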
return def checknovideo(*info): - #for whatever reason, we shouldn't be using a video encoding, - #get_best_encoding() should ensure we don't end up with one - #it duplicates some of these same checks + # for whatever reason, we shouldn't be using a video encoding, + # get_best_encoding() should ensure we don't end up with one + # it duplicates some of these same checks scorelog(*info) self.cleanup_codecs() - #which video encodings to evaluate: + # which video encodings to evaluate: if self.encoding in ("auto", "stream", "grayscale"): if not self.common_video_encodings: return checknovideo("no common video encodings") @@ -1281,17 +1275,17 @@ def checknovideo(*info): if r: w = r.width & self.width_mask h = r.height & self.width_mask - if wself.max_w or hself.max_h: + if w < self.min_w or w > self.max_w or h < self.min_h or h > self.max_h: checknovideo("out of bounds: %sx%s (min %sx%s, max %sx%s)", w, h, self.min_w, self.min_h, self.max_w, self.max_h) return - #more sanity checks to see if there is any point in finding a suitable video encoding pipeline: - if self._sequence<2: + # more sanity checks to see if there is any point in finding a suitable video encoding pipeline: + if self._sequence < 2: #too early, or too late! checknovideo(f"not scoring: sequence={self._sequence}") return - #must copy reference to those objects because of threading races: + # must copy reference to those objects because of threading races: ve = self._video_encoder csce = self._csc_encoder if ve is not None and ve.is_closed(): @@ -1305,8 +1299,8 @@ def checknovideo(*info): max_elapsed = 0.75 if self.is_idle: max_elapsed = 60 - if not force_reload and elapsed bool: if not isinstance(ve, encoder_spec.codec_class): scorelog(f" found a better video encoder class than {type(ve)}: {encoder_spec.codec_class}") return False - #everything is still valid: + # everything is still valid: return True def get_video_pipeline_options(self, encodings : tuple[str,...], width : int, height : int, src_format : str) -> tuple: @@ -1378,23 +1372,23 @@ def get_video_pipeline_options(self, encodings : tuple[str,...], width : int, he """ vh = self.video_helper if vh is None: - return () #closing down + return () # closing down target_q = int(self._current_quality) min_q = self._fixed_min_quality target_s = int(self._current_speed) min_s = self._fixed_min_speed - #tune quality target for (non-)video region: + # tune quality target for (non-)video region: vr = self.matches_video_subregion(width, height) - if vr and target_q<100: + if vr and target_q < 100: if self.subregion_is_video(): - #lower quality a bit more: + # lower quality a bit more: fps = self.video_subregion.fps f = min(90, 2*fps) target_q = max(min_q, int(target_q*(100-f)//100)) scorelog("lowering quality target %i by %i%% for video %s (fps=%i)", target_q, f, vr, fps) else: - #not the video region, or not really video content, raise quality a bit: + # not the video region, or not really video content, raise quality a bit: target_q = int(sqrt(target_q/100.0)*100) scorelog("raising quality for video encoding of non-video region") scorelog("get_video_pipeline_options%s speed: %s (min %s), quality: %s (min %s)", @@ -1403,8 +1397,8 @@ def get_video_pipeline_options(self, encodings : tuple[str,...], width : int, he ffps = self.get_video_fps(width, height) scores = [] for encoding in encodings: - #these are the CSC modes the client can handle for this encoding: - #we must check that the output csc mode for each encoder is one of those + # these are the CSC modes the client can handle 
for this encoding: + # we must check that the output csc mode for each encoder is one of those supported_csc_modes = self.full_csc_modes.strtupleget(encoding) if not supported_csc_modes: scorelog(" no supported csc modes for %s", encoding) @@ -1413,9 +1407,9 @@ def get_video_pipeline_options(self, encodings : tuple[str,...], width : int, he if not encoder_specs: scorelog(" no encoder specs for %s", encoding) continue - #if not specified as an encoding option, - #discount encodings further down the list of preferred encodings: - #(ie: prefer h264 to vp9) + # if not specified as an encoding option, + # discount encodings further down the list of preferred encodings: + # (ie: prefer h264 to vp9) try: encoding_score_delta = len(PREFERRED_ENCODING_ORDER)//2-list(PREFERRED_ENCODING_ORDER).index(encoding) except ValueError: @@ -1423,14 +1417,14 @@ def get_video_pipeline_options(self, encodings : tuple[str,...], width : int, he encoding_score_delta = self.encoding_options.get(f"{encoding}.score-delta", encoding_score_delta) no_match = [] def add_scores(info, csc_spec, enc_in_format): - #find encoders that take 'enc_in_format' as input: + # find encoders that take 'enc_in_format' as input: colorspace_specs = encoder_specs.get(enc_in_format) if not colorspace_specs: no_match.append(info) return - #log("%s encoding from %s: %s", info, pixel_format, colorspace_specs) + # log("%s encoding from %s: %s", info, pixel_format, colorspace_specs) for encoder_spec in colorspace_specs: - #ensure that the output of the encoder can be processed by the client: + # ensure that the output of the encoder can be processed by the client: matches = tuple(x for x in encoder_spec.output_colorspaces if x in supported_csc_modes) if not matches or self.is_cancelled(): no_match.append(encoder_spec.codec_type+" "+info) @@ -1442,8 +1436,8 @@ def add_scores(info, csc_spec, enc_in_format): else: scaling = (1, 1) score_delta = encoding_score_delta - if self.is_shadow and enc_in_format in ("NV12", "YUV420P", "YUV422P") and scaling==(1, 1): - #avoid subsampling with shadow servers: + if self.is_shadow and enc_in_format in ("NV12", "YUV420P", "YUV422P") and scaling == (1, 1): + # avoid subsampling with shadow servers: score_delta -= 40 vs = self.video_subregion detection = bool(vs) and vs.detection @@ -1456,17 +1450,17 @@ def add_scores(info, csc_spec, enc_in_format): else: scorelog(" no score data for %s", (enc_in_format, csc_spec, encoder_spec, width, height, scaling, "..")) - if not FORCE_CSC or src_format==FORCE_CSC_MODE: + if not FORCE_CSC or src_format == FORCE_CSC_MODE: add_scores(f"direct (no csc) {src_format}", None, src_format) - #now add those that require a csc step: + # now add those that require a csc step: csc_specs = vh.get_csc_specs(src_format) if csc_specs: - #log("%s can also be converted to %s using %s", + # log("%s can also be converted to %s using %s", # pixel_format, [x[0] for x in csc_specs], set(x[1] for x in csc_specs)) - #we have csc module(s) that can get us from pixel_format to out_csc: + # we have csc module(s) that can get us from pixel_format to out_csc: for out_csc, l in csc_specs.items(): - if not bool(FORCE_CSC_MODE) or FORCE_CSC_MODE==out_csc: + if not bool(FORCE_CSC_MODE) or FORCE_CSC_MODE == out_csc: for csc_spec in l: add_scores(f"via {out_csc}", csc_spec, out_csc) scorelog("no matching colorspace specs for %s: %s", encoding, no_match) @@ -1486,26 +1480,26 @@ def get_video_fps(self, width : int, height : int) -> int: mvsub = self.matches_video_subregion(width, height) vs = self.video_subregion 
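# A minimal standalone sketch (illustration only, not from the diff) of the
# quality-target tuning at the top of get_video_pipeline_options() above.
# The parameters stand in for self._current_quality, self._fixed_min_quality
# and the video subregion's measured fps.
from math import sqrt

def sketch_tune_quality_target(target_q: int, min_q: int, in_video_subregion: bool,
                               subregion_is_video: bool, fps: int) -> int:
    if not in_video_subregion or target_q >= 100:
        return target_q
    if subregion_is_video:
        # real video content: lower the quality target as the frame rate rises
        f = min(90, 2 * fps)
        return max(min_q, int(target_q * (100 - f) // 100))
    # video encoding of non-video content: raise the quality a bit
    return int(sqrt(target_q / 100.0) * 100)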
if vs and mvsub: - #matches the video subregion, - #for which we have the fps already: + # matches the video subregion, + # for which we have the fps already: return self.video_subregion.fps return self.do_get_video_fps(width, height) def do_get_video_fps(self, width : int, height : int) -> int: now = monotonic() - #calculate full frames per second (measured in pixels vs window size): + # calculate full frames per second (measured in pixels vs window size): stime = now-5 #only look at the last 5 seconds max - lde = tuple((t,w,h) for t,_,_,w,h in tuple(self.statistics.last_damage_events) if t>stime) + lde = tuple((t,w,h) for t,_,_,w,h in tuple(self.statistics.last_damage_events) if t > stime) if len(lde)>=10: - #the first event's first element is the oldest event time: + # the first event's first element is the oldest event time: otime = lde[0][0] - if now>otime: + if now > otime: pixels = sum(w*h for _,w,h in lde) return int(pixels/(width*height)/(now - otime)) return 0 def calculate_scaling(self, width : int, height : int, max_w : int=4096, max_h : int=4096) -> tuple[int,int]: - if width==0 or height==0: + if width == 0 or height == 0: return (1, 1) now = monotonic() crs = None @@ -1515,14 +1509,14 @@ def get_min_required_scaling(default_value=(1, 1)) -> tuple[int, int]: mw = max_w mh = max_h if crs: - #if the client is going to downscale things anyway, - #then there is no need to send at a higher resolution than that: + # if the client is going to downscale things anyway, + # then there is no need to send at a higher resolution than that: crsw, crsh = crs - if crsw tuple[int, int]: sopts[num/den] = (num, den) for ratio in reversed(sorted(sopts.keys())): num, den = sopts[ratio] - if num==1 and den==1: + if num == 1 and den == 1: continue - if width*num/den<=mw and height*num/den<=mh: + if width*num/den <= mw and height*num/den <= mh: return (num, den) raise ValueError(f"BUG: failed to find a scaling value for window size {width}x{height}") def mrs(v=(1, 1), info="using minimum required scaling"): @@ -1542,7 +1536,7 @@ def mrs(v=(1, 1), info="using minimum required scaling"): scalinglog("%s: %s", info, sv) return sv if not SCALING: - if (width>max_w or height>max_h) and first_time("scaling-required"): + if (width > max_w or height > max_h) and first_time("scaling-required"): if not SCALING: scalinglog.warn("Warning: video scaling is disabled") else: @@ -1552,88 +1546,88 @@ def mrs(v=(1, 1), info="using minimum required scaling"): return 1, 1 if SCALING_HARDCODED: return mrs(tuple(SCALING_HARDCODED), "using hardcoded scaling value") - if self.scaling_control==0: + if self.scaling_control == 0: #video-scaling is disabled, only use scaling if we really have to: return mrs(info="scaling is disabled, using minimum") if self.scaling: #honour value requested for this window, unless we must scale more: return mrs(self.scaling, info="using scaling specified") - if now-self.statistics.last_resized<0.5: + if now-self.statistics.last_resized < 0.5: return mrs(self.actual_scaling, "unchanged scaling during resizing") - if now-self.last_scroll_time<5: + if now-self.last_scroll_time < 5: return mrs(self.actual_scaling, "unchanged scaling during scrolling") - if self.statistics.damage_events_count<=50: - #not enough data yet: + if self.statistics.damage_events_count <= 50: + # not enough data yet: return mrs(info="waiting for more events, using minimum") - #use heuristics to choose the best scaling ratio: + # use heuristics to choose the best scaling ratio: mvsub = self.matches_video_subregion(width, 
height) video = self.content_type.find("video")>=0 or (bool(mvsub) and self.subregion_is_video()) ffps = self.get_video_fps(width, height) q = self._current_quality s = self._current_speed if self.scaling_control is None: - #None==auto mode, derive from quality and speed only: - #increase threshold when handling video + # None == auto mode, derive from quality and speed only: + # increase threshold when handling video q_noscaling = 65 + int(video)*30 - if q>=q_noscaling or ffps==0: + if q >= q_noscaling or ffps == 0: scaling = get_min_required_scaling() else: pps = ffps*width*height #Pixels/s - if self.bandwidth_limit>0: - #assume video compresses pixel data by ~95% (size is 20 times smaller) - #(and convert to bytes per second) - #ie: 240p minimum target + if self.bandwidth_limit > 0: + # assume video compresses pixel data by ~95% (size is 20 times smaller) + # (and convert to bytes per second) + # ie: 240p minimum target target = max(SCALING_MIN_PPS, self.bandwidth_limit//8*20) else: target = SCALING_PPS_TARGET #ie: 1080p if self.is_shadow: - #shadow servers look ugly when scaled: + # shadow servers look ugly when scaled: target *= 16 elif self.content_type.find("text")>=0: - #try to avoid scaling: + # try to avoid scaling: target *= 4 elif not video: - #downscale non-video content less: + # downscale non-video content less: target *= 2 - if self.image_depth==30: - #high bit depth is normally used for high quality + if self.image_depth == 30: + # high bit depth is normally used for high quality target *= 10 - #high quality means less scaling: + # high quality means less scaling: target = target * (10+max(0, q-video*30))**2 // 50**2 - #high speed means more scaling: + # high speed means more scaling: target = target * 60**2 // (s+20)**2 sscaling = {} mrs_num, mrs_den = get_min_required_scaling() min_ratio = mrs_num/mrs_den denom_mult = 1 if crs: - #if the client will be downscaling to paint the window - #use this downscaling as minimum value: + # if the client will be downscaling to paint the window + # use this downscaling as minimum value: crsw, crsh = crs - if crsw>0 and crsh>0 and width-crsw>DOWNSCALE_THRESHOLD and height-crsh>DOWNSCALE_THRESHOLD: - if width/crsw>height/crsh: + if crsw > 0 and crsh > 0 and width-crsw > DOWNSCALE_THRESHOLD and height-crsh > DOWNSCALE_THRESHOLD: + if width/crsw > height/crsh: denom_mult = width/crsw else: denom_mult = height/crsh for num, denom in SCALING_OPTIONS: - if denom_mult>1.0 and 0.5<(num/denom)<1.0: - #skip ratios like 2/3 - #since we want a whole multiple of the client scaling value + if denom_mult > 1.0 and 0.5 < (num/denom) < 1.0: + # skip ratios like 2/3 + # since we want a whole multiple of the client scaling value continue - #scaled pixels per second value: + # scaled pixels per second value: denom *= denom_mult spps = pps*(num**2)/(denom**2) ratio = target/spps - #ideal ratio is 1, measure distance from 1: + # ideal ratio is 1, measure distance from 1: score = int(abs(1-ratio)*100) - if self.actual_scaling and self.actual_scaling==(num, denom) and (num!=1 or denom!=1): - #if we are already downscaling, - #try to stick to the same value longer: - #give it a score boost (lowest score wins): + if self.actual_scaling and self.actual_scaling == (num, denom) and (num!=1 or denom!=1): + # if we are already downscaling, + # try to stick to the same value longer: + # give it a score boost (lowest score wins): score = int(score/1.5) - if num/denom>min_ratio: - #higher than minimum, should not be used unless we have no choice: + if num/denom > 
min_ratio: + # higher than minimum, should not be used unless we have no choice: score = int(score*100) sscaling[score] = (num, denom) scalinglog("calculate_scaling%s wid=%i, pps=%s, target=%s, denom_mult=%s, scores=%s", @@ -1644,58 +1638,57 @@ def mrs(v=(1, 1), info="using minimum required scaling"): else: scaling = get_min_required_scaling() else: - #calculate scaling based on the "video-scaling" command line option, - #which is named "scaling_control" here. - #(from 1 to 100, from least to most aggressive) + # calculate scaling based on the "video-scaling" command line option, + # which is named "scaling_control" here. + # (from 1 to 100, from least to most aggressive) if mvsub: if video: - #enable scaling more aggressively + # enable scaling more aggressively sc = (self.scaling_control+50)*2 else: sc = (self.scaling_control+25) else: - #not the video region, so much less aggressive scaling: + # not the video region, so much less aggressive scaling: sc = max(0, (self.scaling_control-50)//2) - #if scaling_control is high (scaling_control=100 -> er=2) - #then we will match the heuristics more quickly: + # if scaling_control is high (scaling_control=100 -> er=2) + # then we will match the heuristics more quickly: er = sc/50.0 if self.actual_scaling!=(1, 1): - #if we are already downscaling, boost the score so we will stick with it a bit longer: - #more so if we are downscaling a lot (1/3 -> er=1.5 + ..) + # if we are already downscaling, boost the score so we will stick with it a bit longer: + # more so if we are downscaling a lot (1/3 -> er=1.5 + ..) er += (0.5 * self.actual_scaling[1] / self.actual_scaling[0]) - qs = s>(q-er*10) and q<(50+er*15) - #scalinglog("calculate_scaling: er=%.1f, qs=%s, ffps=%s", er, qs, ffps) - if self.fullscreen and (qs or ffps>=max(2, round(10-er*3))): + qs = s > (q-er*10) and q < (50+er*15) + # scalinglog("calculate_scaling: er=%.1f, qs=%s, ffps=%s", er, qs, ffps) + if self.fullscreen and (qs or ffps >= max(2, round(10-er*3))): scaling = 1,3 - elif self.maximized and (qs or ffps>=max(2, round(10-er*3))): + elif self.maximized and (qs or ffps >= max(2, round(10-er*3))): scaling = 1,2 - elif width*height>=(2560-er*768)*1600 and (qs or ffps>=max(4, round(25-er*5))): + elif width*height >= (2560-er*768)*1600 and (qs or ffps >= max(4, round(25-er*5))): scaling = 1,3 - elif width*height>=(1920-er*384)*1200 and (qs or ffps>=max(5, round(30-er*10))): + elif width*height >= (1920-er*384)*1200 and (qs or ffps >= max(5, round(30-er*10))): scaling = 2,3 - elif width*height>=(1200-er*256)*1024 and (qs or ffps>=max(10, round(50-er*15))): + elif width*height >= (1200-er*256)*1024 and (qs or ffps >= max(10, round(50-er*15))): scaling = 2,3 else: scaling = 1,1 if scaling: scalinglog("calculate_scaling value %s enabled by heuristics for %ix%i q=%i, s=%i, er=%.1f, qs=%s, ffps=%i, scaling-control(%i)=%i", scaling, width, height, q, s, er, qs, ffps, self.scaling_control, sc) - #sanity checks: + # sanity checks: if scaling is None: scaling = 1, 1 v, u = scaling - if v/u>1.0: - #never upscale before encoding! + if v/u > 1.0: + # never upscale before encoding! scaling = 1, 1 - elif v/u<0.1: - #don't downscale more than 10 times! (for each dimension - that's 100 times!) + elif v/u < 0.1: + # don't downscale more than 10 times! (for each dimension - that's 100 times!) 
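# A minimal standalone sketch (illustration only, not from the diff) of the
# final sanity clamp applied at the end of calculate_scaling() above; the
# real method also logs the decision.
def sketch_clamp_scaling(scaling) -> tuple:
    if scaling is None:
        return 1, 1
    v, u = scaling
    if v / u > 1.0:
        # never upscale before encoding!
        return 1, 1
    if v / u < 0.1:
        # don't downscale more than 10 times per dimension:
        return 1, 10
    return v, u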
scaling = 1, 10 scalinglog("calculate_scaling%s=%s (q=%s, s=%s, scaling_control=%s)", (width, height, max_w, max_h), scaling, q, s, self.scaling_control) return scaling - def check_pipeline(self, encoding : str, width : int, height : int, src_format : str): """ Checks that the current pipeline is still valid @@ -1712,10 +1705,10 @@ def check_pipeline(self, encoding : str, width : int, height : int, src_format : videolog("check_pipeline%s setting up a new pipeline as check failed - encodings=%s", (encoding, width, height, src_format), encodings) - #cleanup existing one if needed: + # cleanup existing one if needed: self.csc_clean(self._csc_encoder) self.ve_clean(self._video_encoder) - #and make a new one: + # and make a new one: w = width & self.width_mask h = height & self.height_mask scores = self.get_video_pipeline_options(encodings, w, h, src_format) @@ -1728,8 +1721,8 @@ def do_check_pipeline(self, encodings : tuple[str,...], width : int, height : in Runs in the 'encode' thread. """ - #use aliases, not because of threading (we are in the encode thread anyway) - #but to make the code less dense: + # use aliases, not because of threading (we are in the encode thread anyway) + # but to make the code less dense: ve = self._video_encoder csce = self._csc_encoder if ve is None: @@ -1763,11 +1756,11 @@ def do_check_pipeline(self, encodings : tuple[str,...], width : int, height : in print_nested_dict(ve.get_info(), " ", print_fn=csclog.error) return False - #encoder will take its input from csc: + # encoder will take its input from csc: encoder_src_width = csce.get_dst_width() encoder_src_height = csce.get_dst_height() else: - #direct to video encoder without csc: + # direct to video encoder without csc: encoder_src_width = width & self.width_mask encoder_src_height = height & self.height_mask @@ -1786,7 +1779,6 @@ def do_check_pipeline(self, encodings : tuple[str,...], width : int, height : in return False return True - def setup_pipeline(self, scores : tuple, width : int, height : int, src_format : str): """ Given a list of pipeline options ordered by their score @@ -1797,7 +1789,7 @@ def setup_pipeline(self, scores : tuple, width : int, height : int, src_format : Runs in the 'encode' thread. """ - if width<=0 or height<=0: + if width <= 0 or height <= 0: raise RuntimeError(f"invalid dimensions: {width}x{height}") start = monotonic() if not scores: @@ -1810,9 +1802,9 @@ def setup_pipeline(self, scores : tuple, width : int, height : int, src_format : try: videolog("setup_pipeline: trying %s", option) if self.setup_pipeline_option(width, height, src_format, *option): - #success! + # success! return True - #skip cleanup below + # skip cleanup below continue except TransientCodecException as e: if self.is_cancelled(): @@ -1825,7 +1817,7 @@ def setup_pipeline(self, scores : tuple, width : int, height : int, src_format : if self.is_cancelled(): return False videolog.warn("Warning: failed to setup video pipeline %s", option, exc_info=True) - #we're here because an exception occurred, cleanup before trying again: + # we're here because an exception occurred, cleanup before trying again: self.csc_clean(self._csc_encoder) self.ve_clean(self._video_encoder) end = monotonic() @@ -1847,15 +1839,15 @@ def setup_pipeline_option(self, width : int, height : int, src_format : str, max_w = 16384 max_h = 16384 if csc_spec: - #TODO: no need to OR encoder mask if we are scaling... + # TODO: no need to OR encoder mask if we are scaling... 
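# A minimal standalone sketch (illustration only, not from the diff) of how
# setup_pipeline_option() above combines constraints when a csc step is used.
# csc_spec and encoder_spec stand in for xpra's codec spec objects; only the
# width_mask / height_mask attributes shown in the hunk are assumed here.
def sketch_csc_constraints(csc_spec, encoder_spec, speed: int, quality: int):
    # both the csc module and the encoder constrain the usable dimensions:
    width_mask = csc_spec.width_mask & encoder_spec.width_mask
    height_mask = csc_spec.height_mask & encoder_spec.height_mask
    # csc speed matters far less than encoding speed,
    # and it must never be allowed to degrade quality:
    csc_speed = max(1, min(speed, 100 - quality / 2.0))
    return width_mask, height_mask, csc_speed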
width_mask = csc_spec.width_mask & encoder_spec.width_mask height_mask = csc_spec.height_mask & encoder_spec.height_mask min_w = max(min_w, csc_spec.min_w) min_h = max(min_h, csc_spec.min_h) max_w = min(max_w, csc_spec.max_w) max_h = min(max_h, csc_spec.max_h) - #csc speed is not very important compared to encoding speed, - #so make sure it never degrades quality + # csc speed is not very important compared to encoding speed, + # so make sure it never degrades quality speed = options.get("speed", self._current_speed) quality = options.get("quality", self._current_quality) csc_speed = max(1, min(speed, 100-quality/2.0)) @@ -1869,10 +1861,10 @@ def setup_pipeline_option(self, width : int, height : int, src_format : str, csce, csce.get_info(), (csc_end-csc_start)*1000.0) else: csce = None - #use the encoder's mask directly since that's all we have to worry about! + # use the encoder's mask directly since that's all we have to worry about! width_mask = encoder_spec.width_mask height_mask = encoder_spec.height_mask - #restrict limits: + # restrict limits: min_w = max(min_w, encoder_spec.min_w) min_h = max(min_h, encoder_spec.min_h) max_w = min(max_w, encoder_spec.max_w) @@ -1882,11 +1874,11 @@ def setup_pipeline_option(self, width : int, height : int, src_format : str, return False self._csc_encoder = csce enc_start = monotonic() - #FIXME: filter dst_formats to only contain formats the encoder knows about? + # FIXME: filter dst_formats to only contain formats the encoder knows about? dst_formats = self.full_csc_modes.strtupleget(encoder_spec.encoding) ve = encoder_spec.make_instance() options.update(self.get_video_encoder_options(encoder_spec.encoding, width, height)) - if self.encoding=="grayscale": + if self.encoding == "grayscale": options["grayscale"] = True if encoder_scaling!=(1, 1): n, d = encoder_scaling @@ -1896,7 +1888,7 @@ def setup_pipeline_option(self, width : int, height : int, src_format : str, options["dst-formats"] = dst_formats ve.init_context(encoder_spec.encoding, enc_width, enc_height, enc_in_format, options) - #record new actual limits: + # record new actual limits: self.actual_scaling = scaling self.width_mask = width_mask self.height_mask = height_mask @@ -1913,13 +1905,13 @@ def setup_pipeline_option(self, width : int, height : int, src_format : str, return True def get_video_encoder_options(self, encoding, width, height) -> dict[str,Any]: - #tweaks for "real" video: + # tweaks for "real" video: opts = {} if self.cuda_device_context: opts["cuda-device-context"] = self.cuda_device_context - if not self._fixed_quality and not self._fixed_speed and self._fixed_min_quality<50: - #only allow bandwidth to drive video encoders - #when we don't have strict quality or speed requirements: + if not self._fixed_quality and not self._fixed_speed and self._fixed_min_quality < 50: + # only allow bandwidth to drive video encoders + # when we don't have strict quality or speed requirements: opts["bandwidth-limit"] = self.bandwidth_limit if self.content_type: content_type = self.content_type @@ -1929,7 +1921,7 @@ def get_video_encoder_options(self, encoding, width, height) -> dict[str,Any]: content_type = None if content_type: opts["content-type"] = content_type - if content_type=="video": + if content_type == "video": if B_FRAMES and (encoding in self.supports_video_b_frames): opts["b-frames"] = True return opts @@ -1944,13 +1936,13 @@ def get_fail_cb(self, packet) -> Callable | None: def make_draw_packet(self, x : int, y : int, w : int, h : int, coding : str, data, outstride : int, 
client_options, options) -> tuple: - #overridden so we can invalidate the scroll data: - #log.error("make_draw_packet%s", (x, y, w, h, coding, "..", outstride, client_options) + # overridden so we can invalidate the scroll data: + # log.error("make_draw_packet%s", (x, y, w, h, coding, "..", outstride, client_options) packet = super().make_draw_packet(x, y, w, h, coding, data, outstride, client_options, options) sd = self.scroll_data if sd and not options.get("scroll"): if client_options.get("scaled_size") or client_options.get("quality", 100)<20: - #don't scroll very low quality content, better to refresh it + # don't scroll very low quality content, better to refresh it scrolllog("low quality %s update, invalidating all scroll data (scaled_size=%s, quality=%s)", coding, client_options.get("scaled_size"), client_options.get("quality", 100)) self.do_free_scroll_data() @@ -1958,7 +1950,6 @@ def make_draw_packet(self, x : int, y : int, w : int, h : int, sd.invalidate(x, y, w, h) return packet - def free_scroll_data(self) -> None: self.call_in_encode_thread(False, self.do_free_scroll_data) @@ -1972,7 +1963,7 @@ def do_free_scroll_data(self) -> None: def may_use_scrolling(self, image : ImageWrapper, options) -> bool: scrolllog("may_use_scrolling(%s, %s) supports_scrolling=%s, has_pixels=%s, content_type=%s, non-video encodings=%s", image, options, self.supports_scrolling, image.has_pixels, self.content_type, self.non_video_encodings) - if self._mmap_size>0 and self.encoding!="scroll": + if self._mmap_size > 0 and self.encoding!="scroll": scrolllog("no scrolling: using mmap") return False if not self.supports_scrolling: @@ -1981,8 +1972,8 @@ def may_use_scrolling(self, image : ImageWrapper, options) -> bool: if options.get("auto_refresh"): scrolllog("no scrolling: auto-refresh") return False - #don't download the pixels if we have a GPU buffer, - #since that means we're likely to be able to compress on the GPU too with NVENC: + # don't download the pixels if we have a GPU buffer, + # since that means we're likely to be able to compress on the GPU too with NVENC: if not image.has_pixels(): return False if self.content_type.find("video")>=0 or not self.non_video_encodings: @@ -1990,7 +1981,7 @@ def may_use_scrolling(self, image : ImageWrapper, options) -> bool: return False w = image.get_width() h = image.get_height() - if w bool: self.do_free_scroll_data() return False speed = options.get("speed", self._current_speed) - if speed>=50 or self.scroll_preference<100: + if speed >= 50 or self.scroll_preference < 100: now = monotonic() scroll_event_elapsed = now-self.last_scroll_event scroll_encode_elapsed = now-self.last_scroll_time - #how long since we last successfully used scroll encoding, - #or seen a scroll mouse wheel event: + # how long since we last successfully used scroll encoding, + # or seen a scroll mouse wheel event: scroll_elapsed = min(scroll_event_elapsed, scroll_encode_elapsed) max_time = 1+min((100-speed)/10, self.scroll_preference/20) - if scroll_elapsed>=max_time: + if scroll_elapsed >= max_time: scrolllog("no scrolling: elapsed=%.1f, max time=%.1f", scroll_elapsed, max_time) return False return self.do_scroll_encode(image, options, self.scroll_min_percent) def scroll_encode(self, coding : str, image: ImageWrapper, options) -> None: - assert coding=="scroll" + assert coding == "scroll" self.do_scroll_encode(image, options, 0) - #do_scroll_encode() sends the packets - #so there is nothing to return: + # do_scroll_encode() sends the packets + # so there is nothing to return: return 
None def do_scroll_encode(self, image : ImageWrapper, options, min_percent : int=0) -> bool: @@ -2029,7 +2020,7 @@ def do_scroll_encode(self, image : ImageWrapper, options, min_percent : int=0) - if options.get("scroll") is True: scrolllog("no scrolling: detection has already been used on this image") return False - if w>=32000 or h>=32000: + if w >= 32000 or h >= 32000: scrolllog("no scrolling: the image is too large, %ix%i", w, h) return False try: @@ -2040,11 +2031,11 @@ def do_scroll_encode(self, image : ImageWrapper, options, min_percent : int=0) - self.scroll_data = scroll_data scrolllog("new scroll data: %s", scroll_data) if not image.is_thread_safe(): - #what we really want is to check that the frame has been frozen, - #so it doesn't get modified whilst we checksum or encode it, - #the "thread_safe" flag gives us that for the X11 case in most cases, - #(the other servers already copy the pixels from the "real" screen buffer) - #TODO: use a separate flag? (ximage uses this flag to know if it is safe + # what we really want is to check that the frame has been frozen, + # so it doesn't get modified whilst we checksum or encode it, + # the "thread_safe" flag gives us that for the X11 case in most cases, + # (the other servers already copy the pixels from the "real" screen buffer) + # TODO: use a separate flag? (ximage uses this flag to know if it is safe # to call image.free from another thread - which is theoretically more restrictive) newstride = roundup(image.get_width()*image.get_bytesperpixel(), 2) image.restride(newstride) @@ -2056,9 +2047,9 @@ def do_scroll_encode(self, image : ImageWrapper, options, min_percent : int=0) - scroll_data.update(pixels, x, y, w, h, stride, bpp) max_distance = min(1000, (100-min_percent)*h//100) scroll_data.calculate(max_distance) - #marker telling us not to invalidate the scroll data from here on: + # marker telling us not to invalidate the scroll data from here on: options["scroll"] = True - if min_percent>0: + if min_percent > 0: max_zones = 20 scroll, count = scroll_data.get_best_match() end = monotonic() @@ -2068,8 +2059,8 @@ def do_scroll_encode(self, image : ImageWrapper, options, min_percent : int=0) - else: max_zones = 50 match_pct = min_percent - #if enough scrolling is detected, use scroll encoding for this frame: - if match_pct>=min_percent: + # if enough scrolling is detected, use scroll encoding for this frame: + if match_pct >= min_percent: self.encode_scrolling(scroll_data, image, options, match_pct, max_zones) return True except Exception: @@ -2077,16 +2068,16 @@ def do_scroll_encode(self, image : ImageWrapper, options, min_percent : int=0) - if not self.is_cancelled(): scrolllog.error("Error during scrolling detection") scrolllog.error(" with image=%s, options=%s", image, options, exc_info=True) - #make sure we start again from scratch next time: + # make sure we start again from scratch next time: self.do_free_scroll_data() return False def encode_scrolling(self, scroll_data, image : ImageWrapper, options, match_pct : int, max_zones : int=20) -> None: - #generate all the packets for this screen update - #using 'scroll' encoding and picture encodings for the other regions + # generate all the packets for this screen update + # using 'scroll' encoding and picture encodings for the other regions start = monotonic() options.pop("av-sync", None) - #tells make_data_packet not to invalidate the scroll data: + # tells make_data_packet not to invalidate the scroll data: ww, wh = self.window_dimensions scrolllog("encode_scrolling([], %s, %s, 
%i, %i) window-dimensions=%s", image, options, match_pct, max_zones, (ww, wh)) @@ -2095,40 +2086,40 @@ def encode_scrolling(self, scroll_data, image : ImageWrapper, options, match_pct w = image.get_width() h = image.get_height() raw_scroll, non_scroll = {}, {0 : h} - if x+w>ww or y+h>wh: - #window may have been resized + if x+w > ww or y+h > wh: + # window may have been resized pass else: v = scroll_data.get_scroll_values() if v: raw_scroll, non_scroll = v if len(raw_scroll)>=max_zones or len(non_scroll)>=max_zones: - #avoid fragmentation, which is too costly - #(too many packets, too many loops through the encoder code) + # avoid fragmentation, which is too costly + # (too many packets, too many loops through the encoder code) scrolllog("too many items: %i scrolls, %i non-scrolls - sending just one image instead", len(raw_scroll), len(non_scroll)) raw_scroll = {} non_scroll = {0 : h} scrolllog(" will send scroll data=%s, non-scroll=%s", raw_scroll, non_scroll) flush = len(non_scroll) - #convert to a screen rectangle list for the client: + # convert to a screen rectangle list for the client: scrolls = [] for scroll, line_defs in raw_scroll.items(): - if scroll==0: + if scroll == 0: continue for line, count in line_defs.items(): - if y+line+scroll<0: + if y+line+scroll < 0: raise RuntimeError(f"cannot scroll rectangle by {scroll} lines from {y}+{line}") - if y+line+scroll>wh: + if y+line+scroll > wh: raise RuntimeError(f"cannot scroll rectangle {count} high "+ f"by {scroll} lines from {y}+{line} (window height is {wh})") scrolls.append((x, y+line, w, count, 0, scroll)) del raw_scroll - #send the scrolls if we have any - #(zero change scrolls have been removed - so maybe there are none) + # send the scrolls if we have any + # (zero change scrolls have been removed - so maybe there are none) if scrolls: client_options = {} - if flush>0: + if flush > 0: client_options["flush"] = flush coding = "scroll" end = monotonic() @@ -2140,14 +2131,14 @@ def encode_scrolling(self, scroll_data, image : ImageWrapper, options, match_pct len(scrolls), w*h*4/1024, self._damage_packet_sequence, client_options, options) del scrolls - #send the rest as rectangles: + # send the rest as rectangles: if non_scroll: if self.content_type.find("text")>=0: quality = 100 options["quality"] = quality - #boost quality a bit, because lossless saves refreshing, - #more so if we have a high match percentage (less to send): - elif self._fixed_quality<=0: + # boost quality a bit, because lossless saves refreshing, + # more so if we have a high match percentage (less to send): + elif self._fixed_quality <= 0: quality = options.get("quality", self._current_quality) quality = min(100, quality + max(60, match_pct)//2) options["quality"] = quality @@ -2169,9 +2160,9 @@ def encode_scrolling(self, scroll_data, image : ImageWrapper, options, match_pct if not data: raise RuntimeError(f"no data from {encoding} function {encode_fn}") flush -= 1 - if flush>0: + if flush > 0: client_options["flush"] = flush - #if SAVE_TO_FILE: + # if SAVE_TO_FILE: # #hard-coded for BGRA! 
# from xpra.os_util import memoryview_to_bytes # from PIL import Image @@ -2201,17 +2192,17 @@ def encode_scrolling(self, scroll_data, image : ImageWrapper, options, match_pct def do_schedule_auto_refresh(self, encoding : str, data, region, client_options, options) -> None: - #for scroll encoding, data is a LargeStructure wrapper: - if encoding=="scroll" and hasattr(data, "data"): + # for scroll encoding, data is a LargeStructure wrapper: + if encoding == "scroll" and hasattr(data, "data"): if not self.refresh_regions: return - #check if any pending refreshes intersect the area containing the scroll data: + # check if any pending refreshes intersect the area containing the scroll data: if not any(region.intersects_rect(r) for r in self.refresh_regions): #nothing to do! return pixels_added = 0 for x, y, w, h, dx, dy in data.data: - #the region that moved + # the region that moved src_rect = rectangle(x, y, w, h) for rect in self.refresh_regions: inter = src_rect.intersection_rect(rect) @@ -2219,15 +2210,15 @@ def do_schedule_auto_refresh(self, encoding : str, data, region, client_options, dst_rect = rectangle(inter.x+dx, inter.y+dy, inter.width, inter.height) pixels_added += self.add_refresh_region(dst_rect) if pixels_added: - #if we end up with too many rectangles, - #bail out and simplify: + # if we end up with too many rectangles, + # bail out and simplify: if len(self.refresh_regions)>=200: self.refresh_regions = [merge_all(self.refresh_regions)] refreshlog("updated refresh regions with scroll data: %i pixels added", pixels_added) refreshlog(" refresh_regions=%s", self.refresh_regions) - #we don't change any of the refresh scheduling - #if there are non-scroll packets following this one, they will - #and if not then we're OK anyway + # we don't change any of the refresh scheduling + # if there are non-scroll packets following this one, they will + # and if not then we're OK anyway return super().do_schedule_auto_refresh(encoding, data, region, client_options, options) @@ -2286,7 +2277,7 @@ def do_video_encode(self, encoding : str, image : ImageWrapper, options : dict) # if the client doesn't support alpha, # use an rgb input format that ignores the alpha channel: if not self.supports_transparency and src_format.find("A")>=0: - #ie: "BGRA" -> "BGRX" + # ie: "BGRA" -> "BGRX" src_format = src_format.replace("A", "X") if SAVE_VIDEO_FRAMES: @@ -2297,7 +2288,7 @@ def do_video_encode(self, encoding : str, image : ImageWrapper, options : dict) rgba_format = rgb_format.replace("BGRX", "BGRA") img = Image.frombuffer("RGBA", (w, h), memoryview_to_bytes(img_data), "raw", rgba_format, stride) kwargs = {} - if SAVE_VIDEO_FRAMES=="jpeg": + if SAVE_VIDEO_FRAMES == "jpeg": kwargs = { "quality" : 0, "optimize" : False, @@ -2311,29 +2302,29 @@ def do_video_encode(self, encoding : str, image : ImageWrapper, options : dict) img.save(filename, SAVE_VIDEO_FRAMES, **kwargs) if self.may_use_scrolling(image, options): - #scroll encoding has dealt with this image + # scroll encoding has dealt with this image return () if not self.common_video_encodings: - #we have to send using a non-video encoding as that's all we have! + # we have to send using a non-video encoding as that's all we have! 
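# A minimal standalone sketch (illustration only, not from the diff) of the
# refresh-region shifting done by do_schedule_auto_refresh() above for
# "scroll" packets, using plain (x, y, w, h) tuples instead of xpra's
# rectangle class.
def sketch_shift_refresh_regions(scrolls, refresh_regions):
    """scrolls: iterable of (x, y, w, h, dx, dy) moved areas,
    refresh_regions: list of (x, y, w, h) pending refresh rectangles."""
    shifted = []
    for x, y, w, h, dx, dy in scrolls:
        for rx, ry, rw, rh in refresh_regions:
            # intersection of the moved area with the pending refresh region:
            ix, iy = max(x, rx), max(y, ry)
            iw = min(x + w, rx + rw) - ix
            ih = min(y + h, ry + rh) - iy
            if iw > 0 and ih > 0:
                # the part that still needs refreshing moves with the scroll:
                shifted.append((ix + dx, iy + dy, iw, ih))
    return refresh_regions + shifted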
videolog("no common video encodings: using fallback") return self.video_fallback(image, options) if self.image_depth not in (24, 30, 32): - #this image depth is not supported for video + # this image depth is not supported for video videolog("depth %s not supported for video: using fallback", self.image_depth) return self.video_fallback(image, options) - if self.encoding=="grayscale": + if self.encoding == "grayscale": from xpra.codecs.csc_libyuv.converter import argb_to_gray image = argb_to_gray(image) vh = self.video_helper if vh is None: - return () #shortcut when closing down + return () # shortcut when closing down if not self.check_pipeline(encoding, w, h, src_format): if self.is_cancelled(): return () - #just for diagnostics: + # just for diagnostics: supported_csc_modes = self.full_csc_modes.strtupleget(encoding) encoder_specs = vh.get_encoder_specs(encoding) encoder_types = [] @@ -2364,11 +2355,11 @@ def do_video_encode(self, encoding : str, image : ImageWrapper, options : dict) videolog("video encoder %s is not ready yet, using temporary fallback", ve) return self.video_fallback(image, options, warn=False) - #we're going to use the video encoder, - #so make sure we don't time it out: + # we're going to use the video encoder, + # so make sure we don't time it out: self.cancel_video_encoder_timer() - #dw and dh are the edges we don't handle here + # dw and dh are the edges we don't handle here width = w & self.width_mask height = h & self.height_mask videolog("video_encode%s image size: %4ix%-4i, encoder/csc size: %4ix%-4i", @@ -2409,11 +2400,11 @@ def do_video_encode(self, encoding : str, image : ImageWrapper, options : dict) if LOG_ENCODERS or compresslog.is_debug_enabled(): client_options["csc-type"] = csce.get_type() if csce else "none" - #populate client options: + # populate client options: frame = client_options.get("frame", 0) - if frame0: + if delayed > 0: self.schedule_video_encoder_flush(ve, csc, frame, x, y, scaled_size) if not data: if self.non_video_encodings and frame==0: @@ -2486,8 +2477,8 @@ def flush_video_encoder_now(self) -> None: self.flush_video_encoder() def flush_video_encoder(self) -> None: - #this runs in the UI thread as scheduled by schedule_video_encoder_flush, - #but we want to run from the encode thread to access the encoder: + # this runs in the UI thread as scheduled by schedule_video_encoder_flush, + # but we want to run from the encode thread to access the encoder: self.b_frame_flush_timer = 0 if self.b_frame_flush_data: self.call_in_encode_thread(True, self.do_flush_video_encoder) @@ -2501,7 +2492,7 @@ def do_flush_video_encoder(self) -> None: if self._video_encoder!=ve or ve.is_closed(): return if frame==0 and ve.get_type()=="x264": - #x264 has problems if we try to re-use a context after flushing the first IDR frame + # x264 has problems if we try to re-use a context after flushing the first IDR frame self.ve_clean(self._video_encoder) if self.non_video_encodings: log("do_flush_video_encoder() scheduling novideo refresh") @@ -2525,19 +2516,19 @@ def do_flush_video_encoder(self) -> None: if self.video_stream_file: self.video_stream_file.write(data) self.video_stream_file.flush() - if frame0: self.schedule_video_encoder_flush(ve, csc, frame, x, y, scaled_size) else: @@ -2557,7 +2548,7 @@ def schedule_video_encoder_timer(self) -> None: timeout = VIDEO_TIMEOUT else: timeout = VIDEO_NODETECT_TIMEOUT - if timeout>0: + if timeout > 0: self.video_encoder_timer = self.timeout_add(timeout*1000, self.video_encoder_timeout) def video_encoder_timeout(self) 
-> None: @@ -2578,7 +2569,7 @@ def csc_image(self, image, width, height) -> tuple: """ csce = self._csc_encoder if csce is None: - #no csc step! + # no csc step! return None, image, image.get_pixel_format(), width, height start = monotonic()
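# A minimal standalone sketch (illustration only, not from the diff) of the
# shape of the csc_image() step shown above.  The no-csc shortcut is taken
# straight from the hunk; the converted branch is an assumption about the
# csc API (convert_image()/get_dst_format() are not shown in this diff,
# only get_dst_width()/get_dst_height() appear in an earlier hunk).
def sketch_csc_image(csce, image, width: int, height: int) -> tuple:
    if csce is None:
        # no csc step: hand the image straight to the video encoder
        return None, image, image.get_pixel_format(), width, height
    # otherwise the image is converted to the encoder's input colorspace
    # (e.g. BGRX -> YUV420P) and the converted dimensions are returned:
    converted = csce.convert_image(image)
    return csce, converted, csce.get_dst_format(), csce.get_dst_width(), csce.get_dst_height()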